/* drivers/net/ethernet/emulex/benet/be_main.c */
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter) {
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

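/* Enable or disable host interrupt generation. The HOSTINTR bit lives in
 * the MEMBAR control register, which is accessed through PCI config space;
 * the early return avoids touching the device after an EEH error.
 */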
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_err)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

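/* The next four helpers ring the RQ/TXQ/EQ/CQ doorbells. The wmb() in
 * be_rxq_notify()/be_txq_notify() orders the posted descriptor writes in
 * memory before the doorbell write that makes them visible to the adapter.
 */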
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

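/* Change the MAC by adding the new pmac entry first and deleting the old
 * one only afterwards, so the interface always keeps a programmed MAC.
 */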
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_mac_addr_query(adapter, current_mac,
				MAC_ADDRESS_TYPE_NETWORK, false,
				adapter->if_handle, 0);
	if (status)
		goto err;

	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id, 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}

static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

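/* Fold a 16-bit HW counter into a 32-bit accumulator: the low 16 bits track
 * the counter itself and the high 16 bits count wrap-arounds (each wrap
 * adds 65536).
 */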
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)	(x & 0xFFFF)
#define hi(x)	(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u32 link_status)
{
	struct net_device *netdev = adapter->netdev;

	/* when link status changes, link speed must be re-queried from card */
	adapter->link_speed = -1;
	if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
		netif_carrier_on(netdev);
		dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
	} else {
		netif_carrier_off(netdev);
		dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
	}
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
							bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

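/* Populate a fragment WRB with the DMA address and length of one buffer */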
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

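/* Fill the header WRB that precedes the fragment WRBs: LSO/checksum offload
 * flags, an optional 802.1Q tag (re-prioritized when the OS-given priority
 * is not in the available bitmap), the WRB count and total payload length.
 */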
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
						LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
							tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
							udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

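/* Main transmit entry point: maps the skb into WRBs, stops the subqueue
 * *before* ringing the doorbell when fewer than BE_MAX_TX_FRAG_COUNT
 * entries remain, and drops the skb when the DMA mapping failed.
 */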
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return 0;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);

	return 0;
}

static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;

	if (!be_physfn(adapter))
		return 0;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);

	return 0;
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter, false, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
			netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
	return;
}

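/* The SR-IOV ndo callbacks below are valid only on the PF with SR-IOV
 * enabled (-EPERM otherwise) and take a VF index bounded by num_vfs.
 */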
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		status = be_cmd_pmac_del(adapter,
				adapter->vf_cfg[vf].vf_if_handle,
				adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac,
				adapter->vf_cfg[vf].vf_if_handle,
				&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}

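/* Adaptive interrupt coalescing: once a second, derive a new EQ delay from
 * the measured rx packet rate and program it via be_cmd_modify_eqd() when
 * it changes.
 */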
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = rx_stats(rxo);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = stats->rx_pps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd) {
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
		rx_eq->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

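/* Look up the page backing a posted rx fragment; the big page is
 * DMA-unmapped only when its last fragment user is reclaimed.
 */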
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throwaway the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(&eq_obj->napi);
}

static void be_parse_rx_compl_v1(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	/* rss_hash must be extracted from the HW completion, not from rxcp */
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	/* rss_hash must be extracted from the HW completion, not from rxcp */
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

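/* Pop the next tx completion, if any. A zero valid dword means the CQ entry
 * has not been written yet; the rmb() keeps the payload reads from being
 * reordered before that check.
 */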
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj,
			bool rearm)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	if (!num)
		rearm = true;

	be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}

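/* Drain the tx queue on teardown: poll up to 200ms for completions, then
 * forcibly unmap and free any skbs whose completions never arrived.
 */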
static void be_tx_compl_clean(struct be_adapter *adapter,
		struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			atomic_sub(num_wrbs, &txq->used);
			cmpl = 0;
			num_wrbs = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
		atomic_sub(num_wrbs, &txq->used);
	}
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created, as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

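/* The label ladder in be_mcc_queues_create() is the usual kernel
 * goto-based unwind idiom: each failure jumps to the label that tears
 * down only what was already created, in reverse order of creation.
 */
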
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

static int be_num_txqs_want(struct be_adapter *adapter)
{
	if ((num_vfs && adapter->sriov_enabled) ||
		be_is_mc(adapter) ||
		lancer_chip(adapter) || !be_physfn(adapter) ||
		adapter->generation == BE_GEN2)
		return 1;
	else
		return MAX_TX_QS;
}

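/* Put differently: MAX_TX_QS tx queues are requested only on a
 * non-Lancer BE3 physical function with SR-IOV disabled and no
 * multi-channel mode (see be_is_mc()); every other configuration runs
 * with a single tx queue.
 */
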
/* One TX event queue is shared by all TX compl qs */
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;

	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
		sizeof(struct be_eq_entry)))
		return -1;

	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto err;
	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
			goto err;

		if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
			goto err;

		q = &txo->q;
		if (be_queue_alloc(adapter, q, TX_Q_LEN,
			sizeof(struct be_eth_wrb)))
			goto err;
	}
	return 0;

err:
	be_tx_queues_destroy(adapter);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		be_queue_free(adapter, &rxo->q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		q = &rxo->rx_eq.q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		be_queue_free(adapter, q);
	}
}

static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !adapter->sriov_enabled && be_physfn(adapter) &&
	    !be_is_mc(adapter)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			"No support for multiple RX queues\n");
		return 1;
	}
}

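/* Likewise, multiple rx queues (one default queue plus up to MAX_RSS_QS
 * RSS queues) are used only when firmware reports the RSS capability on
 * a physical function with SR-IOV and multi-channel both disabled.
 */
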
static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				msix_enabled(adapter) ?
					adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			"Can create only %d RX queues", adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		/* Rx Q - will be created in be_open() */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}

static bool event_peek(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
	if (!eqe->evt)
		return false;
	else
		return true;
}

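/* event_peek() only samples the valid bit of the entry at the EQ tail;
 * it neither clears the entry nor advances the queue, so the event is
 * still pending for event_handle() to consume.
 */
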
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0, rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq, false);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq, true);
		}

		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq, false);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq, true);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_rx_obj *rxo = dev;
	struct be_adapter *adapter = rxo->adapter;

	event_handle(adapter, &rxo->rx_eq, true);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq, false);

	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return (rxcp->tcpf && !rxcp->err) ? true : false;
}

static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rx_stats(rxo)->rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some SKUs
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(adapter, rxo, rxcp);
		else
			be_rx_compl_process(adapter, rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	be_cq_notify(adapter, rx_cq->id, false, work_done);

	/* Refill the queue */
	if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		/* Arm CQ */
		be_cq_notify(adapter, rx_cq->id, true, 0);
	}
	return work_done;
}

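/* be_poll_rx() follows the standard NAPI contract: when it uses up its
 * whole budget it returns without napi_complete(), so the core keeps
 * polling; only when the CQ drains early does it complete NAPI and
 * re-arm the completion-queue interrupt.
 */
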
/* As TX and MCC share the same EQ, check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_tx_obj *txo;
	struct be_eth_tx_compl *txcp;
	int tx_compl, mcc_compl, status = 0;
	u8 i;
	u16 num_wrbs;

	for_all_tx_queues(adapter, txo, i) {
		tx_compl = 0;
		num_wrbs = 0;
		while ((txcp = be_tx_compl_get(&txo->cq))) {
			num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
			tx_compl++;
		}
		if (tx_compl) {
			be_cq_notify(adapter, txo->cq.id, true, tx_compl);

			atomic_sub(num_wrbs, &txo->q.used);

			/* As Tx wrbs have been freed up, wake up netdev queue
			 * if it was stopped due to lack of tx wrbs. */
			if (__netif_subqueue_stopped(adapter->netdev, i) &&
				atomic_read(&txo->q.used) < txo->q.len / 2) {
				netif_wake_subqueue(adapter->netdev, i);
			}

			u64_stats_update_begin(&tx_stats(txo)->sync_compl);
			tx_stats(txo)->tx_compl += tx_compl;
			u64_stats_update_end(&tx_stats(txo)->sync_compl);
		}
	}

	mcc_compl = be_process_mcc(adapter, &status);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	napi_complete(napi);

	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
	adapter->drv_stats.tx_events++;
	return 1;
}

void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	if (adapter->eeh_err || adapter->ue_detected)
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		ue_lo = (ue_lo & (~ue_lo_mask));
		ue_hi = (ue_hi & (~ue_hi_mask));
	}

	if (ue_lo || ue_hi ||
		sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev,
			"Unrecoverable error in the card\n");
	}

	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"sliport error2 0x%x\n", sliport_err2);
	}
}

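/* The bit-walk above shifts the unmasked UE words right one bit at a
 * time, printing the block name from ue_status_low_desc[] or
 * ue_status_hi_desc[] for every error bit that is set; bits covered by
 * the mask registers were already filtered out by the (~mask) AND.
 */
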
static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
	}
}

static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS	(1 + 1) /* Rx + Tx */
	int i, status, num_vec;

	num_vec = be_num_rxqs_want(adapter) + 1;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}

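/* The retry above relies on the legacy pci_enable_msix() contract:
 * 0 means all requested vectors were granted, while a positive return
 * is the number of vectors that could be granted, so the call is
 * reissued with that smaller count provided it still covers
 * BE_MIN_MSIX_VECTORS (one Rx + one Tx).
 */
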
static int be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 nvfs;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
					pos + PCI_SRIOV_TOTAL_VF, &nvfs);

		if (num_vfs > nvfs) {
			dev_info(&adapter->pdev->dev,
				"Device supports %d VFs and not %d\n",
				nvfs, num_vfs);
			num_vfs = nvfs;
		}

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;

		if (adapter->sriov_enabled) {
			adapter->vf_cfg = kcalloc(num_vfs,
						sizeof(struct be_vf_cfg),
						GFP_KERNEL);
			if (!adapter->vf_cfg)
				return -ENOMEM;
		}
	}
#endif
	return 0;
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (adapter->sriov_enabled) {
		pci_disable_sriov(adapter->pdev);
		kfree(adapter->vf_cfg);
		adapter->sriov_enabled = false;
	}
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
				struct be_eq_obj *eq_obj)
{
	return adapter->msix_entries[eq_obj->eq_idx].vector;
}

static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	int vec = be_msix_vec_get(adapter, eq_obj);
	free_irq(vec, context);
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}

static void be_rx_queues_clear(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created)
			be_eq_clean(adapter, &rxo->rx_eq);
	}
}

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	if (lancer_chip(adapter)) {
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
		for_all_rx_queues(adapter, rxo, i)
			be_cq_notify(adapter, rxo->cq.id, false, 0);
		for_all_tx_queues(adapter, txo, i)
			be_cq_notify(adapter, txo->cq.id, false, 0);
	}

	if (msix_enabled(adapter)) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	for_all_tx_queues(adapter, txo, i)
		be_tx_compl_clean(adapter, txo);

	be_rx_queues_clear(adapter);
	return 0;
}

static int be_rx_queues_setup(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
			rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
			adapter->if_handle,
			(i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);

		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL);
		napi_enable(&rxo->rx_eq.napi);
	}
	return 0;
}

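/* The nested loops in be_rx_queues_setup() fill the 128-entry RSS
 * indirection table round-robin over the RSS queues (queue 0 stays the
 * default non-RSS queue). With four RSS queues, for example, the table
 * becomes rss_id1, rss_id2, rss_id3, rss_id4, rss_id1, ... until all
 * 128 slots are populated.
 */
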
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	int status, i;

	status = be_rx_queues_setup(adapter);
	if (status)
		goto err;

	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-LAN\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];

	be_vf_eth_addr_generate(adapter, mac);

	for (vf = 0; vf < num_vfs; vf++) {
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
					adapter->vf_cfg[vf].vf_if_handle,
					&adapter->vf_cfg[vf].vf_pmac_id,
					vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

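/* Note that mac[5] += 1 bumps only the last octet of the seed address;
 * as a u8 it wraps at 0xff without carrying into mac[4], so extremely
 * large VF counts could in principle repeat an address. In practice
 * num_vfs is far below 256, so the wrap is not hit.
 */
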
static void be_vf_clear(struct be_adapter *adapter)
{
	u32 vf;

	for (vf = 0; vf < num_vfs; vf++) {
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
	}

	for (vf = 0; vf < num_vfs; vf++)
		be_cmd_if_destroy(adapter, adapter->vf_cfg[vf].vf_if_handle,
				vf + 1);
}

static int be_clear(struct be_adapter *adapter)
{
	if (be_physfn(adapter) && adapter->sriov_enabled)
		be_vf_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}

static void be_vf_setup_init(struct be_adapter *adapter)
{
	int vf;

	for (vf = 0; vf < num_vfs; vf++) {
		adapter->vf_cfg[vf].vf_if_handle = -1;
		adapter->vf_cfg[vf].vf_pmac_id = -1;
	}
}

static int be_vf_setup(struct be_adapter *adapter)
{
	u32 cap_flags, en_flags, vf;
	u16 lnk_speed;
	int status;

	be_vf_setup_init(adapter);

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf + 1);
		if (status)
			goto err;
	}

	status = be_vf_eth_addr_config(adapter);
	if (status)
		goto err;

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
				vf + 1);
		if (status)
			goto err;
		adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
	}
	return 0;
err:
	return status;
}

static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->link_speed = -1;
	adapter->if_handle = -1;
	adapter->be3_native = false;
	adapter->promiscuous = false;
	adapter->eq_next_idx = 0;
}

static int be_configure_mac_from_list(struct be_adapter *adapter, u8 *mac)
{
	u32 pmac_id;
	int status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id);
	if (status != 0)
		goto do_none;
	status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK,
			false, adapter->if_handle, pmac_id);
	if (status != 0)
		goto do_none;
	status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
			&adapter->pmac_id, 0);
do_none:
	return status;
}

static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags;
	u32 tx_fc, rx_fc;
	int status, i;
	u8 mac[ETH_ALEN];
	struct be_tx_obj *txo;

	be_setup_init(adapter);

	be_cmd_req_native_mode(adapter);

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto err;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto err;

	memset(mac, 0, ETH_ALEN);
	status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
			true /* permanent */, 0, 0);
	if (status)
		return status;
	memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
	memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
			BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;

	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
		cap_flags |= BE_IF_FLAGS_RSS;
		en_flags |= BE_IF_FLAGS_RSS;
	}
	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, &adapter->if_handle,
			&adapter->pmac_id, 0);
	if (status != 0)
		goto err;

	for_all_tx_queues(adapter, txo, i) {
		status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
		if (status)
			goto err;
	}

	/* The VF's permanent mac queried from card is incorrect.
	 * For BEx: Query the mac configured by the PF using if_handle
	 * For Lancer: Get and use mac_list to obtain mac address.
	 */
	if (!be_physfn(adapter)) {
		if (lancer_chip(adapter))
			status = be_configure_mac_from_list(adapter, mac);
		else
			status = be_cmd_mac_addr_query(adapter, mac,
					MAC_ADDRESS_TYPE_NETWORK, false,
					adapter->if_handle, 0);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	status = be_vid_config(adapter, false, 0);
	if (status)
		goto err;

	be_set_rx_mode(adapter->netdev);

	status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
	/* For Lancer: It is legal for this cmd to fail on VF */
	if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
		goto err;

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
		status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);
		/* For Lancer: It is legal for this cmd to fail on VF */
		if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
			goto err;
	}

	pcie_set_readrq(adapter->pdev, 4096);

	if (be_physfn(adapter) && adapter->sriov_enabled) {
		status = be_vf_setup(adapter);
		if (status)
			goto err;
	}

	return 0;
err:
	be_clear(adapter);
	return status;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	event_handle(adapter, &adapter->tx_eq, false);
	for_all_rx_queues(adapter, rxo, i)
		event_handle(adapter, &rxo->rx_eq, true);
}
#endif

#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
			"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}

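/* In short: the 4-byte CRC stored at the tail of the redboot image in
 * the UFI file is compared against the CRC the adapter reports for the
 * region already in flash; a match means the boot image is unchanged
 * and the redboot reflash can safely be skipped.
 */
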
static bool phy_flashing_required(struct be_adapter *adapter)
{
	int status = 0;
	struct be_phy_info phy_info;

	status = be_cmd_get_phy_info(adapter, &phy_info);
	if (status)
		return false;
	if ((phy_info.phy_type == TN_8022) &&
		(phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
		return true;
	}
	return false;
}

static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	static const struct flash_comp gen3_flash_types[10] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3},
		{ FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
			memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			if (!total_bytes) {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				if ((status == ILLEGAL_IOCTL_REQ) &&
					(pflashcomp[i].optype ==
						IMG_TYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}

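/* Each component above is streamed in 32KB chunks: every chunk but the
 * last goes down with a SAVE opcode (staged on the adapter), and the
 * final chunk switches to a FLASH opcode, which commits the staged
 * image to that component's flash region.
 */
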
static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}

static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK	(32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION	"/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4-byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
			+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
				0, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}

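/* The download loop advances by data_written as reported back by the
 * adapter rather than by chunk_size, so a short write simply resumes
 * at the correct offset; the trailing zero-length write_object to the
 * same "/prg" location is what commits the downloaded image.
 */
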
static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
		(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

fw_exit:
	release_firmware(fw);
	return status;
}

static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
			BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
			pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		db_reg = 4;
	} else {
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}


static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				mem->dma);
}

static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

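/* The mailbox is allocated 16 bytes larger than needed and both its CPU
 * and DMA addresses are then rounded up with PTR_ALIGN(..., 16):
 * over-allocating guarantees a 16-byte-aligned mailbox somewhere inside
 * the coherent buffer no matter what address dma_alloc_coherent()
 * happens to return.
 */
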
static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (adapter->generation == BE_GEN2) {
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	} else {
		if (lancer_chip(adapter))
			cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
		else
			cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	}
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static int be_get_config(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	if (adapter->function_mode & FLEX10_MODE)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	return 0;
}

static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
				SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}

static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 30
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(1000);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}

static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;

	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check if the adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}

static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status;

	if (adapter->eeh_err || adapter->ue_detected)
		return;

	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"Adapter in error state. "
			"Trying to recover.\n");

		status = lancer_test_and_set_rdy_state(adapter);
		if (status)
			goto err;

		netif_device_detach(adapter->netdev);

		if (netif_running(adapter->netdev))
			be_close(adapter->netdev);

		be_clear(adapter);

		adapter->fw_timeout = false;

		status = be_setup(adapter);
		if (status)
			goto err;

		if (netif_running(adapter->netdev)) {
			status = be_open(adapter->netdev);
			if (status)
				goto err;
		}

		netif_device_attach(adapter->netdev);

		dev_err(&adapter->pdev->dev,
			"Adapter error recovery succeeded\n");
	}
	return;
err:
	dev_err(&adapter->pdev->dev,
		"Adapter error recovery failed\n");
}

3404static void be_worker(struct work_struct *work)
3405{
3406 struct be_adapter *adapter =
3407 container_of(work, struct be_adapter, work.work);
3408 struct be_rx_obj *rxo;
3409 int i;
3410
3411 if (lancer_chip(adapter))
3412 lancer_test_and_recover_fn_err(adapter);
3413
3414 be_detect_dump_ue(adapter);
3415
3416 /* when interrupts are not yet enabled, just reap any pending
3417 * mcc completions */
3418 if (!netif_running(adapter->netdev)) {
3419 int mcc_compl, status = 0;
3420
3421 mcc_compl = be_process_mcc(adapter, &status);
3422
3423 if (mcc_compl) {
3424 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
3425 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
3426 }
3427
3428 goto reschedule;
3429 }
3430
3431 if (!adapter->stats_cmd_sent) {
3432 if (lancer_chip(adapter))
3433 lancer_cmd_get_pport_stats(adapter,
3434 &adapter->stats_cmd);
3435 else
3436 be_cmd_get_stats(adapter, &adapter->stats_cmd);
3437 }
3438
3439 for_all_rx_queues(adapter, rxo, i) {
3440 be_rx_eqd_update(adapter, rxo);
3441
3442 if (rxo->rx_post_starved) {
3443 rxo->rx_post_starved = false;
3444 be_post_rx_frags(rxo, GFP_KERNEL);
3445 }
3446 }
3447
3448reschedule:
3449 adapter->work_counter++;
3450 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3451}
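/* Editor's note: a compact sketch of the delayed-work lifecycle around
 * be_worker(), collected from this file: be_probe() arms the work with a
 * 100ms delay, the handler re-arms itself every second, and the teardown
 * paths cancel it synchronously.
 *
 *	INIT_DELAYED_WORK(&adapter->work, be_worker);
 *	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
 *	...
 *	cancel_delayed_work_sync(&adapter->work);
 */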
3452
3453static int __devinit be_probe(struct pci_dev *pdev,
3454 const struct pci_device_id *pdev_id)
3455{
3456 int status = 0;
3457 struct be_adapter *adapter;
3458 struct net_device *netdev;
3459
3460 status = pci_enable_device(pdev);
3461 if (status)
3462 goto do_none;
3463
3464 status = pci_request_regions(pdev, DRV_NAME);
3465 if (status)
3466 goto disable_dev;
3467 pci_set_master(pdev);
3468
3c8def97 3469 netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
3470 if (netdev == NULL) {
3471 status = -ENOMEM;
3472 goto rel_reg;
3473 }
3474 adapter = netdev_priv(netdev);
3475 adapter->pdev = pdev;
3476 pci_set_drvdata(pdev, adapter);
3477
3478 status = be_dev_family_check(adapter);
63657b9c 3479 if (status)
3480 goto free_netdev;
3481
6b7c5b94 3482 adapter->netdev = netdev;
2243e2e9 3483 SET_NETDEV_DEV(netdev, &pdev->dev);
6b7c5b94 3484
2b7bcebf 3485 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3486 if (!status) {
3487 netdev->features |= NETIF_F_HIGHDMA;
3488 } else {
2b7bcebf 3489 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3490 if (status) {
3491 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3492 goto free_netdev;
3493 }
3494 }
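/* Editor's note: newer kernels can express this try-64-bit-then-32-bit DMA
 * mask fallback with a single helper; a sketch, assuming
 * dma_set_mask_and_coherent() is available in this tree:
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
 *		status = dma_set_mask_and_coherent(&pdev->dev,
 *						   DMA_BIT_MASK(32));
 *		if (status)
 *			goto free_netdev;
 *	} else {
 *		netdev->features |= NETIF_F_HIGHDMA;
 *	}
 */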
3495
3496 status = be_sriov_enable(adapter);
3497 if (status)
3498 goto free_netdev;
ba343c77 3499
3500 status = be_ctrl_init(adapter);
3501 if (status)
f9449ab7 3502 goto disable_sriov;
6b7c5b94 3503
37eed1cb 3504 if (lancer_chip(adapter)) {
3505 status = lancer_wait_ready(adapter);
3506 if (!status) {
3507 iowrite32(SLI_PORT_CONTROL_IP_MASK,
3508 adapter->db + SLIPORT_CONTROL_OFFSET);
3509 status = lancer_test_and_set_rdy_state(adapter);
3510 }
3511 if (status) {
3512 dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
48f5a191 3513 goto ctrl_clean;
3514 }
3515 }
3516
2243e2e9 3517 /* sync up with fw's ready state */
3518 if (be_physfn(adapter)) {
3519 status = be_cmd_POST(adapter);
3520 if (status)
3521 goto ctrl_clean;
ba343c77 3522 }
6b7c5b94 3523
3524 /* tell fw we're ready to fire cmds */
3525 status = be_cmd_fw_init(adapter);
6b7c5b94 3526 if (status)
3527 goto ctrl_clean;
3528
3529 status = be_cmd_reset_function(adapter);
3530 if (status)
3531 goto ctrl_clean;
556ae191 3532
3533 status = be_stats_init(adapter);
3534 if (status)
3535 goto ctrl_clean;
3536
3537 status = be_get_config(adapter);
3538 if (status)
3539 goto stats_clean;
6b7c5b94 3540
3541 /* The INTR bit may be set in the card when probed by a kdump kernel
3542 * after a crash.
3543 */
3544 if (!lancer_chip(adapter))
3545 be_intr_set(adapter, false);
3546
3547 be_msix_enable(adapter);
3548
6b7c5b94 3549 INIT_DELAYED_WORK(&adapter->work, be_worker);
a54769f5 3550 adapter->rx_fc = adapter->tx_fc = true;
6b7c5b94 3551
3552 status = be_setup(adapter);
3553 if (status)
3abcdeda 3554 goto msix_disable;
2243e2e9 3555
3abcdeda 3556 be_netdev_init(netdev);
3557 status = register_netdev(netdev);
3558 if (status != 0)
5fb379ee 3559 goto unsetup;
6b7c5b94 3560
c4ca2374 3561 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
34b1ef04 3562
f203af70 3563 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3564 return 0;
3565
3566unsetup:
3567 be_clear(adapter);
3568msix_disable:
3569 be_msix_disable(adapter);
3570stats_clean:
3571 be_stats_cleanup(adapter);
3572ctrl_clean:
3573 be_ctrl_cleanup(adapter);
f9449ab7 3574disable_sriov:
ba343c77 3575 be_sriov_disable(adapter);
f9449ab7 3576free_netdev:
fe6d2a38 3577 free_netdev(netdev);
8d56ff11 3578 pci_set_drvdata(pdev, NULL);
3579rel_reg:
3580 pci_release_regions(pdev);
3581disable_dev:
3582 pci_disable_device(pdev);
3583do_none:
c4ca2374 3584 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3585 return status;
3586}
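/* Editor's note: a minimal, self-contained sketch of the goto-unwind error
 * handling idiom used by be_probe() above: each failure jumps to a label
 * that releases only what was already acquired, in reverse order. The
 * function and label names here are illustrative, not part of the driver,
 * and the sketch is kept as a comment so it does not read as driver code.
 *
 *	static int example_probe(struct pci_dev *pdev)
 *	{
 *		int err;
 *
 *		err = pci_enable_device(pdev);
 *		if (err)
 *			return err;
 *
 *		err = pci_request_regions(pdev, "example");
 *		if (err)
 *			goto disable_dev;
 *
 *		// ... further setup steps, each with its own unwind label
 *		return 0;
 *
 *	disable_dev:
 *		pci_disable_device(pdev);
 *		return err;
 *	}
 */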
3587
3588static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3589{
3590 struct be_adapter *adapter = pci_get_drvdata(pdev);
3591 struct net_device *netdev = adapter->netdev;
3592
a4ca055f 3593 cancel_delayed_work_sync(&adapter->work);
3594 if (adapter->wol)
3595 be_setup_wol(adapter, true);
3596
3597 netif_device_detach(netdev);
3598 if (netif_running(netdev)) {
3599 rtnl_lock();
3600 be_close(netdev);
3601 rtnl_unlock();
3602 }
9b0365f1 3603 be_clear(adapter);
6b7c5b94 3604
a4ca055f 3605 be_msix_disable(adapter);
3606 pci_save_state(pdev);
3607 pci_disable_device(pdev);
3608 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3609 return 0;
3610}
3611
3612static int be_resume(struct pci_dev *pdev)
3613{
3614 int status = 0;
3615 struct be_adapter *adapter = pci_get_drvdata(pdev);
3616 struct net_device *netdev = adapter->netdev;
3617
3618 netif_device_detach(netdev);
3619
3620 status = pci_enable_device(pdev);
3621 if (status)
3622 return status;
3623
3624 pci_set_power_state(pdev, PCI_D0);
3625 pci_restore_state(pdev);
3626
a4ca055f 3627 be_msix_enable(adapter);
3628 /* tell fw we're ready to fire cmds */
3629 status = be_cmd_fw_init(adapter);
3630 if (status)
3631 return status;
3632
9b0365f1 3633 be_setup(adapter);
3634 if (netif_running(netdev)) {
3635 rtnl_lock();
3636 be_open(netdev);
3637 rtnl_unlock();
3638 }
3639 netif_device_attach(netdev);
3640
3641 if (adapter->wol)
3642 be_setup_wol(adapter, false);
3643
3644 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3645 return 0;
3646}
3647
3648/*
3649 * An FLR will stop BE from DMAing any data.
3650 */
3651static void be_shutdown(struct pci_dev *pdev)
3652{
3653 struct be_adapter *adapter = pci_get_drvdata(pdev);
82456b03 3654
3655 if (!adapter)
3656 return;
82456b03 3657
0f4a6828 3658 cancel_delayed_work_sync(&adapter->work);
a4ca055f 3659
2d5d4154 3660 netif_device_detach(adapter->netdev);
82456b03 3661
3662 if (adapter->wol)
3663 be_setup_wol(adapter, true);
3664
3665 be_cmd_reset_function(adapter);
3666
82456b03 3667 pci_disable_device(pdev);
3668}
3669
3670static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3671 pci_channel_state_t state)
3672{
3673 struct be_adapter *adapter = pci_get_drvdata(pdev);
3674 struct net_device *netdev = adapter->netdev;
3675
3676 dev_err(&adapter->pdev->dev, "EEH error detected\n");
3677
3678 adapter->eeh_err = true;
3679
3680 netif_device_detach(netdev);
3681
3682 if (netif_running(netdev)) {
3683 rtnl_lock();
3684 be_close(netdev);
3685 rtnl_unlock();
3686 }
3687 be_clear(adapter);
3688
3689 if (state == pci_channel_io_perm_failure)
3690 return PCI_ERS_RESULT_DISCONNECT;
3691
3692 pci_disable_device(pdev);
3693
3694 return PCI_ERS_RESULT_NEED_RESET;
3695}
3696
3697static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3698{
3699 struct be_adapter *adapter = pci_get_drvdata(pdev);
3700 int status;
3701
3702 dev_info(&adapter->pdev->dev, "EEH reset\n");
3703 adapter->eeh_err = false;
3704 adapter->ue_detected = false;
3705 adapter->fw_timeout = false;
3706
3707 status = pci_enable_device(pdev);
3708 if (status)
3709 return PCI_ERS_RESULT_DISCONNECT;
3710
3711 pci_set_master(pdev);
3712 pci_set_power_state(pdev, PCI_D0);
3713 pci_restore_state(pdev);
3714
3715 /* Check if card is ok and fw is ready */
3716 status = be_cmd_POST(adapter);
3717 if (status)
3718 return PCI_ERS_RESULT_DISCONNECT;
3719
3720 return PCI_ERS_RESULT_RECOVERED;
3721}
3722
3723static void be_eeh_resume(struct pci_dev *pdev)
3724{
3725 int status = 0;
3726 struct be_adapter *adapter = pci_get_drvdata(pdev);
3727 struct net_device *netdev = adapter->netdev;
3728
3729 dev_info(&adapter->pdev->dev, "EEH resume\n");
3730
3731 pci_save_state(pdev);
3732
3733 /* tell fw we're ready to fire cmds */
3734 status = be_cmd_fw_init(adapter);
3735 if (status)
3736 goto err;
3737
3738 status = be_setup(adapter);
3739 if (status)
3740 goto err;
3741
3742 if (netif_running(netdev)) {
3743 status = be_open(netdev);
3744 if (status)
3745 goto err;
3746 }
3747 netif_device_attach(netdev);
3748 return;
3749err:
3750 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3751}
3752
3753static struct pci_error_handlers be_eeh_handlers = {
3754 .error_detected = be_eeh_err_detected,
3755 .slot_reset = be_eeh_reset,
3756 .resume = be_eeh_resume,
3757};
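/* Editor's note: during PCI error recovery (EEH on pseries), the core calls
 * these hooks in order: .error_detected to quiesce the device and choose a
 * recovery action, .slot_reset after the slot has been reset to re-enable
 * and re-POST the card, and .resume to reinitialize and restart the
 * interface.
 */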
3758
3759static struct pci_driver be_driver = {
3760 .name = DRV_NAME,
3761 .id_table = be_dev_ids,
3762 .probe = be_probe,
3763 .remove = be_remove,
3764 .suspend = be_suspend,
cf588477 3765 .resume = be_resume,
82456b03 3766 .shutdown = be_shutdown,
cf588477 3767 .err_handler = &be_eeh_handlers
3768};
3769
3770static int __init be_init_module(void)
3771{
3772 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3773 rx_frag_size != 2048) {
3774 pr_warn(DRV_NAME
3775 " : Module param rx_frag_size must be 2048/4096/8192."
3776 " Using 2048\n");
3777 rx_frag_size = 2048;
3778 }
3779
3780 return pci_register_driver(&be_driver);
3781}
3782module_init(be_init_module);
3783
3784static void __exit be_exit_module(void)
3785{
3786 pci_unregister_driver(&be_driver);
3787}
3788module_exit(be_exit_module);