/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

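/* Enable/disable host interrupt delivery by flipping the HOSTINTR bit in
 * the MEMBAR interrupt-control register; no-op if already in that state.
 */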
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (adapter->eeh_err)
		return;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	iowrite32(reg, addr);
}

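/* Doorbell helpers: each queue type has its own doorbell register offset.
 * A single 32-bit write encodes the queue id in the low bits and the count
 * of newly posted (or processed) entries, plus rearm/clear flags for
 * event/completion queues, in the upper bits.
 */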
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	status = be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id, 0);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}

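/* Copy the h/w maintained port/rx-filter/pmem stats into adapter->drv_stats.
 * Three layouts exist: v0, v1 and the Lancer pport format; be_parse_stats()
 * below picks the right one for the chip generation.
 */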
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i)
		rx_stats(rxo)->rx_drops_no_frags =
			erx->rx_drops_no_fragments[rxo->q.id];
}

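/* ndo_get_stats64: aggregate the per-queue s/w counters (read under
 * u64_stats sync retry loops) and fold in the h/w error counters that
 * be_parse_stats() maintains in drv_stats.
 */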
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u32 link_status)
{
	struct net_device *netdev = adapter->netdev;

	/* when link status changes, link speed must be re-queried from card */
	adapter->link_speed = -1;
	if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
		netif_carrier_on(netdev);
		dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
	} else {
		netif_carrier_off(netdev);
		dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
	}
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
			bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

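/* Fill the header WRB that precedes the data WRBs: LSO/checksum offload
 * flags, VLAN tag insertion (with the priority remapped to a supported
 * value when needed) and the total WRB count and payload length.
 */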
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

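/* Map the skb head and frags for DMA and write one WRB per mapped piece
 * (plus the header WRB and an optional dummy WRB for even alignment).
 * Returns the bytes queued, or 0 after unwinding all mappings on a DMA
 * mapping error.
 */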
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = dma_map_page(dev, frag->page, frag->page_offset,
				       frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

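/* ndo_start_xmit: build WRBs for the skb, stop the subqueue early if the
 * ring may not fit another max-fragmented request, then ring the TX
 * doorbell.
 */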
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter, false, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
	return;
}

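/* SR-IOV ndo handlers: the PF proxies MAC, VLAN and TX-rate settings to
 * the card on behalf of each VF (f/w commands are issued with vf + 1 as
 * the last argument to address that VF's function).
 */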
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	status = be_cmd_pmac_add(adapter, mac,
				adapter->vf_cfg[vf].vf_if_handle,
				&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}

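/* Adaptive interrupt coalescing: once a second, derive an EQ delay from
 * the measured rx packet rate and reprogram the EQ when it changes.
 */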
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = rx_stats(rxo);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = stats->rx_pps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd) {
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
		rx_eq->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

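/* Pop the page_info for a completed rx frag; the backing page is DMA
 * unmapped only when its last frag is consumed.
 */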
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (unlikely(rxcp->vlanf))
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->truesize += rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (unlikely(rxcp->vlanf))
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(&eq_obj->napi);
}

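/* Decode a h/w rx completion (v1 layout when be3_native, v0 otherwise)
 * into the chip-independent be_rx_compl_info used by the rest of the
 * rx path.
 */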
static void be_parse_rx_compl_v1(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & 0x400) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (((adapter->pvid & VLAN_VID_MASK) ==
		     (rxcp->vlan_tag & VLAN_VID_MASK)) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

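/* Unmap and free the skb whose completion (identified by the last wrb
 * index it reported) has arrived; returns the number of WRBs reclaimed,
 * including the header wrb.
 */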
3c8def97
SP
1381static u16 be_tx_compl_process(struct be_adapter *adapter,
1382 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 1383{
3c8def97 1384 struct be_queue_info *txq = &txo->q;
a73b796e 1385 struct be_eth_wrb *wrb;
3c8def97 1386 struct sk_buff **sent_skbs = txo->sent_skb_list;
6b7c5b94 1387 struct sk_buff *sent_skb;
ec43b1a6
SP
1388 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1389 bool unmap_skb_hdr = true;
6b7c5b94 1390
ec43b1a6 1391 sent_skb = sent_skbs[txq->tail];
6b7c5b94 1392 BUG_ON(!sent_skb);
ec43b1a6
SP
1393 sent_skbs[txq->tail] = NULL;
1394
1395 /* skip header wrb */
a73b796e 1396 queue_tail_inc(txq);
6b7c5b94 1397
ec43b1a6 1398 do {
6b7c5b94 1399 cur_index = txq->tail;
a73b796e 1400 wrb = queue_tail_node(txq);
2b7bcebf
IV
1401 unmap_tx_frag(&adapter->pdev->dev, wrb,
1402 (unmap_skb_hdr && skb_headlen(sent_skb)));
ec43b1a6
SP
1403 unmap_skb_hdr = false;
1404
6b7c5b94
SP
1405 num_wrbs++;
1406 queue_tail_inc(txq);
ec43b1a6 1407 } while (cur_index != last_index);
6b7c5b94 1408
6b7c5b94 1409 kfree_skb(sent_skb);
4d586b82 1410 return num_wrbs;
6b7c5b94
SP
1411}
1412
859b1e4e
SP
1413static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1414{
1415 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1416
1417 if (!eqe->evt)
1418 return NULL;
1419
f3eb62d2 1420 rmb();
859b1e4e
SP
1421 eqe->evt = le32_to_cpu(eqe->evt);
1422 queue_tail_inc(&eq_obj->q);
1423 return eqe;
1424}
1425
1426static int event_handle(struct be_adapter *adapter,
3c8def97
SP
1427 struct be_eq_obj *eq_obj,
1428 bool rearm)
859b1e4e
SP
1429{
1430 struct be_eq_entry *eqe;
1431 u16 num = 0;
1432
1433 while ((eqe = event_get(eq_obj)) != NULL) {
1434 eqe->evt = 0;
1435 num++;
1436 }
1437
1438 /* Deal with any spurious interrupts that come
1439 * without events
1440 */
3c8def97
SP
1441 if (!num)
1442 rearm = true;
1443
1444 be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
859b1e4e
SP
1445 if (num)
1446 napi_schedule(&eq_obj->napi);
1447
1448 return num;
1449}
1450
1451/* Just read and notify events without processing them.
1452 * Used at the time of destroying event queues */
1453static void be_eq_clean(struct be_adapter *adapter,
1454 struct be_eq_obj *eq_obj)
1455{
1456 struct be_eq_entry *eqe;
1457 u16 num = 0;
1458
1459 while ((eqe = event_get(eq_obj)) != NULL) {
1460 eqe->evt = 0;
1461 num++;
1462 }
1463
1464 if (num)
1465 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1466}
1467
3abcdeda 1468static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
6b7c5b94
SP
1469{
1470 struct be_rx_page_info *page_info;
3abcdeda
SP
1471 struct be_queue_info *rxq = &rxo->q;
1472 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 1473 struct be_rx_compl_info *rxcp;
6b7c5b94
SP
1474 u16 tail;
1475
1476 /* First cleanup pending rx completions */
3abcdeda
SP
1477 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1478 be_rx_compl_discard(adapter, rxo, rxcp);
64642811 1479 be_cq_notify(adapter, rx_cq->id, false, 1);
6b7c5b94
SP
1480 }
1481
1482 /* Then free posted rx buffer that were not used */
1483 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
cdab23b7 1484 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
3abcdeda 1485 page_info = get_rx_page_info(adapter, rxo, tail);
6b7c5b94
SP
1486 put_page(page_info->page);
1487 memset(page_info, 0, sizeof(*page_info));
1488 }
1489 BUG_ON(atomic_read(&rxq->used));
482c9e79 1490 rxq->tail = rxq->head = 0;
6b7c5b94
SP
1491}
1492
3c8def97
SP
1493static void be_tx_compl_clean(struct be_adapter *adapter,
1494 struct be_tx_obj *txo)
6b7c5b94 1495{
3c8def97
SP
1496 struct be_queue_info *tx_cq = &txo->cq;
1497 struct be_queue_info *txq = &txo->q;
a8e9179a 1498 struct be_eth_tx_compl *txcp;
4d586b82 1499 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
3c8def97 1500 struct sk_buff **sent_skbs = txo->sent_skb_list;
b03388d6
SP
1501 struct sk_buff *sent_skb;
1502 bool dummy_wrb;
a8e9179a
SP
1503
1504 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1505 do {
1506 while ((txcp = be_tx_compl_get(tx_cq))) {
1507 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1508 wrb_index, txcp);
3c8def97 1509 num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
a8e9179a
SP
1510 cmpl++;
1511 }
1512 if (cmpl) {
1513 be_cq_notify(adapter, tx_cq->id, false, cmpl);
4d586b82 1514 atomic_sub(num_wrbs, &txq->used);
a8e9179a 1515 cmpl = 0;
4d586b82 1516 num_wrbs = 0;
a8e9179a
SP
1517 }
1518
1519 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1520 break;
1521
1522 mdelay(1);
1523 } while (true);
1524
1525 if (atomic_read(&txq->used))
1526 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1527 atomic_read(&txq->used));
b03388d6
SP
1528
1529 /* free posted tx for which compls will never arrive */
1530 while (atomic_read(&txq->used)) {
1531 sent_skb = sent_skbs[txq->tail];
1532 end_idx = txq->tail;
1533 index_adv(&end_idx,
fe6d2a38
SP
1534 wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1535 txq->len);
3c8def97 1536 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
4d586b82 1537 atomic_sub(num_wrbs, &txq->used);
b03388d6 1538 }
6b7c5b94
SP
1539}
1540
5fb379ee
SP
1541static void be_mcc_queues_destroy(struct be_adapter *adapter)
1542{
1543 struct be_queue_info *q;
5fb379ee 1544
8788fdc2 1545 q = &adapter->mcc_obj.q;
5fb379ee 1546 if (q->created)
8788fdc2 1547 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
1548 be_queue_free(adapter, q);
1549
8788fdc2 1550 q = &adapter->mcc_obj.cq;
5fb379ee 1551 if (q->created)
8788fdc2 1552 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
1553 be_queue_free(adapter, q);
1554}
1555
1556/* Must be called only after TX qs are created as MCC shares TX EQ */
1557static int be_mcc_queues_create(struct be_adapter *adapter)
1558{
1559 struct be_queue_info *q, *cq;
5fb379ee
SP
1560
1561 /* Alloc MCC compl queue */
8788fdc2 1562 cq = &adapter->mcc_obj.cq;
5fb379ee 1563 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
efd2e40a 1564 sizeof(struct be_mcc_compl)))
5fb379ee
SP
1565 goto err;
1566
1567 /* Ask BE to create MCC compl queue; share TX's eq */
8788fdc2 1568 if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
5fb379ee
SP
1569 goto mcc_cq_free;
1570
1571 /* Alloc MCC queue */
8788fdc2 1572 q = &adapter->mcc_obj.q;
5fb379ee
SP
1573 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1574 goto mcc_cq_destroy;
1575
1576 /* Ask BE to create MCC queue */
8788fdc2 1577 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
1578 goto mcc_q_free;
1579
1580 return 0;
1581
1582mcc_q_free:
1583 be_queue_free(adapter, q);
1584mcc_cq_destroy:
8788fdc2 1585 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
1586mcc_cq_free:
1587 be_queue_free(adapter, cq);
1588err:
1589 return -1;
1590}
1591
6b7c5b94
SP
1592static void be_tx_queues_destroy(struct be_adapter *adapter)
1593{
1594 struct be_queue_info *q;
3c8def97
SP
1595 struct be_tx_obj *txo;
1596 u8 i;
6b7c5b94 1597
3c8def97
SP
1598 for_all_tx_queues(adapter, txo, i) {
1599 q = &txo->q;
1600 if (q->created)
1601 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1602 be_queue_free(adapter, q);
6b7c5b94 1603
3c8def97
SP
1604 q = &txo->cq;
1605 if (q->created)
1606 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1607 be_queue_free(adapter, q);
1608 }
6b7c5b94 1609
1610 /* Clear any residual events */
1611 be_eq_clean(adapter, &adapter->tx_eq);
1612
1613 q = &adapter->tx_eq.q;
1614 if (q->created)
8788fdc2 1615 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1616 be_queue_free(adapter, q);
1617}
1618
3c8def97 1619/* One TX event queue is shared by all TX compl qs */
1620static int be_tx_queues_create(struct be_adapter *adapter)
1621{
1622 struct be_queue_info *eq, *q, *cq;
1623 struct be_tx_obj *txo;
1624 u8 i;
1625
1626 adapter->tx_eq.max_eqd = 0;
1627 adapter->tx_eq.min_eqd = 0;
1628 adapter->tx_eq.cur_eqd = 96;
1629 adapter->tx_eq.enable_aic = false;
3c8def97 1630
6b7c5b94 1631 eq = &adapter->tx_eq.q;
1632 if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1633 sizeof(struct be_eq_entry)))
1634 return -1;
1635
8788fdc2 1636 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
3c8def97 1637 goto err;
ecd62107 1638 adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
fe6d2a38 1639
1640 for_all_tx_queues(adapter, txo, i) {
1641 cq = &txo->cq;
1642 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
6b7c5b94 1643 sizeof(struct be_eth_tx_compl)))
3c8def97 1644 goto err;
6b7c5b94 1645
1646 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1647 goto err;
6b7c5b94 1648
1649 q = &txo->q;
1650 if (be_queue_alloc(adapter, q, TX_Q_LEN,
1651 sizeof(struct be_eth_wrb)))
1652 goto err;
6b7c5b94 1653
1654 if (be_cmd_txq_create(adapter, q, cq))
1655 goto err;
1656 }
1657 return 0;
1658
1659err:
1660 be_tx_queues_destroy(adapter);
1661 return -1;
1662}
1663
1664static void be_rx_queues_destroy(struct be_adapter *adapter)
1665{
1666 struct be_queue_info *q;
1667 struct be_rx_obj *rxo;
1668 int i;
1669
1670 for_all_rx_queues(adapter, rxo, i) {
482c9e79 1671 be_queue_free(adapter, &rxo->q);
1672
1673 q = &rxo->cq;
1674 if (q->created)
1675 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1676 be_queue_free(adapter, q);
1677
3abcdeda 1678 q = &rxo->rx_eq.q;
482c9e79 1679 if (q->created)
3abcdeda 1680 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
3abcdeda 1681 be_queue_free(adapter, q);
6b7c5b94 1682 }
1683}
1684
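/* Multiple RX queues are asked for only when the function exports the
 * RSS capability and SR-IOV is disabled; the 0x400 function-mode bit
 * (presumably a multi-channel/partitioned mode) also forces a single
 * queue.
 */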
1685static u32 be_num_rxqs_want(struct be_adapter *adapter)
1686{
c814fd36 1687 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1688 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1689 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1690 } else {
1691 dev_warn(&adapter->pdev->dev,
1692 "No support for multiple RX queues\n");
1693 return 1;
1694 }
1695}
1696
1697static int be_rx_queues_create(struct be_adapter *adapter)
1698{
1699 struct be_queue_info *eq, *q, *cq;
1700 struct be_rx_obj *rxo;
1701 int rc, i;
6b7c5b94 1702
1703 adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1704 msix_enabled(adapter) ?
1705 adapter->num_msix_vec - 1 : 1);
1706 if (adapter->num_rx_qs != MAX_RX_QS)
1707 dev_warn(&adapter->pdev->dev,
1708 "Can create only %d RX queues", adapter->num_rx_qs);
1709
6b7c5b94 1710 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1711 for_all_rx_queues(adapter, rxo, i) {
1712 rxo->adapter = adapter;
1713 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1714 rxo->rx_eq.enable_aic = true;
1715
1716 /* EQ */
1717 eq = &rxo->rx_eq.q;
1718 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1719 sizeof(struct be_eq_entry));
1720 if (rc)
1721 goto err;
1722
1723 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1724 if (rc)
1725 goto err;
1726
ecd62107 1727 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
fe6d2a38 1728
1729 /* CQ */
1730 cq = &rxo->cq;
1731 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1732 sizeof(struct be_eth_rx_compl));
1733 if (rc)
1734 goto err;
1735
1736 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1737 if (rc)
1738 goto err;
1739
1740 /* Rx Q - will be created in be_open() */
1741 q = &rxo->q;
1742 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1743 sizeof(struct be_eth_rx_d));
1744 if (rc)
1745 goto err;
1746
3abcdeda 1747 }
1748
1749 return 0;
1750err:
1751 be_rx_queues_destroy(adapter);
1752 return -1;
6b7c5b94 1753}
6b7c5b94 1754
fe6d2a38 1755static bool event_peek(struct be_eq_obj *eq_obj)
b628bde2 1756{
1757 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1758 if (!eqe->evt)
1759 return false;
1760 else
1761 return true;
1762}
1763
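/* Legacy INTx handler. Lancer has no ISR register, so pending work is
 * detected by peeking the event queues directly; on BE2/BE3 the CEV_ISR0
 * read (presumably clear-on-read) reports, by eq_idx, which EQs have
 * events outstanding.
 */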
1764static irqreturn_t be_intx(int irq, void *dev)
1765{
1766 struct be_adapter *adapter = dev;
3abcdeda 1767 struct be_rx_obj *rxo;
fe6d2a38 1768 int isr, i, tx = 0, rx = 0;
6b7c5b94 1769
1770 if (lancer_chip(adapter)) {
1771 if (event_peek(&adapter->tx_eq))
3c8def97 1772 tx = event_handle(adapter, &adapter->tx_eq, false);
1773 for_all_rx_queues(adapter, rxo, i) {
1774 if (event_peek(&rxo->rx_eq))
3c8def97 1775 rx |= event_handle(adapter, &rxo->rx_eq, true);
fe6d2a38 1776 }
6b7c5b94 1777
1778 if (!(tx || rx))
1779 return IRQ_NONE;
3abcdeda 1780
1781 } else {
1782 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1783 (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1784 if (!isr)
1785 return IRQ_NONE;
1786
ecd62107 1787 if ((1 << adapter->tx_eq.eq_idx & isr))
3c8def97 1788 event_handle(adapter, &adapter->tx_eq, false);
1789
1790 for_all_rx_queues(adapter, rxo, i) {
ecd62107 1791 if ((1 << rxo->rx_eq.eq_idx & isr))
3c8def97 1792 event_handle(adapter, &rxo->rx_eq, true);
fe6d2a38 1793 }
3abcdeda 1794 }
c001c213 1795
8788fdc2 1796 return IRQ_HANDLED;
1797}
1798
1799static irqreturn_t be_msix_rx(int irq, void *dev)
1800{
1801 struct be_rx_obj *rxo = dev;
1802 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1803
3c8def97 1804 event_handle(adapter, &rxo->rx_eq, true);
1805
1806 return IRQ_HANDLED;
1807}
1808
5fb379ee 1809static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1810{
1811 struct be_adapter *adapter = dev;
1812
3c8def97 1813 event_handle(adapter, &adapter->tx_eq, false);
1814
1815 return IRQ_HANDLED;
1816}
1817
2e588f84 1818static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 1819{
2e588f84 1820 return rxcp->tcpf && !rxcp->err;
1821}
1822
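/* NAPI RX poll: consume up to 'budget' completions, re-post RX frags
 * once the queue falls below the refill watermark, and re-arm the CQ
 * only when the budget was not exhausted (i.e. on napi_complete).
 */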
49b05221 1823static int be_poll_rx(struct napi_struct *napi, int budget)
1824{
1825 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1826 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1827 struct be_adapter *adapter = rxo->adapter;
1828 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 1829 struct be_rx_compl_info *rxcp;
1830 u32 work_done;
1831
ac124ff9 1832 rx_stats(rxo)->rx_polls++;
6b7c5b94 1833 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 1834 rxcp = be_rx_compl_get(rxo);
1835 if (!rxcp)
1836 break;
1837
1838 /* Is it a flush compl that has no data? */
1839 if (unlikely(rxcp->num_rcvd == 0))
1840 goto loop_continue;
1841
1842 /* Discard compl with partial DMA (Lancer B0) */
1843 if (unlikely(!rxcp->pkt_size)) {
1844 be_rx_compl_discard(adapter, rxo, rxcp);
1845 goto loop_continue;
1846 }
1847
1848 /* On BE drop pkts that arrive due to imperfect filtering in
1849 * promiscuous mode on some SKUs
1850 */
1851 if (unlikely(rxcp->port != adapter->port_num &&
1852 !lancer_chip(adapter))) {
009dd872 1853 be_rx_compl_discard(adapter, rxo, rxcp);
12004ae9 1854 goto loop_continue;
64642811 1855 }
009dd872 1856
1857 if (do_gro(rxcp))
1858 be_rx_compl_process_gro(adapter, rxo, rxcp);
1859 else
1860 be_rx_compl_process(adapter, rxo, rxcp);
1861loop_continue:
2e588f84 1862 be_rx_stats_update(rxo, rxcp);
1863 }
1864
6b7c5b94 1865 /* Refill the queue */
3abcdeda 1866 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1829b086 1867 be_post_rx_frags(rxo, GFP_ATOMIC);
1868
1869 /* All consumed */
1870 if (work_done < budget) {
1871 napi_complete(napi);
8788fdc2 1872 be_cq_notify(adapter, rx_cq->id, true, work_done);
1873 } else {
1874 /* More to be consumed; continue with interrupts disabled */
8788fdc2 1875 be_cq_notify(adapter, rx_cq->id, false, work_done);
1876 }
1877 return work_done;
1878}
1879
1880/* As TX and MCC share the same EQ, check for both TX and MCC completions.
1881 * For TX/MCC we don't honour budget; consume everything
1882 */
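/* Note: the poll always reports a weight of 1 and re-arms the event
 * queue on exit, since everything available was drained in this pass.
 */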
1883static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
6b7c5b94 1884{
1885 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1886 struct be_adapter *adapter =
1887 container_of(tx_eq, struct be_adapter, tx_eq);
3c8def97 1888 struct be_tx_obj *txo;
6b7c5b94 1889 struct be_eth_tx_compl *txcp;
1890 int tx_compl, mcc_compl, status = 0;
1891 u8 i;
1892 u16 num_wrbs;
1893
1894 for_all_tx_queues(adapter, txo, i) {
1895 tx_compl = 0;
1896 num_wrbs = 0;
1897 while ((txcp = be_tx_compl_get(&txo->cq))) {
1898 num_wrbs += be_tx_compl_process(adapter, txo,
1899 AMAP_GET_BITS(struct amap_eth_tx_compl,
1900 wrb_index, txcp));
1901 tx_compl++;
1902 }
1903 if (tx_compl) {
1904 be_cq_notify(adapter, txo->cq.id, true, tx_compl);
1905
1906 atomic_sub(num_wrbs, &txo->q.used);
6b7c5b94 1907
1908 /* As Tx wrbs have been freed up, wake up netdev queue
1909 * if it was stopped due to lack of tx wrbs. */
1910 if (__netif_subqueue_stopped(adapter->netdev, i) &&
1911 atomic_read(&txo->q.used) < txo->q.len / 2) {
1912 netif_wake_subqueue(adapter->netdev, i);
1913 }
1914
ab1594e9 1915 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
ac124ff9 1916 tx_stats(txo)->tx_compl += tx_compl;
ab1594e9 1917 u64_stats_update_end(&tx_stats(txo)->sync_compl);
3c8def97 1918 }
1919 }
1920
1921 mcc_compl = be_process_mcc(adapter, &status);
1922
1923 if (mcc_compl) {
1924 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1925 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1926 }
1927
3c8def97 1928 napi_complete(napi);
6b7c5b94 1929
3c8def97 1930 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
ab1594e9 1931 adapter->drv_stats.tx_events++;
1932 return 1;
1933}
1934
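/* Read the two UE (unrecoverable error) status CSRs from PCI config
 * space, mask off the bits flagged in the corresponding mask registers,
 * and decode each remaining set bit against the ue_status_low_desc/
 * ue_status_hi_desc tables at the top of this file.
 */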
d053de91 1935void be_detect_dump_ue(struct be_adapter *adapter)
1936{
1937 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1938 u32 i;
1939
1940 pci_read_config_dword(adapter->pdev,
1941 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1942 pci_read_config_dword(adapter->pdev,
1943 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1944 pci_read_config_dword(adapter->pdev,
1945 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1946 pci_read_config_dword(adapter->pdev,
1947 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1948
1949 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1950 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1951
1952 if (ue_status_lo || ue_status_hi) {
1953 adapter->ue_detected = true;
7acc2087 1954 adapter->eeh_err = true;
1955 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1956 }
1957
1958 if (ue_status_lo) {
1959 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1960 if (ue_status_lo & 1)
1961 dev_err(&adapter->pdev->dev,
1962 "UE: %s bit set\n", ue_status_low_desc[i]);
1963 }
1964 }
1965 if (ue_status_hi) {
1966 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1967 if (ue_status_hi & 1)
1968 dev_err(&adapter->pdev->dev,
1969 "UE: %s bit set\n", ue_status_hi_desc[i]);
1970 }
1971 }
1972
1973}
1974
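/* Periodic (1 second) housekeeping: detect UEs, issue/refresh the stats
 * command, adapt each RX EQ's interrupt delay, and re-post RX buffers on
 * rings that starved under memory pressure. While the interface is down
 * only pending MCC completions are reaped.
 */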
1975static void be_worker(struct work_struct *work)
1976{
1977 struct be_adapter *adapter =
1978 container_of(work, struct be_adapter, work.work);
1979 struct be_rx_obj *rxo;
1980 int i;
ea1dae11 1981
1982 if (!adapter->ue_detected && !lancer_chip(adapter))
1983 be_detect_dump_ue(adapter);
1984
1985 /* when interrupts are not yet enabled, just reap any pending
1986 * mcc completions */
1987 if (!netif_running(adapter->netdev)) {
1988 int mcc_compl, status = 0;
1989
1990 mcc_compl = be_process_mcc(adapter, &status);
1991
1992 if (mcc_compl) {
1993 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1994 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
1995 }
9b037f38 1996
1997 goto reschedule;
1998 }
1999
2000 if (!adapter->stats_cmd_sent) {
2001 if (lancer_chip(adapter))
2002 lancer_cmd_get_pport_stats(adapter,
2003 &adapter->stats_cmd);
2004 else
2005 be_cmd_get_stats(adapter, &adapter->stats_cmd);
2006 }
3c8def97 2007
3abcdeda 2008 for_all_rx_queues(adapter, rxo, i) {
2009 be_rx_eqd_update(adapter, rxo);
2010
2011 if (rxo->rx_post_starved) {
2012 rxo->rx_post_starved = false;
1829b086 2013 be_post_rx_frags(rxo, GFP_KERNEL);
3abcdeda 2014 }
2015 }
2016
f203af70 2017reschedule:
e74fbd03 2018 adapter->work_counter++;
2019 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2020}
2021
2022static void be_msix_disable(struct be_adapter *adapter)
2023{
ac6a0c4a 2024 if (msix_enabled(adapter)) {
8d56ff11 2025 pci_disable_msix(adapter->pdev);
ac6a0c4a 2026 adapter->num_msix_vec = 0;
2027 }
2028}
2029
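/* pci_enable_msix() returns 0 on success or, on failure, the number of
 * vectors that could have been allocated; the request is retried with
 * that smaller count as long as it still covers the RX + TX minimum.
 */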
2030static void be_msix_enable(struct be_adapter *adapter)
2031{
3abcdeda 2032#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
ac6a0c4a 2033 int i, status, num_vec;
6b7c5b94 2034
ac6a0c4a 2035 num_vec = be_num_rxqs_want(adapter) + 1;
3abcdeda 2036
ac6a0c4a 2037 for (i = 0; i < num_vec; i++)
2038 adapter->msix_entries[i].entry = i;
2039
ac6a0c4a 2040 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2041 if (status == 0) {
2042 goto done;
2043 } else if (status >= BE_MIN_MSIX_VECTORS) {
ac6a0c4a 2044 num_vec = status;
3abcdeda 2045 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
ac6a0c4a 2046 num_vec) == 0)
3abcdeda 2047 goto done;
2048 }
2049 return;
2050done:
2051 adapter->num_msix_vec = num_vec;
2052 return;
2053}
2054
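/* The num_vfs module parameter is clamped to the TotalVFs count
 * advertised in the PCIe SR-IOV extended capability before
 * pci_enable_sriov() is attempted.
 */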
2055static void be_sriov_enable(struct be_adapter *adapter)
2056{
344dbf10 2057 be_check_sriov_fn_type(adapter);
6dedec81 2058#ifdef CONFIG_PCI_IOV
ba343c77 2059 if (be_physfn(adapter) && num_vfs) {
2060 int status, pos;
2061 u16 nvfs;
2062
2063 pos = pci_find_ext_capability(adapter->pdev,
2064 PCI_EXT_CAP_ID_SRIOV);
2065 pci_read_config_word(adapter->pdev,
2066 pos + PCI_SRIOV_TOTAL_VF, &nvfs);
2067
2068 if (num_vfs > nvfs) {
2069 dev_info(&adapter->pdev->dev,
2070 "Device supports %d VFs and not %d\n",
2071 nvfs, num_vfs);
2072 num_vfs = nvfs;
2073 }
6dedec81 2074
2075 status = pci_enable_sriov(adapter->pdev, num_vfs);
2076 adapter->sriov_enabled = !status;
2077 }
2078#endif
2079}
2080
2081static void be_sriov_disable(struct be_adapter *adapter)
2082{
2083#ifdef CONFIG_PCI_IOV
2084 if (adapter->sriov_enabled) {
2085 pci_disable_sriov(adapter->pdev);
2086 adapter->sriov_enabled = false;
2087 }
2088#endif
2089}
2090
2091static inline int be_msix_vec_get(struct be_adapter *adapter,
2092 struct be_eq_obj *eq_obj)
6b7c5b94 2093{
ecd62107 2094 return adapter->msix_entries[eq_obj->eq_idx].vector;
2095}
2096
2097static int be_request_irq(struct be_adapter *adapter,
2098 struct be_eq_obj *eq_obj,
3abcdeda 2099 void *handler, char *desc, void *context)
2100{
2101 struct net_device *netdev = adapter->netdev;
2102 int vec;
2103
2104 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
fe6d2a38 2105 vec = be_msix_vec_get(adapter, eq_obj);
3abcdeda 2106 return request_irq(vec, handler, 0, eq_obj->desc, context);
2107}
2108
2109static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2110 void *context)
b628bde2 2111{
fe6d2a38 2112 int vec = be_msix_vec_get(adapter, eq_obj);
3abcdeda 2113 free_irq(vec, context);
b628bde2 2114}
6b7c5b94 2115
2116static int be_msix_register(struct be_adapter *adapter)
2117{
2118 struct be_rx_obj *rxo;
2119 int status, i;
2120 char qname[10];
b628bde2 2121
2122 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2123 adapter);
2124 if (status)
2125 goto err;
2126
2127 for_all_rx_queues(adapter, rxo, i) {
2128 sprintf(qname, "rxq%d", i);
2129 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2130 qname, rxo);
2131 if (status)
2132 goto err_msix;
2133 }
b628bde2 2134
6b7c5b94 2135 return 0;
b628bde2 2136
2137err_msix:
2138 be_free_irq(adapter, &adapter->tx_eq, adapter);
2139
2140 for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2141 be_free_irq(adapter, &rxo->rx_eq, rxo);
2142
2143err:
2144 dev_warn(&adapter->pdev->dev,
2145 "MSIX Request IRQ failed - err %d\n", status);
ac6a0c4a 2146 be_msix_disable(adapter);
2147 return status;
2148}
2149
2150static int be_irq_register(struct be_adapter *adapter)
2151{
2152 struct net_device *netdev = adapter->netdev;
2153 int status;
2154
ac6a0c4a 2155 if (msix_enabled(adapter)) {
2156 status = be_msix_register(adapter);
2157 if (status == 0)
2158 goto done;
2159 /* INTx is not supported for VF */
2160 if (!be_physfn(adapter))
2161 return status;
2162 }
2163
2164 /* INTx */
2165 netdev->irq = adapter->pdev->irq;
2166 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2167 adapter);
2168 if (status) {
2169 dev_err(&adapter->pdev->dev,
2170 "INTx request IRQ failed - err %d\n", status);
2171 return status;
2172 }
2173done:
2174 adapter->isr_registered = true;
2175 return 0;
2176}
2177
2178static void be_irq_unregister(struct be_adapter *adapter)
2179{
2180 struct net_device *netdev = adapter->netdev;
2181 struct be_rx_obj *rxo;
2182 int i;
2183
2184 if (!adapter->isr_registered)
2185 return;
2186
2187 /* INTx */
ac6a0c4a 2188 if (!msix_enabled(adapter)) {
2189 free_irq(netdev->irq, adapter);
2190 goto done;
2191 }
2192
2193 /* MSIx */
2194 be_free_irq(adapter, &adapter->tx_eq, adapter);
2195
2196 for_all_rx_queues(adapter, rxo, i)
2197 be_free_irq(adapter, &rxo->rx_eq, rxo);
2198
2199done:
2200 adapter->isr_registered = false;
2201}
2202
2203static void be_rx_queues_clear(struct be_adapter *adapter)
2204{
2205 struct be_queue_info *q;
2206 struct be_rx_obj *rxo;
2207 int i;
2208
2209 for_all_rx_queues(adapter, rxo, i) {
2210 q = &rxo->q;
2211 if (q->created) {
2212 be_cmd_rxq_destroy(adapter, q);
2213 /* After the rxq is invalidated, wait for a grace time
2214 * of 1ms for all dma to end and the flush compl to
2215 * arrive
2216 */
2217 mdelay(1);
2218 be_rx_q_clean(adapter, rxo);
2219 }
2220
2221 /* Clear any residual events */
2222 q = &rxo->rx_eq.q;
2223 if (q->created)
2224 be_eq_clean(adapter, &rxo->rx_eq);
2225 }
2226}
2227
2228static int be_close(struct net_device *netdev)
2229{
2230 struct be_adapter *adapter = netdev_priv(netdev);
3abcdeda 2231 struct be_rx_obj *rxo;
3c8def97 2232 struct be_tx_obj *txo;
889cd4b2 2233 struct be_eq_obj *tx_eq = &adapter->tx_eq;
3abcdeda 2234 int vec, i;
889cd4b2 2235
2236 be_async_mcc_disable(adapter);
2237
2238 if (!lancer_chip(adapter))
2239 be_intr_set(adapter, false);
889cd4b2 2240
2241 for_all_rx_queues(adapter, rxo, i)
2242 napi_disable(&rxo->rx_eq.napi);
2243
2244 napi_disable(&tx_eq->napi);
2245
2246 if (lancer_chip(adapter)) {
2247 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2248 for_all_rx_queues(adapter, rxo, i)
2249 be_cq_notify(adapter, rxo->cq.id, false, 0);
2250 for_all_tx_queues(adapter, txo, i)
2251 be_cq_notify(adapter, txo->cq.id, false, 0);
2252 }
2253
ac6a0c4a 2254 if (msix_enabled(adapter)) {
fe6d2a38 2255 vec = be_msix_vec_get(adapter, tx_eq);
889cd4b2 2256 synchronize_irq(vec);
2257
2258 for_all_rx_queues(adapter, rxo, i) {
fe6d2a38 2259 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2260 synchronize_irq(vec);
2261 }
2262 } else {
2263 synchronize_irq(netdev->irq);
2264 }
2265 be_irq_unregister(adapter);
2266
2267 /* Wait for all pending tx completions to arrive so that
2268 * all tx skbs are freed.
2269 */
2270 for_all_tx_queues(adapter, txo, i)
2271 be_tx_compl_clean(adapter, txo);
889cd4b2 2272
2273 be_rx_queues_clear(adapter);
2274 return 0;
2275}
2276
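/* RX queue 0 is the default (non-RSS) ring; every additional ring is
 * created RSS-capable and its rss_id is then programmed into the
 * firmware RSS indirection table.
 */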
2277static int be_rx_queues_setup(struct be_adapter *adapter)
2278{
2279 struct be_rx_obj *rxo;
2280 int rc, i;
2281 u8 rsstable[MAX_RSS_QS];
2282
2283 for_all_rx_queues(adapter, rxo, i) {
2284 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2285 rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
2286 adapter->if_handle,
2287 (i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
2288 if (rc)
2289 return rc;
2290 }
2291
2292 if (be_multi_rxq(adapter)) {
2293 for_all_rss_queues(adapter, rxo, i)
2294 rsstable[i] = rxo->rss_id;
2295
2296 rc = be_cmd_rss_config(adapter, rsstable,
2297 adapter->num_rx_qs - 1);
2298 if (rc)
2299 return rc;
2300 }
2301
2302 /* First time posting */
2303 for_all_rx_queues(adapter, rxo, i) {
2304 be_post_rx_frags(rxo, GFP_KERNEL);
2305 napi_enable(&rxo->rx_eq.napi);
2306 }
2307 return 0;
2308}
2309
2310static int be_open(struct net_device *netdev)
2311{
2312 struct be_adapter *adapter = netdev_priv(netdev);
6b7c5b94 2313 struct be_eq_obj *tx_eq = &adapter->tx_eq;
3abcdeda 2314 struct be_rx_obj *rxo;
3abcdeda 2315 int status, i;
5fb379ee 2316
2317 status = be_rx_queues_setup(adapter);
2318 if (status)
2319 goto err;
2320
2321 napi_enable(&tx_eq->napi);
2322
2323 be_irq_register(adapter);
2324
2325 if (!lancer_chip(adapter))
2326 be_intr_set(adapter, true);
2327
2328 /* The evt queues are created in unarmed state; arm them */
2329 for_all_rx_queues(adapter, rxo, i) {
2330 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2331 be_cq_notify(adapter, rxo->cq.id, true, 0);
2332 }
8788fdc2 2333 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
5fb379ee 2334
2335 /* Now that interrupts are on we can process async mcc */
2336 be_async_mcc_enable(adapter);
2337
889cd4b2 2338 if (be_physfn(adapter)) {
1da87b7f 2339 status = be_vid_config(adapter, false, 0);
2340 if (status)
2341 goto err;
4f2aa89c 2342
2343 status = be_cmd_set_flow_control(adapter,
2344 adapter->tx_fc, adapter->rx_fc);
2345 if (status)
889cd4b2 2346 goto err;
ba343c77 2347 }
4f2aa89c 2348
2349 return 0;
2350err:
2351 be_close(adapter->netdev);
2352 return -EIO;
2353}
2354
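/* Wake-on-LAN setup: enabling sets the PM control bits in PCI config
 * space, points firmware at the netdev MAC for magic-packet matching and
 * arms D3hot/D3cold wake; disabling passes a zeroed MAC and disarms both
 * wake states.
 */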
2355static int be_setup_wol(struct be_adapter *adapter, bool enable)
2356{
2357 struct be_dma_mem cmd;
2358 int status = 0;
2359 u8 mac[ETH_ALEN];
2360
2361 memset(mac, 0, ETH_ALEN);
2362
2363 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2364 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2365 GFP_KERNEL);
2366 if (cmd.va == NULL)
2367 return -1;
2368 memset(cmd.va, 0, cmd.size);
2369
2370 if (enable) {
2371 status = pci_write_config_dword(adapter->pdev,
2372 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2373 if (status) {
2374 dev_err(&adapter->pdev->dev,
2381a55c 2375 "Could not enable Wake-on-lan\n");
2376 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2377 cmd.dma);
2378 return status;
2379 }
2380 status = be_cmd_enable_magic_wol(adapter,
2381 adapter->netdev->dev_addr, &cmd);
2382 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2383 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2384 } else {
2385 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2386 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2387 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2388 }
2389
2b7bcebf 2390 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2391 return status;
2392}
2393
2394/*
2395 * Generate a seed MAC address from the PF MAC Address using jhash.
2396 * MAC Address for VFs are assigned incrementally starting from the seed.
2397 * These addresses are programmed in the ASIC by the PF and the VF driver
2398 * queries for the MAC address during its probe.
2399 */
2400static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2401{
2402 u32 vf = 0;
3abcdeda 2403 int status = 0;
2404 u8 mac[ETH_ALEN];
2405
2406 be_vf_eth_addr_generate(adapter, mac);
2407
2408 for (vf = 0; vf < num_vfs; vf++) {
2409 status = be_cmd_pmac_add(adapter, mac,
2410 adapter->vf_cfg[vf].vf_if_handle,
2411 &adapter->vf_cfg[vf].vf_pmac_id,
2412 vf + 1);
2413 if (status)
2414 dev_err(&adapter->pdev->dev,
2415 "Mac address add failed for VF %d\n", vf);
2416 else
2417 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2418
2419 mac[5] += 1;
2420 }
2421 return status;
2422}
2423
2424static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2425{
2426 u32 vf;
2427
2428 for (vf = 0; vf < num_vfs; vf++) {
2429 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2430 be_cmd_pmac_del(adapter,
2431 adapter->vf_cfg[vf].vf_if_handle,
f8617e08 2432 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2433 }
2434}
2435
2436static int be_setup(struct be_adapter *adapter)
2437{
5fb379ee 2438 struct net_device *netdev = adapter->netdev;
ba343c77 2439 u32 cap_flags, en_flags, vf = 0;
6b7c5b94 2440 int status;
2441 u8 mac[ETH_ALEN];
2442
2443 be_cmd_req_native_mode(adapter);
2444
2445 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2446 BE_IF_FLAGS_BROADCAST |
2447 BE_IF_FLAGS_MULTICAST;
6b7c5b94 2448
2449 if (be_physfn(adapter)) {
2450 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2451 BE_IF_FLAGS_PROMISCUOUS |
2452 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2453 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
3abcdeda 2454
ac6a0c4a 2455 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2456 cap_flags |= BE_IF_FLAGS_RSS;
2457 en_flags |= BE_IF_FLAGS_RSS;
2458 }
ba343c77 2459 }
2460
2461 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2462 netdev->dev_addr, false/* pmac_invalid */,
ba343c77 2463 &adapter->if_handle, &adapter->pmac_id, 0);
2464 if (status != 0)
2465 goto do_none;
2466
ba343c77 2467 if (be_physfn(adapter)) {
2468 if (adapter->sriov_enabled) {
2469 while (vf < num_vfs) {
2470 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2471 BE_IF_FLAGS_BROADCAST;
2472 status = be_cmd_if_create(adapter, cap_flags,
2473 en_flags, mac, true,
64600ea5 2474 &adapter->vf_cfg[vf].vf_if_handle,
ba343c77 2475 NULL, vf+1);
2476 if (status) {
2477 dev_err(&adapter->pdev->dev,
2478 "Interface Create failed for VF %d\n",
2479 vf);
2480 goto if_destroy;
2481 }
2482 adapter->vf_cfg[vf].vf_pmac_id =
2483 BE_INVALID_PMAC_ID;
2484 vf++;
ba343c77 2485 }
84e5b9f7 2486 }
c99ac3e7 2487 } else {
2488 status = be_cmd_mac_addr_query(adapter, mac,
2489 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2490 if (!status) {
2491 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2492 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2493 }
2494 }
2495
2496 status = be_tx_queues_create(adapter);
2497 if (status != 0)
2498 goto if_destroy;
2499
2500 status = be_rx_queues_create(adapter);
2501 if (status != 0)
2502 goto tx_qs_destroy;
2503
2504 /* Allow all priorities by default. A GRP5 evt may modify this */
2505 adapter->vlan_prio_bmap = 0xff;
2506
2507 status = be_mcc_queues_create(adapter);
2508 if (status != 0)
2509 goto rx_qs_destroy;
6b7c5b94 2510
2511 adapter->link_speed = -1;
2512
2513 return 0;
2514
2515rx_qs_destroy:
2516 be_rx_queues_destroy(adapter);
2517tx_qs_destroy:
2518 be_tx_queues_destroy(adapter);
2519if_destroy:
2520 if (be_physfn(adapter) && adapter->sriov_enabled)
2521 for (vf = 0; vf < num_vfs; vf++)
2522 if (adapter->vf_cfg[vf].vf_if_handle)
2523 be_cmd_if_destroy(adapter,
2524 adapter->vf_cfg[vf].vf_if_handle,
2525 vf + 1);
2526 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2527do_none:
2528 return status;
2529}
2530
2531static int be_clear(struct be_adapter *adapter)
2532{
2533 int vf;
2534
c99ac3e7 2535 if (be_physfn(adapter) && adapter->sriov_enabled)
2536 be_vf_eth_addr_rem(adapter);
2537
1a8887d8 2538 be_mcc_queues_destroy(adapter);
2539 be_rx_queues_destroy(adapter);
2540 be_tx_queues_destroy(adapter);
1f5db833 2541 adapter->eq_next_idx = 0;
5fb379ee 2542
2543 if (be_physfn(adapter) && adapter->sriov_enabled)
2544 for (vf = 0; vf < num_vfs; vf++)
2545 if (adapter->vf_cfg[vf].vf_if_handle)
2546 be_cmd_if_destroy(adapter,
2547 adapter->vf_cfg[vf].vf_if_handle,
2548 vf + 1);
2549
658681f7 2550 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
5fb379ee 2551
2552 adapter->be3_native = 0;
2553
2554 /* tell fw we're done with firing cmds */
2555 be_cmd_fw_clean(adapter);
2556 return 0;
2557}
2558
6b7c5b94 2559
84517482 2560#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
fa9a6fed 2561static bool be_flash_redboot(struct be_adapter *adapter,
2562 const u8 *p, u32 img_start, int image_size,
2563 int hdr_size)
2564{
2565 u32 crc_offset;
2566 u8 flashed_crc[4];
2567 int status;
2568
2569 crc_offset = hdr_size + img_start + image_size - 4;
2570
fa9a6fed 2571 p += crc_offset;
2572
2573 status = be_cmd_get_flash_crc(adapter, flashed_crc,
f510fc64 2574 (image_size - 4));
2575 if (status) {
2576 dev_err(&adapter->pdev->dev,
2577 "could not get crc from flash, not flashing redboot\n");
2578 return false;
2579 }
2580
2581 /* update redboot only if crc does not match */
2582 if (!memcmp(flashed_crc, p, 4))
2583 return false;
2584 else
2585 return true;
2586}
2587
2588static bool phy_flashing_required(struct be_adapter *adapter)
2589{
2590 int status = 0;
2591 struct be_phy_info phy_info;
2592
2593 status = be_cmd_get_phy_info(adapter, &phy_info);
2594 if (status)
2595 return false;
2596 if ((phy_info.phy_type == TN_8022) &&
2597 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2598 return true;
2599 }
2600 return false;
2601}
2602
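/* The flash layout is described by per-generation component tables
 * (offset, op-type, max size). Each selected component is streamed to
 * firmware in 32KB chunks: every chunk but the last is written with a
 * SAVE op and the final chunk uses a FLASH op (or the PHY equivalents)
 * to commit the image.
 */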
3f0d4560 2603static int be_flash_data(struct be_adapter *adapter,
84517482 2604 const struct firmware *fw,
2605 struct be_dma_mem *flash_cmd, int num_of_images)
2606
84517482 2607{
2608 int status = 0, i, filehdr_size = 0;
2609 u32 total_bytes = 0, flash_op;
2610 int num_bytes;
2611 const u8 *p = fw->data;
2612 struct be_cmd_write_flashrom *req = flash_cmd->va;
215faf9c 2613 const struct flash_comp *pflashcomp;
9fe96934 2614 int num_comp;
3f0d4560 2615
306f1348 2616 static const struct flash_comp gen3_flash_types[10] = {
2617 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2618 FLASH_IMAGE_MAX_SIZE_g3},
2619 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2620 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2621 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2622 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2623 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2624 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2625 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2626 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2627 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2628 FLASH_IMAGE_MAX_SIZE_g3},
2629 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2630 FLASH_IMAGE_MAX_SIZE_g3},
2631 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2632 FLASH_IMAGE_MAX_SIZE_g3},
2633 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2634 FLASH_NCSI_IMAGE_MAX_SIZE_g3},
2635 { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
2636 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
3f0d4560 2637 };
215faf9c 2638 static const struct flash_comp gen2_flash_types[8] = {
2639 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2640 FLASH_IMAGE_MAX_SIZE_g2},
2641 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2642 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2643 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2644 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2645 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2646 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2647 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2648 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2649 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2650 FLASH_IMAGE_MAX_SIZE_g2},
2651 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2652 FLASH_IMAGE_MAX_SIZE_g2},
2653 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2654 FLASH_IMAGE_MAX_SIZE_g2}
2655 };
2656
2657 if (adapter->generation == BE_GEN3) {
2658 pflashcomp = gen3_flash_types;
2659 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 2660 num_comp = ARRAY_SIZE(gen3_flash_types);
2661 } else {
2662 pflashcomp = gen2_flash_types;
2663 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 2664 num_comp = ARRAY_SIZE(gen2_flash_types);
84517482 2665 }
2666 for (i = 0; i < num_comp; i++) {
2667 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2668 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2669 continue;
2670 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
2671 if (!phy_flashing_required(adapter))
2672 continue;
2673 }
2674 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2675 (!be_flash_redboot(adapter, fw->data,
2676 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2677 (num_of_images * sizeof(struct image_hdr)))))
2678 continue;
2679 p = fw->data;
2680 p += filehdr_size + pflashcomp[i].offset
2681 + (num_of_images * sizeof(struct image_hdr));
2682 if (p + pflashcomp[i].size > fw->data + fw->size)
2683 return -1;
2684 total_bytes = pflashcomp[i].size;
2685 while (total_bytes) {
2686 if (total_bytes > 32*1024)
2687 num_bytes = 32*1024;
2688 else
2689 num_bytes = total_bytes;
2690 total_bytes -= num_bytes;
2691 if (!total_bytes) {
2692 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2693 flash_op = FLASHROM_OPER_PHY_FLASH;
2694 else
2695 flash_op = FLASHROM_OPER_FLASH;
2696 } else {
2697 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2698 flash_op = FLASHROM_OPER_PHY_SAVE;
2699 else
2700 flash_op = FLASHROM_OPER_SAVE;
2701 }
2702 memcpy(req->params.data_buf, p, num_bytes);
2703 p += num_bytes;
2704 status = be_cmd_write_flashrom(adapter, flash_cmd,
2705 pflashcomp[i].optype, flash_op, num_bytes);
2706 if (status) {
2707 if ((status == ILLEGAL_IOCTL_REQ) &&
2708 (pflashcomp[i].optype ==
2709 IMG_TYPE_PHY_FW))
2710 break;
2711 dev_err(&adapter->pdev->dev,
2712 "cmd to write to flash rom failed.\n");
2713 return -1;
2714 }
84517482 2715 }
84517482 2716 }
2717 return 0;
2718}
2719
2720static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2721{
2722 if (fhdr == NULL)
2723 return 0;
2724 if (fhdr->build[0] == '3')
2725 return BE_GEN3;
2726 else if (fhdr->build[0] == '2')
2727 return BE_GEN2;
2728 else
2729 return 0;
2730}
2731
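/* Lancer firmware download: the image must be 4-byte aligned and is
 * written to the "/prg" object in 32KB chunks; a final zero-length
 * write at the end offset commits the downloaded image.
 */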
2732static int lancer_fw_download(struct be_adapter *adapter,
2733 const struct firmware *fw)
84517482 2734{
2735#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
2736#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
84517482 2737 struct be_dma_mem flash_cmd;
2738 const u8 *data_ptr = NULL;
2739 u8 *dest_image_ptr = NULL;
2740 size_t image_size = 0;
2741 u32 chunk_size = 0;
2742 u32 data_written = 0;
2743 u32 offset = 0;
2744 int status = 0;
2745 u8 add_status = 0;
84517482 2746
485bf569 2747 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
d9efd2af 2748 dev_err(&adapter->pdev->dev,
2749 "FW Image not properly aligned. "
2750 "Length must be 4 byte aligned.\n");
2751 status = -EINVAL;
2752 goto lancer_fw_exit;
2753 }
2754
2755 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2756 + LANCER_FW_DOWNLOAD_CHUNK;
2757 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2758 &flash_cmd.dma, GFP_KERNEL);
2759 if (!flash_cmd.va) {
2760 status = -ENOMEM;
2761 dev_err(&adapter->pdev->dev,
2762 "Memory allocation failure while flashing\n");
2763 goto lancer_fw_exit;
2764 }
84517482 2765
2766 dest_image_ptr = flash_cmd.va +
2767 sizeof(struct lancer_cmd_req_write_object);
2768 image_size = fw->size;
2769 data_ptr = fw->data;
2770
2771 while (image_size) {
2772 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2773
2774 /* Copy the image chunk content. */
2775 memcpy(dest_image_ptr, data_ptr, chunk_size);
2776
2777 status = lancer_cmd_write_object(adapter, &flash_cmd,
2778 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2779 &data_written, &add_status);
2780
2781 if (status)
2782 break;
2783
2784 offset += data_written;
2785 data_ptr += data_written;
2786 image_size -= data_written;
2787 }
2788
2789 if (!status) {
2790 /* Commit the FW written */
2791 status = lancer_cmd_write_object(adapter, &flash_cmd,
2792 0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2793 &data_written, &add_status);
2794 }
2795
2796 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2797 flash_cmd.dma);
2798 if (status) {
2799 dev_err(&adapter->pdev->dev,
2800 "Firmware load error. "
2801 "Status code: 0x%x Additional Status: 0x%x\n",
2802 status, add_status);
2803 goto lancer_fw_exit;
2804 }
2805
2806 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2807lancer_fw_exit:
2808 return status;
2809}
2810
2811static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
2812{
2813 struct flash_file_hdr_g2 *fhdr;
2814 struct flash_file_hdr_g3 *fhdr3;
2815 struct image_hdr *img_hdr_ptr = NULL;
2816 struct be_dma_mem flash_cmd;
2817 const u8 *p;
2818 int status = 0, i = 0, num_imgs = 0;
2819
2820 p = fw->data;
3f0d4560 2821 fhdr = (struct flash_file_hdr_g2 *) p;
84517482 2822
84517482 2823 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2824 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2825 &flash_cmd.dma, GFP_KERNEL);
2826 if (!flash_cmd.va) {
2827 status = -ENOMEM;
2828 dev_err(&adapter->pdev->dev,
2829 "Memory allocation failure while flashing\n");
485bf569 2830 goto be_fw_exit;
2831 }
2832
2833 if ((adapter->generation == BE_GEN3) &&
2834 (get_ufigen_type(fhdr) == BE_GEN3)) {
2835 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2836 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2837 for (i = 0; i < num_imgs; i++) {
2838 img_hdr_ptr = (struct image_hdr *) (fw->data +
2839 (sizeof(struct flash_file_hdr_g3) +
2840 i * sizeof(struct image_hdr)));
2841 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2842 status = be_flash_data(adapter, fw, &flash_cmd,
2843 num_imgs);
2844 }
2845 } else if ((adapter->generation == BE_GEN2) &&
2846 (get_ufigen_type(fhdr) == BE_GEN2)) {
2847 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2848 } else {
2849 dev_err(&adapter->pdev->dev,
2850 "UFI and Interface are not compatible for flashing\n");
2851 status = -1;
2852 }
2853
2854 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2855 flash_cmd.dma);
2856 if (status) {
2857 dev_err(&adapter->pdev->dev, "Firmware load error\n");
485bf569 2858 goto be_fw_exit;
2859 }
2860
af901ca1 2861 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
84517482 2862
2863be_fw_exit:
2864 return status;
2865}
2866
2867int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2868{
2869 const struct firmware *fw;
2870 int status;
2871
2872 if (!netif_running(adapter->netdev)) {
2873 dev_err(&adapter->pdev->dev,
2874 "Firmware load not allowed (interface is down)\n");
2875 return -1;
2876 }
2877
2878 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2879 if (status)
2880 goto fw_exit;
2881
2882 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2883
2884 if (lancer_chip(adapter))
2885 status = lancer_fw_download(adapter, fw);
2886 else
2887 status = be_fw_download(adapter, fw);
2888
2889fw_exit:
2890 release_firmware(fw);
2891 return status;
2892}
2893
2894static struct net_device_ops be_netdev_ops = {
2895 .ndo_open = be_open,
2896 .ndo_stop = be_close,
2897 .ndo_start_xmit = be_xmit,
2898 .ndo_set_rx_mode = be_set_multicast_list,
2899 .ndo_set_mac_address = be_mac_addr_set,
2900 .ndo_change_mtu = be_change_mtu,
ab1594e9 2901 .ndo_get_stats64 = be_get_stats64,
6b7c5b94 2902 .ndo_validate_addr = eth_validate_addr,
2903 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
2904 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
64600ea5 2905 .ndo_set_vf_mac = be_set_vf_mac,
1da87b7f 2906 .ndo_set_vf_vlan = be_set_vf_vlan,
e1d18735 2907 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
64600ea5 2908 .ndo_get_vf_config = be_get_vf_config
2909};
2910
2911static void be_netdev_init(struct net_device *netdev)
2912{
2913 struct be_adapter *adapter = netdev_priv(netdev);
2914 struct be_rx_obj *rxo;
2915 int i;
6b7c5b94 2916
6332c8d3 2917 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2918 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
2919 NETIF_F_HW_VLAN_TX;
2920 if (be_multi_rxq(adapter))
2921 netdev->hw_features |= NETIF_F_RXHASH;
2922
2923 netdev->features |= netdev->hw_features |
8b8ddc68 2924 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
4b972914 2925
eb8a50d9 2926 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
79032644 2927 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
51c59870 2928
2929 netdev->flags |= IFF_MULTICAST;
2930
2931 /* Default settings for Rx and Tx flow control */
2932 adapter->rx_fc = true;
2933 adapter->tx_fc = true;
2934
2935 netif_set_gso_max_size(netdev, 65535);
2936
2937 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2938
2939 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2940
2941 for_all_rx_queues(adapter, rxo, i)
2942 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2943 BE_NAPI_WEIGHT);
2944
5fb379ee 2945 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
6b7c5b94 2946 BE_NAPI_WEIGHT);
2947}
2948
2949static void be_unmap_pci_bars(struct be_adapter *adapter)
2950{
2951 if (adapter->csr)
2952 iounmap(adapter->csr);
2953 if (adapter->db)
2954 iounmap(adapter->db);
ba343c77 2955 if (adapter->pcicfg && be_physfn(adapter))
8788fdc2 2956 iounmap(adapter->pcicfg);
2957}
2958
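/* BAR layout differs by chip: Lancer exposes only a doorbell BAR (0);
 * BE2/BE3 PFs map the CSR space from BAR 2 and the doorbell from BAR 4,
 * with the PCI config shadow in BAR 1 (gen2) or BAR 0 (gen3), while VFs
 * reach their config space at a fixed offset from the doorbell BAR.
 */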
2959static int be_map_pci_bars(struct be_adapter *adapter)
2960{
2961 u8 __iomem *addr;
ba343c77 2962 int pcicfg_reg, db_reg;
6b7c5b94 2963
2964 if (lancer_chip(adapter)) {
2965 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2966 pci_resource_len(adapter->pdev, 0));
2967 if (addr == NULL)
2968 return -ENOMEM;
2969 adapter->db = addr;
2970 return 0;
2971 }
2972
2973 if (be_physfn(adapter)) {
2974 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2975 pci_resource_len(adapter->pdev, 2));
2976 if (addr == NULL)
2977 return -ENOMEM;
2978 adapter->csr = addr;
2979 }
6b7c5b94 2980
ba343c77 2981 if (adapter->generation == BE_GEN2) {
7b139c83 2982 pcicfg_reg = 1;
2983 db_reg = 4;
2984 } else {
7b139c83 2985 pcicfg_reg = 0;
2986 if (be_physfn(adapter))
2987 db_reg = 4;
2988 else
2989 db_reg = 0;
2990 }
2991 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2992 pci_resource_len(adapter->pdev, db_reg));
2993 if (addr == NULL)
2994 goto pci_map_err;
2995 adapter->db = addr;
2996
2997 if (be_physfn(adapter)) {
2998 addr = ioremap_nocache(
2999 pci_resource_start(adapter->pdev, pcicfg_reg),
3000 pci_resource_len(adapter->pdev, pcicfg_reg));
3001 if (addr == NULL)
3002 goto pci_map_err;
3003 adapter->pcicfg = addr;
3004 } else
3005 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
3006
3007 return 0;
3008pci_map_err:
3009 be_unmap_pci_bars(adapter);
3010 return -ENOMEM;
3011}
3012
3013
3014static void be_ctrl_cleanup(struct be_adapter *adapter)
3015{
8788fdc2 3016 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3017
3018 be_unmap_pci_bars(adapter);
3019
3020 if (mem->va)
3021 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3022 mem->dma);
e7b909a6 3023
5b8821b7 3024 mem = &adapter->rx_filter;
e7b909a6 3025 if (mem->va)
3026 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3027 mem->dma);
3028}
3029
3030static int be_ctrl_init(struct be_adapter *adapter)
3031{
3032 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3033 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
5b8821b7 3034 struct be_dma_mem *rx_filter = &adapter->rx_filter;
6b7c5b94 3035 int status;
3036
3037 status = be_map_pci_bars(adapter);
3038 if (status)
e7b909a6 3039 goto done;
3040
3041 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3042 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3043 mbox_mem_alloc->size,
3044 &mbox_mem_alloc->dma,
3045 GFP_KERNEL);
6b7c5b94 3046 if (!mbox_mem_alloc->va) {
3047 status = -ENOMEM;
3048 goto unmap_pci_bars;
3049 }
3050 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3051 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3052 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3053 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
e7b909a6 3054
3055 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3056 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3057 &rx_filter->dma, GFP_KERNEL);
3058 if (rx_filter->va == NULL) {
3059 status = -ENOMEM;
3060 goto free_mbox;
3061 }
5b8821b7 3062 memset(rx_filter->va, 0, rx_filter->size);
e7b909a6 3063
2984961c 3064 mutex_init(&adapter->mbox_lock);
3065 spin_lock_init(&adapter->mcc_lock);
3066 spin_lock_init(&adapter->mcc_cq_lock);
a8f447bd 3067
dd131e76 3068 init_completion(&adapter->flash_compl);
cf588477 3069 pci_save_state(adapter->pdev);
6b7c5b94 3070 return 0;
3071
3072free_mbox:
3073 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3074 mbox_mem_alloc->va, mbox_mem_alloc->dma);
3075
3076unmap_pci_bars:
3077 be_unmap_pci_bars(adapter);
3078
3079done:
3080 return status;
3081}
3082
3083static void be_stats_cleanup(struct be_adapter *adapter)
3084{
3abcdeda 3085 struct be_dma_mem *cmd = &adapter->stats_cmd;
3086
3087 if (cmd->va)
3088 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3089 cmd->va, cmd->dma);
3090}
3091
3092static int be_stats_init(struct be_adapter *adapter)
3093{
3abcdeda 3094 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94 3095
005d5696 3096 if (adapter->generation == BE_GEN2) {
89a88ab8 3097 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3098 } else {
3099 if (lancer_chip(adapter))
3100 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3101 else
3102 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3103 }
3104 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3105 GFP_KERNEL);
3106 if (cmd->va == NULL)
3107 return -1;
d291b9af 3108 memset(cmd->va, 0, cmd->size);
3109 return 0;
3110}
3111
3112static void __devexit be_remove(struct pci_dev *pdev)
3113{
3114 struct be_adapter *adapter = pci_get_drvdata(pdev);
8d56ff11 3115
3116 if (!adapter)
3117 return;
3118
3119 cancel_delayed_work_sync(&adapter->work);
3120
3121 unregister_netdev(adapter->netdev);
3122
3123 be_clear(adapter);
3124
3125 be_stats_cleanup(adapter);
3126
3127 be_ctrl_cleanup(adapter);
3128
48f5a191 3129 kfree(adapter->vf_cfg);
3130 be_sriov_disable(adapter);
3131
8d56ff11 3132 be_msix_disable(adapter);
3133
3134 pci_set_drvdata(pdev, NULL);
3135 pci_release_regions(pdev);
3136 pci_disable_device(pdev);
3137
3138 free_netdev(adapter->netdev);
3139}
3140
2243e2e9 3141static int be_get_config(struct be_adapter *adapter)
6b7c5b94 3142{
6b7c5b94 3143 int status;
2243e2e9 3144 u8 mac[ETH_ALEN];
6b7c5b94 3145
2243e2e9 3146 status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
3147 if (status)
3148 return status;
3149
3150 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3151 &adapter->function_mode, &adapter->function_caps);
3152 if (status)
3153 return status;
3154
2243e2e9 3155 memset(mac, 0, ETH_ALEN);
ba343c77 3156
3157 /* A default permanent address is given to each VF for Lancer */
3158 if (be_physfn(adapter) || lancer_chip(adapter)) {
ba343c77 3159 status = be_cmd_mac_addr_query(adapter, mac,
2243e2e9 3160 MAC_ADDRESS_TYPE_NETWORK, true /* permanent */, 0);
ca9e4988 3161
3162 if (status)
3163 return status;
ca9e4988 3164
3165 if (!is_valid_ether_addr(mac))
3166 return -EADDRNOTAVAIL;
3167
3168 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3169 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3170 }
6b7c5b94 3171
3486be29 3172 if (adapter->function_mode & 0x400)
3173 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3174 else
3175 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3176
3177 status = be_cmd_get_cntl_attributes(adapter);
3178 if (status)
3179 return status;
3180
3181 if ((num_vfs && adapter->sriov_enabled) ||
3182 (adapter->function_mode & 0x400) ||
3183 lancer_chip(adapter) || !be_physfn(adapter)) {
3184 adapter->num_tx_qs = 1;
3185 netif_set_real_num_tx_queues(adapter->netdev,
3186 adapter->num_tx_qs);
3187 } else {
3188 adapter->num_tx_qs = MAX_TX_QS;
3189 }
3190
2243e2e9 3191 return 0;
3192}
3193
3194static int be_dev_family_check(struct be_adapter *adapter)
3195{
3196 struct pci_dev *pdev = adapter->pdev;
3197 u32 sli_intf = 0, if_type;
3198
3199 switch (pdev->device) {
3200 case BE_DEVICE_ID1:
3201 case OC_DEVICE_ID1:
3202 adapter->generation = BE_GEN2;
3203 break;
3204 case BE_DEVICE_ID2:
3205 case OC_DEVICE_ID2:
3206 adapter->generation = BE_GEN3;
3207 break;
3208 case OC_DEVICE_ID3:
12f4d0a8 3209 case OC_DEVICE_ID4:
3210 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3211 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3212 SLI_INTF_IF_TYPE_SHIFT;
3213
3214 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3215 if_type != 0x02) {
3216 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3217 return -EINVAL;
3218 }
3219 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3220 SLI_INTF_FAMILY_SHIFT);
3221 adapter->generation = BE_GEN3;
3222 break;
3223 default:
3224 adapter->generation = 0;
3225 }
3226 return 0;
3227}
3228
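/* Poll the SLIPORT status register for the ready bit, 20ms per try for
 * up to 500 tries (~10s). If both the error and reset-needed bits are
 * set, a physical-port reset is issued via SLIPORT_CONTROL and readiness
 * is re-checked.
 */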
3229static int lancer_wait_ready(struct be_adapter *adapter)
3230{
3231#define SLIPORT_READY_TIMEOUT 500
3232 u32 sliport_status;
3233 int status = 0, i;
3234
3235 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3236 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3237 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3238 break;
3239
3240 msleep(20);
3241 }
3242
3243 if (i == SLIPORT_READY_TIMEOUT)
3244 status = -1;
3245
3246 return status;
3247}
3248
3249static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3250{
3251 int status;
3252 u32 sliport_status, err, reset_needed;
3253 status = lancer_wait_ready(adapter);
3254 if (!status) {
3255 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3256 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3257 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3258 if (err && reset_needed) {
3259 iowrite32(SLI_PORT_CONTROL_IP_MASK,
3260 adapter->db + SLIPORT_CONTROL_OFFSET);
3261
3262 /* check adapter has corrected the error */
3263 status = lancer_wait_ready(adapter);
3264 sliport_status = ioread32(adapter->db +
3265 SLIPORT_STATUS_OFFSET);
3266 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3267 SLIPORT_STATUS_RN_MASK);
3268 if (status || sliport_status)
3269 status = -1;
3270 } else if (err || reset_needed) {
3271 status = -1;
3272 }
3273 }
3274 return status;
3275}
3276
3277static int __devinit be_probe(struct pci_dev *pdev,
3278 const struct pci_device_id *pdev_id)
3279{
3280 int status = 0;
3281 struct be_adapter *adapter;
3282 struct net_device *netdev;
3283
3284 status = pci_enable_device(pdev);
3285 if (status)
3286 goto do_none;
3287
3288 status = pci_request_regions(pdev, DRV_NAME);
3289 if (status)
3290 goto disable_dev;
3291 pci_set_master(pdev);
3292
3c8def97 3293 netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
3294 if (netdev == NULL) {
3295 status = -ENOMEM;
3296 goto rel_reg;
3297 }
3298 adapter = netdev_priv(netdev);
3299 adapter->pdev = pdev;
3300 pci_set_drvdata(pdev, adapter);
3301
3302 status = be_dev_family_check(adapter);
63657b9c 3303 if (status)
3304 goto free_netdev;
3305
6b7c5b94 3306 adapter->netdev = netdev;
2243e2e9 3307 SET_NETDEV_DEV(netdev, &pdev->dev);
6b7c5b94 3308
2b7bcebf 3309 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3310 if (!status) {
3311 netdev->features |= NETIF_F_HIGHDMA;
3312 } else {
2b7bcebf 3313 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3314 if (status) {
3315 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3316 goto free_netdev;
3317 }
3318 }
3319
ba343c77 3320 be_sriov_enable(adapter);
3321 if (adapter->sriov_enabled) {
3322 adapter->vf_cfg = kcalloc(num_vfs,
3323 sizeof(struct be_vf_cfg), GFP_KERNEL);
3324
3325 if (!adapter->vf_cfg)
3326 goto free_netdev;
3327 }
ba343c77 3328
3329 status = be_ctrl_init(adapter);
3330 if (status)
48f5a191 3331 goto free_vf_cfg;
6b7c5b94 3332
3333 if (lancer_chip(adapter)) {
3334 status = lancer_test_and_set_rdy_state(adapter);
3335 if (status) {
3336 dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
48f5a191 3337 goto ctrl_clean;
3338 }
3339 }
3340
2243e2e9 3341 /* sync up with fw's ready state */
3342 if (be_physfn(adapter)) {
3343 status = be_cmd_POST(adapter);
3344 if (status)
3345 goto ctrl_clean;
ba343c77 3346 }
6b7c5b94 3347
3348 /* tell fw we're ready to fire cmds */
3349 status = be_cmd_fw_init(adapter);
6b7c5b94 3350 if (status)
3351 goto ctrl_clean;
3352
3353 status = be_cmd_reset_function(adapter);
3354 if (status)
3355 goto ctrl_clean;
556ae191 3356
3357 status = be_stats_init(adapter);
3358 if (status)
3359 goto ctrl_clean;
3360
3361 status = be_get_config(adapter);
3362 if (status)
3363 goto stats_clean;
6b7c5b94 3364
3365 /* The INTR bit may be set in the card when probed by a kdump kernel
3366 * after a crash.
3367 */
3368 if (!lancer_chip(adapter))
3369 be_intr_set(adapter, false);
3370
3371 be_msix_enable(adapter);
3372
6b7c5b94 3373 INIT_DELAYED_WORK(&adapter->work, be_worker);
6b7c5b94 3374
3375 status = be_setup(adapter);
3376 if (status)
3abcdeda 3377 goto msix_disable;
2243e2e9 3378
3abcdeda 3379 be_netdev_init(netdev);
3380 status = register_netdev(netdev);
3381 if (status != 0)
5fb379ee 3382 goto unsetup;
6b7c5b94 3383
e6319365 3384 if (be_physfn(adapter) && adapter->sriov_enabled) {
d0381c42 3385 u8 mac_speed;
3386 u16 vf, lnk_speed;
3387
3388 if (!lancer_chip(adapter)) {
3389 status = be_vf_eth_addr_config(adapter);
3390 if (status)
3391 goto unreg_netdev;
3392 }
3393
3394 for (vf = 0; vf < num_vfs; vf++) {
3395 status = be_cmd_link_status_query(adapter, &mac_speed,
3396 &lnk_speed, vf + 1);
3397 if (!status)
3398 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
3399 else
3400 goto unreg_netdev;
3401 }
3402 }
3403
c4ca2374 3404 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
34b1ef04 3405
f203af70 3406 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3407 return 0;
3408
3409unreg_netdev:
3410 unregister_netdev(netdev);
3411unsetup:
3412 be_clear(adapter);
3413msix_disable:
3414 be_msix_disable(adapter);
3415stats_clean:
3416 be_stats_cleanup(adapter);
3417ctrl_clean:
3418 be_ctrl_cleanup(adapter);
48f5a191
AK
3419free_vf_cfg:
3420 kfree(adapter->vf_cfg);
6b7c5b94 3421free_netdev:
ba343c77 3422 be_sriov_disable(adapter);
fe6d2a38 3423 free_netdev(netdev);
8d56ff11 3424 pci_set_drvdata(pdev, NULL);
6b7c5b94
SP
3425rel_reg:
3426 pci_release_regions(pdev);
3427disable_dev:
3428 pci_disable_device(pdev);
3429do_none:
c4ca2374 3430 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
6b7c5b94
SP
3431 return status;
3432}
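
/* PM suspend: arm wake-on-lan if configured, detach and close the
 * interface, tear down rings, then put the device into a low-power state.
 */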
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
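
/* PM resume: re-enable the device, re-init the FW command interface,
 * rebuild rings via be_setup() and reopen the interface if it was up.
 */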
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}

/* An FLR (function-level reset) will stop BE from DMAing any data; reset
 * the function here so the device is quiescent across shutdown/reboot.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
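
/* PCI EEH (Extended Error Handling) callbacks: detach and tear down on a
 * detected channel error, re-POST the card on slot reset, and rebuild the
 * adapter state on resume.
 */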
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
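
/* Module entry points. rx_frag_size must be 2048, 4096 or 8192; anything
 * else is overridden to 2048 with a warning. Example load (assuming the
 * module is built as be2net): modprobe be2net rx_frag_size=4096 num_vfs=2
 */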
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);