be2net: use RX_FILTER cmd to program multicast addresses
[deliverable/linux.git] / drivers / net / benet / be_main.c
CommitLineData
6b7c5b94 1/*
d2145cde 2 * Copyright (C) 2005 - 2011 Emulex
6b7c5b94
SP
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
d2145cde 11 * linux-drivers@emulex.com
6b7c5b94 12 *
d2145cde
AK
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
6b7c5b94
SP
16 */
17
70c71606 18#include <linux/prefetch.h>
6b7c5b94 19#include "be.h"
8788fdc2 20#include "be_cmds.h"
65f71b8b 21#include <asm/div64.h>
6b7c5b94
SP
22
23MODULE_VERSION(DRV_VER);
24MODULE_DEVICE_TABLE(pci, be_dev_ids);
25MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
26MODULE_AUTHOR("ServerEngines Corporation");
27MODULE_LICENSE("GPL");
28
2e588f84 29static ushort rx_frag_size = 2048;
ba343c77 30static unsigned int num_vfs;
2e588f84 31module_param(rx_frag_size, ushort, S_IRUGO);
ba343c77 32module_param(num_vfs, uint, S_IRUGO);
6b7c5b94 33MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
ba343c77 34MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
6b7c5b94 35
6b7c5b94 36static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
c4ca2374 37 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
59fd5d87 38 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
c4ca2374
AK
39 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
40 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
fe6d2a38 41 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
12f4d0a8 42 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
6b7c5b94
SP
43 { 0 }
44};
45MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: per-bit name of the block that raised the
 * unrecoverable error (trailing spaces preserved as in the original).
 */
static const char * const ue_status_low_desc[] = {
	"CEV", "CTX", "DBUF", "ERX", "Host", "MPU", "NDMA", "PTC ",
	"RDMA ", "RXF ", "RXIPS ", "RXULP0 ", "RXULP1 ", "RXULP2 ",
	"TIM ", "TPOST ", "TPRE ", "TXIPS ", "TXULP0 ", "TXULP1 ",
	"UC ", "WDMA ", "TXULP2 ", "HOST1 ", "P0_OB_LINK ", "P1_OB_LINK ",
	"HOST_GPIO ", "MBOX ", "AXGMAC0", "AXGMAC1", "JTAG", "MPU_INTPEND"
};
/* UE Status High CSR: per-bit name of the block that raised the
 * unrecoverable error; undefined bits report as "Unknown".
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST", "MGMT_MAC", "PCS0ONLINE", "MPU_IRAM", "PCS1ONLINE",
	"PCTL0", "PCTL1", "PMEM", "RR", "TXPB", "RXPP", "XAUI", "TXP",
	"ARM", "IPC", "HOST2", "HOST3", "HOST4", "HOST5", "HOST6",
	"HOST7", "HOST8", "HOST9", "NETC",
	"Unknown", "Unknown", "Unknown", "Unknown",
	"Unknown", "Unknown", "Unknown", "Unknown"
};
6b7c5b94
SP
116
117static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
118{
119 struct be_dma_mem *mem = &q->dma_mem;
120 if (mem->va)
2b7bcebf
IV
121 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
122 mem->dma);
6b7c5b94
SP
123}
124
125static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
126 u16 len, u16 entry_size)
127{
128 struct be_dma_mem *mem = &q->dma_mem;
129
130 memset(q, 0, sizeof(*q));
131 q->len = len;
132 q->entry_size = entry_size;
133 mem->size = len * entry_size;
2b7bcebf
IV
134 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
135 GFP_KERNEL);
6b7c5b94
SP
136 if (!mem->va)
137 return -1;
138 memset(mem->va, 0, mem->size);
139 return 0;
140}
141
8788fdc2 142static void be_intr_set(struct be_adapter *adapter, bool enable)
6b7c5b94 143{
8788fdc2 144 u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
6b7c5b94
SP
145 u32 reg = ioread32(addr);
146 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
5f0b849e 147
cf588477
SP
148 if (adapter->eeh_err)
149 return;
150
5f0b849e 151 if (!enabled && enable)
6b7c5b94 152 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
5f0b849e 153 else if (enabled && !enable)
6b7c5b94 154 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
5f0b849e 155 else
6b7c5b94 156 return;
5f0b849e 157
6b7c5b94
SP
158 iowrite32(reg, addr);
159}
160
8788fdc2 161static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
6b7c5b94
SP
162{
163 u32 val = 0;
164 val |= qid & DB_RQ_RING_ID_MASK;
165 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
f3eb62d2
SP
166
167 wmb();
8788fdc2 168 iowrite32(val, adapter->db + DB_RQ_OFFSET);
6b7c5b94
SP
169}
170
8788fdc2 171static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
6b7c5b94
SP
172{
173 u32 val = 0;
174 val |= qid & DB_TXULP_RING_ID_MASK;
175 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
f3eb62d2
SP
176
177 wmb();
8788fdc2 178 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
6b7c5b94
SP
179}
180
8788fdc2 181static void be_eq_notify(struct be_adapter *adapter, u16 qid,
6b7c5b94
SP
182 bool arm, bool clear_int, u16 num_popped)
183{
184 u32 val = 0;
185 val |= qid & DB_EQ_RING_ID_MASK;
fe6d2a38
SP
186 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
187 DB_EQ_RING_ID_EXT_MASK_SHIFT);
cf588477
SP
188
189 if (adapter->eeh_err)
190 return;
191
6b7c5b94
SP
192 if (arm)
193 val |= 1 << DB_EQ_REARM_SHIFT;
194 if (clear_int)
195 val |= 1 << DB_EQ_CLR_SHIFT;
196 val |= 1 << DB_EQ_EVNT_SHIFT;
197 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
8788fdc2 198 iowrite32(val, adapter->db + DB_EQ_OFFSET);
6b7c5b94
SP
199}
200
8788fdc2 201void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
6b7c5b94
SP
202{
203 u32 val = 0;
204 val |= qid & DB_CQ_RING_ID_MASK;
fe6d2a38
SP
205 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
206 DB_CQ_RING_ID_EXT_MASK_SHIFT);
cf588477
SP
207
208 if (adapter->eeh_err)
209 return;
210
6b7c5b94
SP
211 if (arm)
212 val |= 1 << DB_CQ_REARM_SHIFT;
213 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
8788fdc2 214 iowrite32(val, adapter->db + DB_CQ_OFFSET);
6b7c5b94
SP
215}
216
6b7c5b94
SP
217static int be_mac_addr_set(struct net_device *netdev, void *p)
218{
219 struct be_adapter *adapter = netdev_priv(netdev);
220 struct sockaddr *addr = p;
221 int status = 0;
222
ca9e4988
AK
223 if (!is_valid_ether_addr(addr->sa_data))
224 return -EADDRNOTAVAIL;
225
ba343c77
SB
226 /* MAC addr configuration will be done in hardware for VFs
227 * by their corresponding PFs. Just copy to netdev addr here
228 */
229 if (!be_physfn(adapter))
230 goto netdev_addr;
231
f8617e08
AK
232 status = be_cmd_pmac_del(adapter, adapter->if_handle,
233 adapter->pmac_id, 0);
a65027e4
SP
234 if (status)
235 return status;
6b7c5b94 236
a65027e4 237 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
f8617e08 238 adapter->if_handle, &adapter->pmac_id, 0);
ba343c77 239netdev_addr:
6b7c5b94
SP
240 if (!status)
241 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
242
243 return status;
244}
245
89a88ab8
AK
246static void populate_be2_stats(struct be_adapter *adapter)
247{
ac124ff9
SP
248 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
249 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
250 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
89a88ab8 251 struct be_port_rxf_stats_v0 *port_stats =
ac124ff9
SP
252 &rxf_stats->port[adapter->port_num];
253 struct be_drv_stats *drvs = &adapter->drv_stats;
89a88ab8 254
ac124ff9 255 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
89a88ab8
AK
256 drvs->rx_pause_frames = port_stats->rx_pause_frames;
257 drvs->rx_crc_errors = port_stats->rx_crc_errors;
258 drvs->rx_control_frames = port_stats->rx_control_frames;
259 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
260 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
261 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
262 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
263 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
264 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
265 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
266 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
267 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
268 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
269 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
ac124ff9 270 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
89a88ab8
AK
271 drvs->rx_dropped_header_too_small =
272 port_stats->rx_dropped_header_too_small;
ac124ff9 273 drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
89a88ab8
AK
274 drvs->rx_alignment_symbol_errors =
275 port_stats->rx_alignment_symbol_errors;
276
277 drvs->tx_pauseframes = port_stats->tx_pauseframes;
278 drvs->tx_controlframes = port_stats->tx_controlframes;
279
280 if (adapter->port_num)
ac124ff9 281 drvs->jabber_events = rxf_stats->port1_jabber_events;
89a88ab8 282 else
ac124ff9 283 drvs->jabber_events = rxf_stats->port0_jabber_events;
89a88ab8
AK
284 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
285 drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
286 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
287 drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
288 drvs->forwarded_packets = rxf_stats->forwarded_packets;
289 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
ac124ff9
SP
290 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
291 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
89a88ab8
AK
292 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
293}
294
295static void populate_be3_stats(struct be_adapter *adapter)
296{
ac124ff9
SP
297 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
298 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
299 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
89a88ab8 300 struct be_port_rxf_stats_v1 *port_stats =
ac124ff9
SP
301 &rxf_stats->port[adapter->port_num];
302 struct be_drv_stats *drvs = &adapter->drv_stats;
89a88ab8 303
ac124ff9 304 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
89a88ab8
AK
305 drvs->rx_pause_frames = port_stats->rx_pause_frames;
306 drvs->rx_crc_errors = port_stats->rx_crc_errors;
307 drvs->rx_control_frames = port_stats->rx_control_frames;
308 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
309 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
310 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
311 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
312 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
313 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
314 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
315 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
316 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
317 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
318 drvs->rx_dropped_header_too_small =
319 port_stats->rx_dropped_header_too_small;
320 drvs->rx_input_fifo_overflow_drop =
321 port_stats->rx_input_fifo_overflow_drop;
ac124ff9 322 drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
89a88ab8
AK
323 drvs->rx_alignment_symbol_errors =
324 port_stats->rx_alignment_symbol_errors;
ac124ff9 325 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
89a88ab8
AK
326 drvs->tx_pauseframes = port_stats->tx_pauseframes;
327 drvs->tx_controlframes = port_stats->tx_controlframes;
328 drvs->jabber_events = port_stats->jabber_events;
329 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
330 drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
331 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
332 drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
333 drvs->forwarded_packets = rxf_stats->forwarded_packets;
334 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
ac124ff9
SP
335 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
336 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
89a88ab8
AK
337 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
338}
339
005d5696
SX
340static void populate_lancer_stats(struct be_adapter *adapter)
341{
89a88ab8 342
005d5696 343 struct be_drv_stats *drvs = &adapter->drv_stats;
ac124ff9
SP
344 struct lancer_pport_stats *pport_stats =
345 pport_stats_from_cmd(adapter);
346
347 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
348 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
349 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
350 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
005d5696 351 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
ac124ff9 352 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
005d5696
SX
353 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
354 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
355 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
356 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
357 drvs->rx_dropped_tcp_length =
358 pport_stats->rx_dropped_invalid_tcp_length;
359 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
360 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
361 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
362 drvs->rx_dropped_header_too_small =
363 pport_stats->rx_dropped_header_too_small;
364 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
365 drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
ac124ff9 366 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
005d5696 367 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
ac124ff9
SP
368 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
369 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
005d5696 370 drvs->jabber_events = pport_stats->rx_jabbers;
005d5696 371 drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
ac124ff9
SP
372 drvs->forwarded_packets = pport_stats->num_forwards_lo;
373 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
005d5696 374 drvs->rx_drops_too_many_frags =
ac124ff9 375 pport_stats->rx_drops_too_many_frags_lo;
005d5696 376}
89a88ab8
AK
377
378void be_parse_stats(struct be_adapter *adapter)
379{
ac124ff9
SP
380 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
381 struct be_rx_obj *rxo;
382 int i;
383
005d5696
SX
384 if (adapter->generation == BE_GEN3) {
385 if (lancer_chip(adapter))
386 populate_lancer_stats(adapter);
387 else
388 populate_be3_stats(adapter);
389 } else {
89a88ab8 390 populate_be2_stats(adapter);
005d5696 391 }
ac124ff9
SP
392
393 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
394 for_all_rx_queues(adapter, rxo, i)
395 rx_stats(rxo)->rx_drops_no_frags =
396 erx->rx_drops_no_fragments[rxo->q.id];
89a88ab8
AK
397}
398
ab1594e9
SP
399static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
400 struct rtnl_link_stats64 *stats)
6b7c5b94 401{
ab1594e9 402 struct be_adapter *adapter = netdev_priv(netdev);
89a88ab8 403 struct be_drv_stats *drvs = &adapter->drv_stats;
3abcdeda 404 struct be_rx_obj *rxo;
3c8def97 405 struct be_tx_obj *txo;
ab1594e9
SP
406 u64 pkts, bytes;
407 unsigned int start;
3abcdeda 408 int i;
6b7c5b94 409
3abcdeda 410 for_all_rx_queues(adapter, rxo, i) {
ab1594e9
SP
411 const struct be_rx_stats *rx_stats = rx_stats(rxo);
412 do {
413 start = u64_stats_fetch_begin_bh(&rx_stats->sync);
414 pkts = rx_stats(rxo)->rx_pkts;
415 bytes = rx_stats(rxo)->rx_bytes;
416 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
417 stats->rx_packets += pkts;
418 stats->rx_bytes += bytes;
419 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
420 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
421 rx_stats(rxo)->rx_drops_no_frags;
3abcdeda
SP
422 }
423
3c8def97 424 for_all_tx_queues(adapter, txo, i) {
ab1594e9
SP
425 const struct be_tx_stats *tx_stats = tx_stats(txo);
426 do {
427 start = u64_stats_fetch_begin_bh(&tx_stats->sync);
428 pkts = tx_stats(txo)->tx_pkts;
429 bytes = tx_stats(txo)->tx_bytes;
430 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
431 stats->tx_packets += pkts;
432 stats->tx_bytes += bytes;
3c8def97 433 }
6b7c5b94
SP
434
435 /* bad pkts received */
ab1594e9 436 stats->rx_errors = drvs->rx_crc_errors +
89a88ab8
AK
437 drvs->rx_alignment_symbol_errors +
438 drvs->rx_in_range_errors +
439 drvs->rx_out_range_errors +
440 drvs->rx_frame_too_long +
441 drvs->rx_dropped_too_small +
442 drvs->rx_dropped_too_short +
443 drvs->rx_dropped_header_too_small +
444 drvs->rx_dropped_tcp_length +
ab1594e9 445 drvs->rx_dropped_runt;
68110868 446
6b7c5b94 447 /* detailed rx errors */
ab1594e9 448 stats->rx_length_errors = drvs->rx_in_range_errors +
89a88ab8
AK
449 drvs->rx_out_range_errors +
450 drvs->rx_frame_too_long;
68110868 451
ab1594e9 452 stats->rx_crc_errors = drvs->rx_crc_errors;
6b7c5b94
SP
453
454 /* frame alignment errors */
ab1594e9 455 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
68110868 456
6b7c5b94
SP
457 /* receiver fifo overrun */
458 /* drops_no_pbuf is no per i/f, it's per BE card */
ab1594e9 459 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
89a88ab8
AK
460 drvs->rx_input_fifo_overflow_drop +
461 drvs->rx_drops_no_pbuf;
ab1594e9 462 return stats;
6b7c5b94
SP
463}
464
ea172a01 465void be_link_status_update(struct be_adapter *adapter, u32 link_status)
6b7c5b94 466{
6b7c5b94
SP
467 struct net_device *netdev = adapter->netdev;
468
ea172a01
SP
469 /* when link status changes, link speed must be re-queried from card */
470 adapter->link_speed = -1;
471 if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
472 netif_carrier_on(netdev);
473 dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
474 } else {
475 netif_carrier_off(netdev);
476 dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
6b7c5b94 477 }
6b7c5b94
SP
478}
479
3c8def97 480static void be_tx_stats_update(struct be_tx_obj *txo,
91992e44 481 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
6b7c5b94 482{
3c8def97
SP
483 struct be_tx_stats *stats = tx_stats(txo);
484
ab1594e9 485 u64_stats_update_begin(&stats->sync);
ac124ff9
SP
486 stats->tx_reqs++;
487 stats->tx_wrbs += wrb_cnt;
488 stats->tx_bytes += copied;
489 stats->tx_pkts += (gso_segs ? gso_segs : 1);
6b7c5b94 490 if (stopped)
ac124ff9 491 stats->tx_stops++;
ab1594e9 492 u64_stats_update_end(&stats->sync);
6b7c5b94
SP
493}
494
495/* Determine number of WRB entries needed to xmit data in an skb */
fe6d2a38
SP
496static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
497 bool *dummy)
6b7c5b94 498{
ebc8d2ab
DM
499 int cnt = (skb->len > skb->data_len);
500
501 cnt += skb_shinfo(skb)->nr_frags;
502
6b7c5b94
SP
503 /* to account for hdr wrb */
504 cnt++;
fe6d2a38
SP
505 if (lancer_chip(adapter) || !(cnt & 1)) {
506 *dummy = false;
507 } else {
6b7c5b94
SP
508 /* add a dummy to make it an even num */
509 cnt++;
510 *dummy = true;
fe6d2a38 511 }
6b7c5b94
SP
512 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
513 return cnt;
514}
515
516static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
517{
518 wrb->frag_pa_hi = upper_32_bits(addr);
519 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
520 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
521}
522
cc4ce020
SK
523static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
524 struct sk_buff *skb, u32 wrb_cnt, u32 len)
6b7c5b94 525{
cc4ce020
SK
526 u8 vlan_prio = 0;
527 u16 vlan_tag = 0;
528
6b7c5b94
SP
529 memset(hdr, 0, sizeof(*hdr));
530
531 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
532
49e4b847 533 if (skb_is_gso(skb)) {
6b7c5b94
SP
534 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
535 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
536 hdr, skb_shinfo(skb)->gso_size);
fe6d2a38 537 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
49e4b847 538 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
fe6d2a38
SP
539 if (lancer_chip(adapter) && adapter->sli_family ==
540 LANCER_A0_SLI_FAMILY) {
541 AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
542 if (is_tcp_pkt(skb))
543 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
544 tcpcs, hdr, 1);
545 else if (is_udp_pkt(skb))
546 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
547 udpcs, hdr, 1);
548 }
6b7c5b94
SP
549 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
550 if (is_tcp_pkt(skb))
551 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
552 else if (is_udp_pkt(skb))
553 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
554 }
555
4c5102f9 556 if (vlan_tx_tag_present(skb)) {
6b7c5b94 557 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
cc4ce020
SK
558 vlan_tag = vlan_tx_tag_get(skb);
559 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
560 /* If vlan priority provided by OS is NOT in available bmap */
561 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
562 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
563 adapter->recommended_prio;
564 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
6b7c5b94
SP
565 }
566
567 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
568 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
569 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
570 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
571}
572
2b7bcebf 573static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
7101e111
SP
574 bool unmap_single)
575{
576 dma_addr_t dma;
577
578 be_dws_le_to_cpu(wrb, sizeof(*wrb));
579
580 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
b681ee77 581 if (wrb->frag_len) {
7101e111 582 if (unmap_single)
2b7bcebf
IV
583 dma_unmap_single(dev, dma, wrb->frag_len,
584 DMA_TO_DEVICE);
7101e111 585 else
2b7bcebf 586 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
7101e111
SP
587 }
588}
6b7c5b94 589
3c8def97 590static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
6b7c5b94
SP
591 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
592{
7101e111
SP
593 dma_addr_t busaddr;
594 int i, copied = 0;
2b7bcebf 595 struct device *dev = &adapter->pdev->dev;
6b7c5b94 596 struct sk_buff *first_skb = skb;
6b7c5b94
SP
597 struct be_eth_wrb *wrb;
598 struct be_eth_hdr_wrb *hdr;
7101e111
SP
599 bool map_single = false;
600 u16 map_head;
6b7c5b94 601
6b7c5b94
SP
602 hdr = queue_head_node(txq);
603 queue_head_inc(txq);
7101e111 604 map_head = txq->head;
6b7c5b94 605
ebc8d2ab 606 if (skb->len > skb->data_len) {
e743d313 607 int len = skb_headlen(skb);
2b7bcebf
IV
608 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
609 if (dma_mapping_error(dev, busaddr))
7101e111
SP
610 goto dma_err;
611 map_single = true;
ebc8d2ab
DM
612 wrb = queue_head_node(txq);
613 wrb_fill(wrb, busaddr, len);
614 be_dws_cpu_to_le(wrb, sizeof(*wrb));
615 queue_head_inc(txq);
616 copied += len;
617 }
6b7c5b94 618
ebc8d2ab
DM
619 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
620 struct skb_frag_struct *frag =
621 &skb_shinfo(skb)->frags[i];
2b7bcebf
IV
622 busaddr = dma_map_page(dev, frag->page, frag->page_offset,
623 frag->size, DMA_TO_DEVICE);
624 if (dma_mapping_error(dev, busaddr))
7101e111 625 goto dma_err;
ebc8d2ab
DM
626 wrb = queue_head_node(txq);
627 wrb_fill(wrb, busaddr, frag->size);
628 be_dws_cpu_to_le(wrb, sizeof(*wrb));
629 queue_head_inc(txq);
630 copied += frag->size;
6b7c5b94
SP
631 }
632
633 if (dummy_wrb) {
634 wrb = queue_head_node(txq);
635 wrb_fill(wrb, 0, 0);
636 be_dws_cpu_to_le(wrb, sizeof(*wrb));
637 queue_head_inc(txq);
638 }
639
cc4ce020 640 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
6b7c5b94
SP
641 be_dws_cpu_to_le(hdr, sizeof(*hdr));
642
643 return copied;
7101e111
SP
644dma_err:
645 txq->head = map_head;
646 while (copied) {
647 wrb = queue_head_node(txq);
2b7bcebf 648 unmap_tx_frag(dev, wrb, map_single);
7101e111
SP
649 map_single = false;
650 copied -= wrb->frag_len;
651 queue_head_inc(txq);
652 }
653 return 0;
6b7c5b94
SP
654}
655
61357325 656static netdev_tx_t be_xmit(struct sk_buff *skb,
b31c50a7 657 struct net_device *netdev)
6b7c5b94
SP
658{
659 struct be_adapter *adapter = netdev_priv(netdev);
3c8def97
SP
660 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
661 struct be_queue_info *txq = &txo->q;
6b7c5b94
SP
662 u32 wrb_cnt = 0, copied = 0;
663 u32 start = txq->head;
664 bool dummy_wrb, stopped = false;
665
fe6d2a38 666 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
6b7c5b94 667
3c8def97 668 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
c190e3c8
AK
669 if (copied) {
670 /* record the sent skb in the sent_skb table */
3c8def97
SP
671 BUG_ON(txo->sent_skb_list[start]);
672 txo->sent_skb_list[start] = skb;
c190e3c8
AK
673
674 /* Ensure txq has space for the next skb; Else stop the queue
675 * *BEFORE* ringing the tx doorbell, so that we serialze the
676 * tx compls of the current transmit which'll wake up the queue
677 */
7101e111 678 atomic_add(wrb_cnt, &txq->used);
c190e3c8
AK
679 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
680 txq->len) {
3c8def97 681 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
c190e3c8
AK
682 stopped = true;
683 }
6b7c5b94 684
c190e3c8 685 be_txq_notify(adapter, txq->id, wrb_cnt);
6b7c5b94 686
3c8def97 687 be_tx_stats_update(txo, wrb_cnt, copied,
91992e44 688 skb_shinfo(skb)->gso_segs, stopped);
c190e3c8
AK
689 } else {
690 txq->head = start;
691 dev_kfree_skb_any(skb);
6b7c5b94 692 }
6b7c5b94
SP
693 return NETDEV_TX_OK;
694}
695
696static int be_change_mtu(struct net_device *netdev, int new_mtu)
697{
698 struct be_adapter *adapter = netdev_priv(netdev);
699 if (new_mtu < BE_MIN_MTU ||
34a89b8c
AK
700 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
701 (ETH_HLEN + ETH_FCS_LEN))) {
6b7c5b94
SP
702 dev_info(&adapter->pdev->dev,
703 "MTU must be between %d and %d bytes\n",
34a89b8c
AK
704 BE_MIN_MTU,
705 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
6b7c5b94
SP
706 return -EINVAL;
707 }
708 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
709 netdev->mtu, new_mtu);
710 netdev->mtu = new_mtu;
711 return 0;
712}
713
714/*
82903e4b
AK
715 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
716 * If the user configures more, place BE in vlan promiscuous mode.
6b7c5b94 717 */
1da87b7f 718static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
6b7c5b94 719{
6b7c5b94
SP
720 u16 vtag[BE_NUM_VLANS_SUPPORTED];
721 u16 ntags = 0, i;
82903e4b 722 int status = 0;
1da87b7f
AK
723 u32 if_handle;
724
725 if (vf) {
726 if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
727 vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
728 status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
729 }
6b7c5b94 730
c0e64ef4
SP
731 /* No need to further configure vids if in promiscuous mode */
732 if (adapter->promiscuous)
733 return 0;
734
82903e4b 735 if (adapter->vlans_added <= adapter->max_vlans) {
6b7c5b94 736 /* Construct VLAN Table to give to HW */
b738127d 737 for (i = 0; i < VLAN_N_VID; i++) {
6b7c5b94
SP
738 if (adapter->vlan_tag[i]) {
739 vtag[ntags] = cpu_to_le16(i);
740 ntags++;
741 }
742 }
b31c50a7
SP
743 status = be_cmd_vlan_config(adapter, adapter->if_handle,
744 vtag, ntags, 1, 0);
6b7c5b94 745 } else {
b31c50a7
SP
746 status = be_cmd_vlan_config(adapter, adapter->if_handle,
747 NULL, 0, 1, 1);
6b7c5b94 748 }
1da87b7f 749
b31c50a7 750 return status;
6b7c5b94
SP
751}
752
6b7c5b94
SP
753static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
754{
755 struct be_adapter *adapter = netdev_priv(netdev);
756
1da87b7f 757 adapter->vlans_added++;
ba343c77
SB
758 if (!be_physfn(adapter))
759 return;
760
6b7c5b94 761 adapter->vlan_tag[vid] = 1;
82903e4b 762 if (adapter->vlans_added <= (adapter->max_vlans + 1))
1da87b7f 763 be_vid_config(adapter, false, 0);
6b7c5b94
SP
764}
765
766static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
767{
768 struct be_adapter *adapter = netdev_priv(netdev);
769
1da87b7f 770 adapter->vlans_added--;
1da87b7f 771
ba343c77
SB
772 if (!be_physfn(adapter))
773 return;
774
6b7c5b94 775 adapter->vlan_tag[vid] = 0;
82903e4b 776 if (adapter->vlans_added <= adapter->max_vlans)
1da87b7f 777 be_vid_config(adapter, false, 0);
6b7c5b94
SP
778}
779
24307eef 780static void be_set_multicast_list(struct net_device *netdev)
6b7c5b94
SP
781{
782 struct be_adapter *adapter = netdev_priv(netdev);
6b7c5b94 783
24307eef 784 if (netdev->flags & IFF_PROMISC) {
5b8821b7 785 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
24307eef
SP
786 adapter->promiscuous = true;
787 goto done;
6b7c5b94
SP
788 }
789
25985edc 790 /* BE was previously in promiscuous mode; disable it */
24307eef
SP
791 if (adapter->promiscuous) {
792 adapter->promiscuous = false;
5b8821b7 793 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
c0e64ef4
SP
794
795 if (adapter->vlans_added)
796 be_vid_config(adapter, false, 0);
6b7c5b94
SP
797 }
798
e7b909a6 799 /* Enable multicast promisc if num configured exceeds what we support */
4cd24eaf 800 if (netdev->flags & IFF_ALLMULTI ||
5b8821b7
SP
801 netdev_mc_count(netdev) > BE_MAX_MC) {
802 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
24307eef 803 goto done;
6b7c5b94 804 }
6b7c5b94 805
5b8821b7 806 be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
24307eef
SP
807done:
808 return;
6b7c5b94
SP
809}
810
ba343c77
SB
811static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
812{
813 struct be_adapter *adapter = netdev_priv(netdev);
814 int status;
815
816 if (!adapter->sriov_enabled)
817 return -EPERM;
818
819 if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
820 return -EINVAL;
821
64600ea5
AK
822 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
823 status = be_cmd_pmac_del(adapter,
824 adapter->vf_cfg[vf].vf_if_handle,
f8617e08 825 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
ba343c77 826
64600ea5
AK
827 status = be_cmd_pmac_add(adapter, mac,
828 adapter->vf_cfg[vf].vf_if_handle,
f8617e08 829 &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
64600ea5
AK
830
831 if (status)
ba343c77
SB
832 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
833 mac, vf);
64600ea5
AK
834 else
835 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
836
ba343c77
SB
837 return status;
838}
839
64600ea5
AK
840static int be_get_vf_config(struct net_device *netdev, int vf,
841 struct ifla_vf_info *vi)
842{
843 struct be_adapter *adapter = netdev_priv(netdev);
844
845 if (!adapter->sriov_enabled)
846 return -EPERM;
847
848 if (vf >= num_vfs)
849 return -EINVAL;
850
851 vi->vf = vf;
e1d18735 852 vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
1da87b7f 853 vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
64600ea5
AK
854 vi->qos = 0;
855 memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
856
857 return 0;
858}
859
1da87b7f
AK
860static int be_set_vf_vlan(struct net_device *netdev,
861 int vf, u16 vlan, u8 qos)
862{
863 struct be_adapter *adapter = netdev_priv(netdev);
864 int status = 0;
865
866 if (!adapter->sriov_enabled)
867 return -EPERM;
868
869 if ((vf >= num_vfs) || (vlan > 4095))
870 return -EINVAL;
871
872 if (vlan) {
873 adapter->vf_cfg[vf].vf_vlan_tag = vlan;
874 adapter->vlans_added++;
875 } else {
876 adapter->vf_cfg[vf].vf_vlan_tag = 0;
877 adapter->vlans_added--;
878 }
879
880 status = be_vid_config(adapter, true, vf);
881
882 if (status)
883 dev_info(&adapter->pdev->dev,
884 "VLAN %d config on VF %d failed\n", vlan, vf);
885 return status;
886}
887
e1d18735
AK
888static int be_set_vf_tx_rate(struct net_device *netdev,
889 int vf, int rate)
890{
891 struct be_adapter *adapter = netdev_priv(netdev);
892 int status = 0;
893
894 if (!adapter->sriov_enabled)
895 return -EPERM;
896
897 if ((vf >= num_vfs) || (rate < 0))
898 return -EINVAL;
899
900 if (rate > 10000)
901 rate = 10000;
902
903 adapter->vf_cfg[vf].vf_tx_rate = rate;
856c4012 904 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
e1d18735
AK
905
906 if (status)
907 dev_info(&adapter->pdev->dev,
908 "tx rate %d on VF %d failed\n", rate, vf);
909 return status;
910}
911
ac124ff9 912static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
6b7c5b94 913{
ac124ff9
SP
914 struct be_eq_obj *rx_eq = &rxo->rx_eq;
915 struct be_rx_stats *stats = rx_stats(rxo);
4097f663 916 ulong now = jiffies;
ac124ff9 917 ulong delta = now - stats->rx_jiffies;
ab1594e9
SP
918 u64 pkts;
919 unsigned int start, eqd;
ac124ff9
SP
920
921 if (!rx_eq->enable_aic)
922 return;
6b7c5b94 923
4097f663 924 /* Wrapped around */
3abcdeda
SP
925 if (time_before(now, stats->rx_jiffies)) {
926 stats->rx_jiffies = now;
4097f663
SP
927 return;
928 }
6b7c5b94 929
ac124ff9
SP
930 /* Update once a second */
931 if (delta < HZ)
6b7c5b94
SP
932 return;
933
ab1594e9
SP
934 do {
935 start = u64_stats_fetch_begin_bh(&stats->sync);
936 pkts = stats->rx_pkts;
937 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
938
939 stats->rx_pps = (pkts - stats->rx_pkts_prev) / (delta / HZ);
940 stats->rx_pkts_prev = pkts;
3abcdeda 941 stats->rx_jiffies = now;
ac124ff9
SP
942 eqd = stats->rx_pps / 110000;
943 eqd = eqd << 3;
944 if (eqd > rx_eq->max_eqd)
945 eqd = rx_eq->max_eqd;
946 if (eqd < rx_eq->min_eqd)
947 eqd = rx_eq->min_eqd;
948 if (eqd < 10)
949 eqd = 0;
950 if (eqd != rx_eq->cur_eqd) {
951 be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
952 rx_eq->cur_eqd = eqd;
953 }
6b7c5b94
SP
954}
955
3abcdeda 956static void be_rx_stats_update(struct be_rx_obj *rxo,
2e588f84 957 struct be_rx_compl_info *rxcp)
4097f663 958{
ac124ff9 959 struct be_rx_stats *stats = rx_stats(rxo);
1ef78abe 960
ab1594e9 961 u64_stats_update_begin(&stats->sync);
3abcdeda 962 stats->rx_compl++;
2e588f84 963 stats->rx_bytes += rxcp->pkt_size;
3abcdeda 964 stats->rx_pkts++;
2e588f84 965 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
3abcdeda 966 stats->rx_mcast_pkts++;
2e588f84 967 if (rxcp->err)
ac124ff9 968 stats->rx_compl_err++;
ab1594e9 969 u64_stats_update_end(&stats->sync);
4097f663
SP
970}
971
2e588f84 972static inline bool csum_passed(struct be_rx_compl_info *rxcp)
728a9972 973{
19fad86f
PR
974 /* L4 checksum is not reliable for non TCP/UDP packets.
975 * Also ignore ipcksm for ipv6 pkts */
2e588f84
SP
976 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
977 (rxcp->ip_csum || rxcp->ipv6);
728a9972
AK
978}
979
6b7c5b94 980static struct be_rx_page_info *
3abcdeda
SP
981get_rx_page_info(struct be_adapter *adapter,
982 struct be_rx_obj *rxo,
983 u16 frag_idx)
6b7c5b94
SP
984{
985 struct be_rx_page_info *rx_page_info;
3abcdeda 986 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 987
3abcdeda 988 rx_page_info = &rxo->page_info_tbl[frag_idx];
6b7c5b94
SP
989 BUG_ON(!rx_page_info->page);
990
205859a2 991 if (rx_page_info->last_page_user) {
2b7bcebf
IV
992 dma_unmap_page(&adapter->pdev->dev,
993 dma_unmap_addr(rx_page_info, bus),
994 adapter->big_page_size, DMA_FROM_DEVICE);
205859a2
AK
995 rx_page_info->last_page_user = false;
996 }
6b7c5b94
SP
997
998 atomic_dec(&rxq->used);
999 return rx_page_info;
1000}
1001
1002/* Throwaway the data in the Rx completion */
1003static void be_rx_compl_discard(struct be_adapter *adapter,
3abcdeda 1004 struct be_rx_obj *rxo,
2e588f84 1005 struct be_rx_compl_info *rxcp)
6b7c5b94 1006{
3abcdeda 1007 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 1008 struct be_rx_page_info *page_info;
2e588f84 1009 u16 i, num_rcvd = rxcp->num_rcvd;
6b7c5b94 1010
e80d9da6 1011 for (i = 0; i < num_rcvd; i++) {
2e588f84 1012 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
e80d9da6
PR
1013 put_page(page_info->page);
1014 memset(page_info, 0, sizeof(*page_info));
2e588f84 1015 index_inc(&rxcp->rxq_idx, rxq->len);
6b7c5b94
SP
1016 }
1017}
1018
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Header lives in skb->data; the rest of the first frag is
		 * attached as page fragment 0 */
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
			page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	/* Single-fragment packet: nothing more to gather */
	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
				page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same compound page as the previous frag: drop the
			 * extra page ref taken when the buffer was posted */
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1094
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		/* No skb memory: count the drop and reclaim the rx frags */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	/* Only claim a good checksum when h/w verified it (csum_passed) */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;


	if (unlikely(rxcp->vlanf))
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1128
5be93b9a
AK
/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		/* No skb available; drop the frame and free the buffers */
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	/* Attach every received fragment directly as an skb page frag */
	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
				page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			/* Same compound page: drop the extra page ref taken
			 * when the buffer was posted */
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->truesize += rxcp->pkt_size;
	/* GRO is only attempted for frames whose csum h/w verified */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (unlikely(rxcp->vlanf))
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(&eq_obj->napi);
}
1185
1186static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1187 struct be_eth_rx_compl *compl,
1188 struct be_rx_compl_info *rxcp)
1189{
1190 rxcp->pkt_size =
1191 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1192 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1193 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1194 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
9ecb42fd 1195 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
2e588f84
SP
1196 rxcp->ip_csum =
1197 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1198 rxcp->l4_csum =
1199 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1200 rxcp->ipv6 =
1201 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1202 rxcp->rxq_idx =
1203 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1204 rxcp->num_rcvd =
1205 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1206 rxcp->pkt_type =
1207 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
4b972914
AK
1208 rxcp->rss_hash =
1209 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
15d72184
SP
1210 if (rxcp->vlanf) {
1211 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
3c709f8f
DM
1212 compl);
1213 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1214 compl);
15d72184 1215 }
2e588f84
SP
1216}
1217
1218static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1219 struct be_eth_rx_compl *compl,
1220 struct be_rx_compl_info *rxcp)
1221{
1222 rxcp->pkt_size =
1223 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1224 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1225 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1226 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
9ecb42fd 1227 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
2e588f84
SP
1228 rxcp->ip_csum =
1229 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1230 rxcp->l4_csum =
1231 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1232 rxcp->ipv6 =
1233 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1234 rxcp->rxq_idx =
1235 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1236 rxcp->num_rcvd =
1237 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1238 rxcp->pkt_type =
1239 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
4b972914
AK
1240 rxcp->rss_hash =
1241 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
15d72184
SP
1242 if (rxcp->vlanf) {
1243 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
3c709f8f
DM
1244 compl);
1245 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1246 compl);
15d72184 1247 }
2e588f84
SP
1248}
1249
/* Fetch, parse and consume the RX completion at the tail of rxo's CQ.
 * Returns NULL when no valid completion is pending; otherwise returns
 * &rxo->rxcp filled in and advances the CQ tail.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Order the valid-bit read before reading the rest of the entry */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		/* NOTE(review): 0x400 appears to be a function_mode flag
		 * (multi-channel?) — confirm against the f/w spec */
		if ((adapter->function_mode & 0x400) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide a tag matching the port's pvid from the stack unless
		 * that vlan was explicitly configured by the user */
		if (((adapter->pvid & VLAN_VID_MASK) ==
		     (rxcp->vlan_tag & VLAN_VID_MASK)) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1290
1829b086 1291static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 1292{
6b7c5b94 1293 u32 order = get_order(size);
1829b086 1294
6b7c5b94 1295 if (order > 0)
1829b086
ED
1296 gfp |= __GFP_COMP;
1297 return alloc_pages(gfp, order);
6b7c5b94
SP
1298}
1299
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	/* Post until the ring is full (next slot still owns a page) or
	 * MAX_RX_POST buffers have been queued */
	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			/* Carve the next rx_frag_size slice out of the
			 * current big page; each frag holds a page ref */
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		/* Fill the rx descriptor with the frag's DMA address */
		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	/* Loop ended mid-page: the last posted frag is the page's last user */
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1362
/* Fetch and consume the TX completion at the tail of @tx_cq, or return
 * NULL when none is pending.  Clears the valid bit and advances the tail.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Order the valid-bit read before reading the rest of the entry */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Reset valid so this entry is not seen again after wrap-around */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1378
3c8def97
SP
/* Unmap and free the wrbs/skb of one transmitted packet whose wrbs end
 * at @last_index in txo's TX queue.  Returns the number of wrbs consumed
 * (header wrb included) so the caller can adjust txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* the first data wrb may also carry the linear header
		 * mapping; unmap it exactly once */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1410
859b1e4e
SP
/* Pop the next valid entry off an event queue, or return NULL if none
 * is pending.  Converts the event word to CPU endianness and advances
 * the queue tail.
 */
static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	/* Order the valid check before any further use of the entry */
	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}
1423
/* Drain all pending events on @eq_obj, ack/re-arm the EQ and schedule
 * NAPI when any event was seen.  Returns the number of events drained.
 */
static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj,
			bool rearm)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	if (!num)
		rearm = true;

	be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}
1448
1449/* Just read and notify events without processing them.
1450 * Used at the time of destroying event queues */
1451static void be_eq_clean(struct be_adapter *adapter,
1452 struct be_eq_obj *eq_obj)
1453{
1454 struct be_eq_entry *eqe;
1455 u16 num = 0;
1456
1457 while ((eqe = event_get(eq_obj)) != NULL) {
1458 eqe->evt = 0;
1459 num++;
1460 }
1461
1462 if (num)
1463 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1464}
1465
/* Flush an RX queue at teardown: discard every pending completion, then
 * release the posted buffers that hardware never filled.
 */
static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffer that were not used */
	/* oldest still-posted buffer sits 'used' slots behind head */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1490
3c8def97
SP
/* Reap TX completions at queue-destroy time: wait up to ~200ms for
 * outstanding completions to arrive, then forcibly free the wrbs/skbs
 * for which no completion will ever come.
 */
static void be_tx_compl_clean(struct be_adapter *adapter,
		struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			atomic_sub(num_wrbs, &txq->used);
			cmpl = 0;
			num_wrbs = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		/* derive the last wrb index of this skb from its wrb count */
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
		atomic_sub(num_wrbs, &txq->used);
	}
}
1538
5fb379ee
SP
1539static void be_mcc_queues_destroy(struct be_adapter *adapter)
1540{
1541 struct be_queue_info *q;
5fb379ee 1542
8788fdc2 1543 q = &adapter->mcc_obj.q;
5fb379ee 1544 if (q->created)
8788fdc2 1545 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
1546 be_queue_free(adapter, q);
1547
8788fdc2 1548 q = &adapter->mcc_obj.cq;
5fb379ee 1549 if (q->created)
8788fdc2 1550 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
1551 be_queue_free(adapter, q);
1552}
1553
/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* unwind in reverse order of acquisition */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1589
6b7c5b94
SP
/* Destroy every TX queue and completion queue, then the shared TX event
 * queue, releasing both hardware objects and backing memory.
 */
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}
1616
/* One TX event queue is shared by all TX compl qs */
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_tx_obj *txo;
	u8 i;

	/* TX EQ uses a fixed delay; no adaptive coalescing */
	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;

	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
		sizeof(struct be_eq_entry)))
		return -1;

	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto err;
	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	/* per-queue: CQ alloc+create, then TXQ alloc+create */
	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
			goto err;

		if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
			goto err;

		q = &txo->q;
		if (be_queue_alloc(adapter, q, TX_Q_LEN,
			sizeof(struct be_eth_wrb)))
			goto err;

		if (be_cmd_txq_create(adapter, q, cq))
			goto err;
	}
	return 0;

err:
	/* be_tx_queues_destroy() copes with partially-created queues */
	be_tx_queues_destroy(adapter);
	return -1;
}
1661
/* Free all RX queues, completion queues and event queues.
 * NOTE(review): rxo->q is freed without a QTYPE_RXQ destroy here —
 * presumably the RXQ h/w object is destroyed in the close path (it is
 * created in be_open(), per be_rx_queues_create); verify.
 */
static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		be_queue_free(adapter, &rxo->q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		q = &rxo->rx_eq.q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		be_queue_free(adapter, q);
	}
}
1682
ac6a0c4a
SP
1683static u32 be_num_rxqs_want(struct be_adapter *adapter)
1684{
c814fd36 1685 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
ac6a0c4a
SP
1686 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1687 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1688 } else {
1689 dev_warn(&adapter->pdev->dev,
1690 "No support for multiple RX queues\n");
1691 return 1;
1692 }
1693}
1694
6b7c5b94
SP
/* Allocate and create the RX event queues and completion queues, and
 * allocate (but do not create) the RX queues themselves.
 */
static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* with MSI-X, one vector is reserved for TX/MCC */
	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				msix_enabled(adapter) ?
					adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			"Can create only %d RX queues", adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		/* Rx Q - will be created in be_open() */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;

	}

	return 0;
err:
	/* be_rx_queues_destroy() copes with partially-created queues */
	be_rx_queues_destroy(adapter);
	return -1;
}
6b7c5b94 1752
fe6d2a38 1753static bool event_peek(struct be_eq_obj *eq_obj)
b628bde2 1754{
fe6d2a38
SP
1755 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1756 if (!eqe->evt)
1757 return false;
1758 else
1759 return true;
b628bde2
SP
1760}
1761
6b7c5b94
SP
/* Legacy (INTx) interrupt handler.  Lancer chips have no CEV_ISR
 * register, so pending work is found by peeking the EQs directly; on
 * other chips the ISR register says which EQs fired.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0 , rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq, false);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq, true);
		}

		/* no pending events: not our interrupt */
		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		/* dispatch each EQ whose bit is set in the ISR */
		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq, false);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq, true);
		}
	}

	return IRQ_HANDLED;
}
1796
1797static irqreturn_t be_msix_rx(int irq, void *dev)
1798{
3abcdeda
SP
1799 struct be_rx_obj *rxo = dev;
1800 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1801
3c8def97 1802 event_handle(adapter, &rxo->rx_eq, true);
6b7c5b94
SP
1803
1804 return IRQ_HANDLED;
1805}
1806
5fb379ee 1807static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
6b7c5b94
SP
1808{
1809 struct be_adapter *adapter = dev;
1810
3c8def97 1811 event_handle(adapter, &adapter->tx_eq, false);
6b7c5b94
SP
1812
1813 return IRQ_HANDLED;
1814}
1815
2e588f84 1816static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 1817{
2e588f84 1818 return (rxcp->tcpf && !rxcp->err) ? true : false;
6b7c5b94
SP
1819}
1820
/* NAPI poll handler for an RX queue: consume up to @budget completions,
 * replenish the RX ring when it runs low, and re-arm the CQ only when
 * all pending work is done.
 */
static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rx_stats(rxo)->rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Ignore flush completions */
		if (rxcp->num_rcvd && rxcp->pkt_size) {
			if (do_gro(rxcp))
				be_rx_compl_process_gro(adapter, rxo, rxcp);
			else
				be_rx_compl_process(adapter, rxo, rxcp);
		} else if (rxcp->pkt_size == 0) {
			/* zero-length completion: just reclaim the frags */
			be_rx_compl_discard(adapter, rxo, rxcp);
		}

		be_rx_stats_update(rxo, rxcp);
	}

	/* Refill the queue */
	if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}
1863
f31e50a8
SP
1864/* As TX and MCC share the same EQ check for both TX and MCC completions.
1865 * For TX/MCC we don't honour budget; consume everything
1866 */
1867static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
6b7c5b94 1868{
f31e50a8
SP
1869 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1870 struct be_adapter *adapter =
1871 container_of(tx_eq, struct be_adapter, tx_eq);
3c8def97 1872 struct be_tx_obj *txo;
6b7c5b94 1873 struct be_eth_tx_compl *txcp;
3c8def97
SP
1874 int tx_compl, mcc_compl, status = 0;
1875 u8 i;
1876 u16 num_wrbs;
1877
1878 for_all_tx_queues(adapter, txo, i) {
1879 tx_compl = 0;
1880 num_wrbs = 0;
1881 while ((txcp = be_tx_compl_get(&txo->cq))) {
1882 num_wrbs += be_tx_compl_process(adapter, txo,
1883 AMAP_GET_BITS(struct amap_eth_tx_compl,
1884 wrb_index, txcp));
1885 tx_compl++;
1886 }
1887 if (tx_compl) {
1888 be_cq_notify(adapter, txo->cq.id, true, tx_compl);
1889
1890 atomic_sub(num_wrbs, &txo->q.used);
6b7c5b94 1891
3c8def97
SP
1892 /* As Tx wrbs have been freed up, wake up netdev queue
1893 * if it was stopped due to lack of tx wrbs. */
1894 if (__netif_subqueue_stopped(adapter->netdev, i) &&
1895 atomic_read(&txo->q.used) < txo->q.len / 2) {
1896 netif_wake_subqueue(adapter->netdev, i);
1897 }
1898
ab1594e9 1899 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
ac124ff9 1900 tx_stats(txo)->tx_compl += tx_compl;
ab1594e9 1901 u64_stats_update_end(&tx_stats(txo)->sync_compl);
3c8def97 1902 }
6b7c5b94
SP
1903 }
1904
f31e50a8
SP
1905 mcc_compl = be_process_mcc(adapter, &status);
1906
f31e50a8
SP
1907 if (mcc_compl) {
1908 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1909 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1910 }
1911
3c8def97 1912 napi_complete(napi);
6b7c5b94 1913
3c8def97 1914 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
ab1594e9 1915 adapter->drv_stats.tx_events++;
6b7c5b94
SP
1916 return 1;
1917}
1918
d053de91 1919void be_detect_dump_ue(struct be_adapter *adapter)
7c185276
AK
1920{
1921 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1922 u32 i;
1923
1924 pci_read_config_dword(adapter->pdev,
1925 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1926 pci_read_config_dword(adapter->pdev,
1927 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1928 pci_read_config_dword(adapter->pdev,
1929 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1930 pci_read_config_dword(adapter->pdev,
1931 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1932
1933 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1934 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1935
d053de91
AK
1936 if (ue_status_lo || ue_status_hi) {
1937 adapter->ue_detected = true;
7acc2087 1938 adapter->eeh_err = true;
d053de91
AK
1939 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1940 }
1941
7c185276
AK
1942 if (ue_status_lo) {
1943 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1944 if (ue_status_lo & 1)
1945 dev_err(&adapter->pdev->dev,
1946 "UE: %s bit set\n", ue_status_low_desc[i]);
1947 }
1948 }
1949 if (ue_status_hi) {
1950 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1951 if (ue_status_hi & 1)
1952 dev_err(&adapter->pdev->dev,
1953 "UE: %s bit set\n", ue_status_hi_desc[i]);
1954 }
1955 }
1956
1957}
1958
ea1dae11
SP
1959static void be_worker(struct work_struct *work)
1960{
1961 struct be_adapter *adapter =
1962 container_of(work, struct be_adapter, work.work);
3abcdeda
SP
1963 struct be_rx_obj *rxo;
1964 int i;
ea1dae11 1965
16da8250
SP
1966 if (!adapter->ue_detected && !lancer_chip(adapter))
1967 be_detect_dump_ue(adapter);
1968
f203af70
SK
1969 /* when interrupts are not yet enabled, just reap any pending
1970 * mcc completions */
1971 if (!netif_running(adapter->netdev)) {
1972 int mcc_compl, status = 0;
1973
1974 mcc_compl = be_process_mcc(adapter, &status);
1975
1976 if (mcc_compl) {
1977 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1978 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
1979 }
9b037f38 1980
f203af70
SK
1981 goto reschedule;
1982 }
1983
005d5696
SX
1984 if (!adapter->stats_cmd_sent) {
1985 if (lancer_chip(adapter))
1986 lancer_cmd_get_pport_stats(adapter,
1987 &adapter->stats_cmd);
1988 else
1989 be_cmd_get_stats(adapter, &adapter->stats_cmd);
1990 }
3c8def97 1991
3abcdeda 1992 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
1993 be_rx_eqd_update(adapter, rxo);
1994
1995 if (rxo->rx_post_starved) {
1996 rxo->rx_post_starved = false;
1829b086 1997 be_post_rx_frags(rxo, GFP_KERNEL);
3abcdeda 1998 }
ea1dae11
SP
1999 }
2000
f203af70 2001reschedule:
e74fbd03 2002 adapter->work_counter++;
ea1dae11
SP
2003 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2004}
2005
8d56ff11
SP
2006static void be_msix_disable(struct be_adapter *adapter)
2007{
ac6a0c4a 2008 if (msix_enabled(adapter)) {
8d56ff11 2009 pci_disable_msix(adapter->pdev);
ac6a0c4a 2010 adapter->num_msix_vec = 0;
3abcdeda
SP
2011 }
2012}
2013
6b7c5b94
SP
2014static void be_msix_enable(struct be_adapter *adapter)
2015{
3abcdeda 2016#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
ac6a0c4a 2017 int i, status, num_vec;
6b7c5b94 2018
ac6a0c4a 2019 num_vec = be_num_rxqs_want(adapter) + 1;
3abcdeda 2020
ac6a0c4a 2021 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
2022 adapter->msix_entries[i].entry = i;
2023
ac6a0c4a 2024 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
3abcdeda
SP
2025 if (status == 0) {
2026 goto done;
2027 } else if (status >= BE_MIN_MSIX_VECTORS) {
ac6a0c4a 2028 num_vec = status;
3abcdeda 2029 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
ac6a0c4a 2030 num_vec) == 0)
3abcdeda 2031 goto done;
3abcdeda
SP
2032 }
2033 return;
2034done:
ac6a0c4a
SP
2035 adapter->num_msix_vec = num_vec;
2036 return;
6b7c5b94
SP
2037}
2038
ba343c77
SB
/* Enable SR-IOV on the PF when the num_vfs module parameter asks for it.
 * Clamps num_vfs to the device's TotalVFs value read from the SR-IOV
 * extended capability.
 *
 * Fix: pci_find_ext_capability() may return 0 (capability absent); the
 * original read config space at a bogus offset in that case. Bail out
 * instead.
 */
static void be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 nvfs;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		if (!pos)
			return;

		pci_read_config_word(adapter->pdev,
					pos + PCI_SRIOV_TOTAL_VF, &nvfs);

		if (num_vfs > nvfs) {
			dev_info(&adapter->pdev->dev,
					"Device supports %d VFs and not %d\n",
					nvfs, num_vfs);
			num_vfs = nvfs;
		}

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;
	}
#endif
}
2064
/* Undo be_sriov_enable(); no-op when SR-IOV was never enabled. */
static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (!adapter->sriov_enabled)
		return;

	pci_disable_sriov(adapter->pdev);
	adapter->sriov_enabled = false;
#endif
}
2074
fe6d2a38
SP
2075static inline int be_msix_vec_get(struct be_adapter *adapter,
2076 struct be_eq_obj *eq_obj)
6b7c5b94 2077{
ecd62107 2078 return adapter->msix_entries[eq_obj->eq_idx].vector;
6b7c5b94
SP
2079}
2080
b628bde2
SP
2081static int be_request_irq(struct be_adapter *adapter,
2082 struct be_eq_obj *eq_obj,
3abcdeda 2083 void *handler, char *desc, void *context)
6b7c5b94
SP
2084{
2085 struct net_device *netdev = adapter->netdev;
b628bde2
SP
2086 int vec;
2087
2088 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
fe6d2a38 2089 vec = be_msix_vec_get(adapter, eq_obj);
3abcdeda 2090 return request_irq(vec, handler, 0, eq_obj->desc, context);
b628bde2
SP
2091}
2092
3abcdeda
SP
/* Release the IRQ that be_request_irq() acquired for this EQ. */
static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	free_irq(be_msix_vec_get(adapter, eq_obj), context);
}
6b7c5b94 2099
b628bde2
SP
2100static int be_msix_register(struct be_adapter *adapter)
2101{
3abcdeda
SP
2102 struct be_rx_obj *rxo;
2103 int status, i;
2104 char qname[10];
b628bde2 2105
3abcdeda
SP
2106 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2107 adapter);
6b7c5b94
SP
2108 if (status)
2109 goto err;
2110
3abcdeda
SP
2111 for_all_rx_queues(adapter, rxo, i) {
2112 sprintf(qname, "rxq%d", i);
2113 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2114 qname, rxo);
2115 if (status)
2116 goto err_msix;
2117 }
b628bde2 2118
6b7c5b94 2119 return 0;
b628bde2 2120
3abcdeda
SP
2121err_msix:
2122 be_free_irq(adapter, &adapter->tx_eq, adapter);
2123
2124 for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2125 be_free_irq(adapter, &rxo->rx_eq, rxo);
2126
6b7c5b94
SP
2127err:
2128 dev_warn(&adapter->pdev->dev,
2129 "MSIX Request IRQ failed - err %d\n", status);
ac6a0c4a 2130 be_msix_disable(adapter);
6b7c5b94
SP
2131 return status;
2132}
2133
2134static int be_irq_register(struct be_adapter *adapter)
2135{
2136 struct net_device *netdev = adapter->netdev;
2137 int status;
2138
ac6a0c4a 2139 if (msix_enabled(adapter)) {
6b7c5b94
SP
2140 status = be_msix_register(adapter);
2141 if (status == 0)
2142 goto done;
ba343c77
SB
2143 /* INTx is not supported for VF */
2144 if (!be_physfn(adapter))
2145 return status;
6b7c5b94
SP
2146 }
2147
2148 /* INTx */
2149 netdev->irq = adapter->pdev->irq;
2150 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2151 adapter);
2152 if (status) {
2153 dev_err(&adapter->pdev->dev,
2154 "INTx request IRQ failed - err %d\n", status);
2155 return status;
2156 }
2157done:
2158 adapter->isr_registered = true;
2159 return 0;
2160}
2161
2162static void be_irq_unregister(struct be_adapter *adapter)
2163{
2164 struct net_device *netdev = adapter->netdev;
3abcdeda
SP
2165 struct be_rx_obj *rxo;
2166 int i;
6b7c5b94
SP
2167
2168 if (!adapter->isr_registered)
2169 return;
2170
2171 /* INTx */
ac6a0c4a 2172 if (!msix_enabled(adapter)) {
6b7c5b94
SP
2173 free_irq(netdev->irq, adapter);
2174 goto done;
2175 }
2176
2177 /* MSIx */
3abcdeda
SP
2178 be_free_irq(adapter, &adapter->tx_eq, adapter);
2179
2180 for_all_rx_queues(adapter, rxo, i)
2181 be_free_irq(adapter, &rxo->rx_eq, rxo);
2182
6b7c5b94
SP
2183done:
2184 adapter->isr_registered = false;
6b7c5b94
SP
2185}
2186
482c9e79
SP
2187static void be_rx_queues_clear(struct be_adapter *adapter)
2188{
2189 struct be_queue_info *q;
2190 struct be_rx_obj *rxo;
2191 int i;
2192
2193 for_all_rx_queues(adapter, rxo, i) {
2194 q = &rxo->q;
2195 if (q->created) {
2196 be_cmd_rxq_destroy(adapter, q);
2197 /* After the rxq is invalidated, wait for a grace time
2198 * of 1ms for all dma to end and the flush compl to
2199 * arrive
2200 */
2201 mdelay(1);
2202 be_rx_q_clean(adapter, rxo);
2203 }
2204
2205 /* Clear any residual events */
2206 q = &rxo->rx_eq.q;
2207 if (q->created)
2208 be_eq_clean(adapter, &rxo->rx_eq);
2209 }
2210}
2211
889cd4b2
SP
2212static int be_close(struct net_device *netdev)
2213{
2214 struct be_adapter *adapter = netdev_priv(netdev);
3abcdeda 2215 struct be_rx_obj *rxo;
3c8def97 2216 struct be_tx_obj *txo;
889cd4b2 2217 struct be_eq_obj *tx_eq = &adapter->tx_eq;
3abcdeda 2218 int vec, i;
889cd4b2 2219
889cd4b2
SP
2220 be_async_mcc_disable(adapter);
2221
fe6d2a38
SP
2222 if (!lancer_chip(adapter))
2223 be_intr_set(adapter, false);
889cd4b2 2224
63fcb27f
PR
2225 for_all_rx_queues(adapter, rxo, i)
2226 napi_disable(&rxo->rx_eq.napi);
2227
2228 napi_disable(&tx_eq->napi);
2229
2230 if (lancer_chip(adapter)) {
63fcb27f
PR
2231 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2232 for_all_rx_queues(adapter, rxo, i)
2233 be_cq_notify(adapter, rxo->cq.id, false, 0);
3c8def97
SP
2234 for_all_tx_queues(adapter, txo, i)
2235 be_cq_notify(adapter, txo->cq.id, false, 0);
63fcb27f
PR
2236 }
2237
ac6a0c4a 2238 if (msix_enabled(adapter)) {
fe6d2a38 2239 vec = be_msix_vec_get(adapter, tx_eq);
889cd4b2 2240 synchronize_irq(vec);
3abcdeda
SP
2241
2242 for_all_rx_queues(adapter, rxo, i) {
fe6d2a38 2243 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
3abcdeda
SP
2244 synchronize_irq(vec);
2245 }
889cd4b2
SP
2246 } else {
2247 synchronize_irq(netdev->irq);
2248 }
2249 be_irq_unregister(adapter);
2250
889cd4b2
SP
2251 /* Wait for all pending tx completions to arrive so that
2252 * all tx skbs are freed.
2253 */
3c8def97
SP
2254 for_all_tx_queues(adapter, txo, i)
2255 be_tx_compl_clean(adapter, txo);
889cd4b2 2256
482c9e79
SP
2257 be_rx_queues_clear(adapter);
2258 return 0;
2259}
2260
2261static int be_rx_queues_setup(struct be_adapter *adapter)
2262{
2263 struct be_rx_obj *rxo;
2264 int rc, i;
2265 u8 rsstable[MAX_RSS_QS];
2266
2267 for_all_rx_queues(adapter, rxo, i) {
2268 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2269 rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
2270 adapter->if_handle,
2271 (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
2272 if (rc)
2273 return rc;
2274 }
2275
2276 if (be_multi_rxq(adapter)) {
2277 for_all_rss_queues(adapter, rxo, i)
2278 rsstable[i] = rxo->rss_id;
2279
2280 rc = be_cmd_rss_config(adapter, rsstable,
2281 adapter->num_rx_qs - 1);
2282 if (rc)
2283 return rc;
2284 }
2285
2286 /* First time posting */
2287 for_all_rx_queues(adapter, rxo, i) {
2288 be_post_rx_frags(rxo, GFP_KERNEL);
2289 napi_enable(&rxo->rx_eq.napi);
2290 }
889cd4b2
SP
2291 return 0;
2292}
2293
6b7c5b94
SP
2294static int be_open(struct net_device *netdev)
2295{
2296 struct be_adapter *adapter = netdev_priv(netdev);
6b7c5b94 2297 struct be_eq_obj *tx_eq = &adapter->tx_eq;
3abcdeda 2298 struct be_rx_obj *rxo;
3abcdeda 2299 int status, i;
5fb379ee 2300
482c9e79
SP
2301 status = be_rx_queues_setup(adapter);
2302 if (status)
2303 goto err;
2304
5fb379ee
SP
2305 napi_enable(&tx_eq->napi);
2306
2307 be_irq_register(adapter);
2308
fe6d2a38
SP
2309 if (!lancer_chip(adapter))
2310 be_intr_set(adapter, true);
5fb379ee
SP
2311
2312 /* The evt queues are created in unarmed state; arm them */
3abcdeda
SP
2313 for_all_rx_queues(adapter, rxo, i) {
2314 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2315 be_cq_notify(adapter, rxo->cq.id, true, 0);
2316 }
8788fdc2 2317 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
5fb379ee 2318
7a1e9b20
SP
2319 /* Now that interrupts are on we can process async mcc */
2320 be_async_mcc_enable(adapter);
2321
889cd4b2 2322 if (be_physfn(adapter)) {
1da87b7f 2323 status = be_vid_config(adapter, false, 0);
889cd4b2
SP
2324 if (status)
2325 goto err;
4f2aa89c 2326
ba343c77
SB
2327 status = be_cmd_set_flow_control(adapter,
2328 adapter->tx_fc, adapter->rx_fc);
2329 if (status)
889cd4b2 2330 goto err;
ba343c77 2331 }
4f2aa89c 2332
889cd4b2
SP
2333 return 0;
2334err:
2335 be_close(adapter->netdev);
2336 return -EIO;
5fb379ee
SP
2337}
2338
71d8d1b5
AK
2339static int be_setup_wol(struct be_adapter *adapter, bool enable)
2340{
2341 struct be_dma_mem cmd;
2342 int status = 0;
2343 u8 mac[ETH_ALEN];
2344
2345 memset(mac, 0, ETH_ALEN);
2346
2347 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2b7bcebf
IV
2348 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2349 GFP_KERNEL);
71d8d1b5
AK
2350 if (cmd.va == NULL)
2351 return -1;
2352 memset(cmd.va, 0, cmd.size);
2353
2354 if (enable) {
2355 status = pci_write_config_dword(adapter->pdev,
2356 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2357 if (status) {
2358 dev_err(&adapter->pdev->dev,
2381a55c 2359 "Could not enable Wake-on-lan\n");
2b7bcebf
IV
2360 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2361 cmd.dma);
71d8d1b5
AK
2362 return status;
2363 }
2364 status = be_cmd_enable_magic_wol(adapter,
2365 adapter->netdev->dev_addr, &cmd);
2366 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2367 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2368 } else {
2369 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2370 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2371 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2372 }
2373
2b7bcebf 2374 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
2375 return status;
2376}
2377
6d87f5c3
AK
2378/*
2379 * Generate a seed MAC address from the PF MAC Address using jhash.
2380 * MAC Address for VFs are assigned incrementally starting from the seed.
2381 * These addresses are programmed in the ASIC by the PF and the VF driver
2382 * queries for the MAC address during its probe.
2383 */
2384static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2385{
2386 u32 vf = 0;
3abcdeda 2387 int status = 0;
6d87f5c3
AK
2388 u8 mac[ETH_ALEN];
2389
2390 be_vf_eth_addr_generate(adapter, mac);
2391
2392 for (vf = 0; vf < num_vfs; vf++) {
2393 status = be_cmd_pmac_add(adapter, mac,
2394 adapter->vf_cfg[vf].vf_if_handle,
f8617e08
AK
2395 &adapter->vf_cfg[vf].vf_pmac_id,
2396 vf + 1);
6d87f5c3
AK
2397 if (status)
2398 dev_err(&adapter->pdev->dev,
2399 "Mac address add failed for VF %d\n", vf);
2400 else
2401 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2402
2403 mac[5] += 1;
2404 }
2405 return status;
2406}
2407
2408static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2409{
2410 u32 vf;
2411
2412 for (vf = 0; vf < num_vfs; vf++) {
2413 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2414 be_cmd_pmac_del(adapter,
2415 adapter->vf_cfg[vf].vf_if_handle,
f8617e08 2416 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
6d87f5c3
AK
2417 }
2418}
2419
5fb379ee
SP
2420static int be_setup(struct be_adapter *adapter)
2421{
5fb379ee 2422 struct net_device *netdev = adapter->netdev;
ba343c77 2423 u32 cap_flags, en_flags, vf = 0;
6b7c5b94 2424 int status;
ba343c77
SB
2425 u8 mac[ETH_ALEN];
2426
2dc1deb6
SP
2427 be_cmd_req_native_mode(adapter);
2428
f21b538c
PR
2429 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2430 BE_IF_FLAGS_BROADCAST |
2431 BE_IF_FLAGS_MULTICAST;
6b7c5b94 2432
ba343c77
SB
2433 if (be_physfn(adapter)) {
2434 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2435 BE_IF_FLAGS_PROMISCUOUS |
2436 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2437 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
3abcdeda 2438
ac6a0c4a 2439 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
3abcdeda
SP
2440 cap_flags |= BE_IF_FLAGS_RSS;
2441 en_flags |= BE_IF_FLAGS_RSS;
2442 }
ba343c77 2443 }
73d540f2
SP
2444
2445 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2446 netdev->dev_addr, false/* pmac_invalid */,
ba343c77 2447 &adapter->if_handle, &adapter->pmac_id, 0);
6b7c5b94
SP
2448 if (status != 0)
2449 goto do_none;
2450
ba343c77 2451 if (be_physfn(adapter)) {
c99ac3e7
AK
2452 if (adapter->sriov_enabled) {
2453 while (vf < num_vfs) {
2454 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2455 BE_IF_FLAGS_BROADCAST;
2456 status = be_cmd_if_create(adapter, cap_flags,
2457 en_flags, mac, true,
64600ea5 2458 &adapter->vf_cfg[vf].vf_if_handle,
ba343c77 2459 NULL, vf+1);
c99ac3e7
AK
2460 if (status) {
2461 dev_err(&adapter->pdev->dev,
2462 "Interface Create failed for VF %d\n",
2463 vf);
2464 goto if_destroy;
2465 }
2466 adapter->vf_cfg[vf].vf_pmac_id =
2467 BE_INVALID_PMAC_ID;
2468 vf++;
ba343c77 2469 }
84e5b9f7 2470 }
c99ac3e7 2471 } else {
ba343c77
SB
2472 status = be_cmd_mac_addr_query(adapter, mac,
2473 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2474 if (!status) {
2475 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2476 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2477 }
2478 }
2479
6b7c5b94
SP
2480 status = be_tx_queues_create(adapter);
2481 if (status != 0)
2482 goto if_destroy;
2483
2484 status = be_rx_queues_create(adapter);
2485 if (status != 0)
2486 goto tx_qs_destroy;
2487
2903dd65
SP
2488 /* Allow all priorities by default. A GRP5 evt may modify this */
2489 adapter->vlan_prio_bmap = 0xff;
2490
5fb379ee
SP
2491 status = be_mcc_queues_create(adapter);
2492 if (status != 0)
2493 goto rx_qs_destroy;
6b7c5b94 2494
0dffc83e
AK
2495 adapter->link_speed = -1;
2496
6b7c5b94
SP
2497 return 0;
2498
5fb379ee
SP
2499rx_qs_destroy:
2500 be_rx_queues_destroy(adapter);
6b7c5b94
SP
2501tx_qs_destroy:
2502 be_tx_queues_destroy(adapter);
2503if_destroy:
c99ac3e7
AK
2504 if (be_physfn(adapter) && adapter->sriov_enabled)
2505 for (vf = 0; vf < num_vfs; vf++)
2506 if (adapter->vf_cfg[vf].vf_if_handle)
2507 be_cmd_if_destroy(adapter,
658681f7
AK
2508 adapter->vf_cfg[vf].vf_if_handle,
2509 vf + 1);
2510 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
6b7c5b94
SP
2511do_none:
2512 return status;
2513}
2514
5fb379ee
SP
2515static int be_clear(struct be_adapter *adapter)
2516{
7ab8b0b4
AK
2517 int vf;
2518
c99ac3e7 2519 if (be_physfn(adapter) && adapter->sriov_enabled)
6d87f5c3
AK
2520 be_vf_eth_addr_rem(adapter);
2521
1a8887d8 2522 be_mcc_queues_destroy(adapter);
5fb379ee
SP
2523 be_rx_queues_destroy(adapter);
2524 be_tx_queues_destroy(adapter);
1f5db833 2525 adapter->eq_next_idx = 0;
5fb379ee 2526
7ab8b0b4
AK
2527 if (be_physfn(adapter) && adapter->sriov_enabled)
2528 for (vf = 0; vf < num_vfs; vf++)
2529 if (adapter->vf_cfg[vf].vf_if_handle)
2530 be_cmd_if_destroy(adapter,
2531 adapter->vf_cfg[vf].vf_if_handle,
2532 vf + 1);
2533
658681f7 2534 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
5fb379ee 2535
2dc1deb6
SP
2536 adapter->be3_native = 0;
2537
2243e2e9
SP
2538 /* tell fw we're done with firing cmds */
2539 be_cmd_fw_clean(adapter);
5fb379ee
SP
2540 return 0;
2541}
2542
6b7c5b94 2543
84517482 2544#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
fa9a6fed 2545static bool be_flash_redboot(struct be_adapter *adapter,
3f0d4560
AK
2546 const u8 *p, u32 img_start, int image_size,
2547 int hdr_size)
fa9a6fed
SB
2548{
2549 u32 crc_offset;
2550 u8 flashed_crc[4];
2551 int status;
3f0d4560
AK
2552
2553 crc_offset = hdr_size + img_start + image_size - 4;
2554
fa9a6fed 2555 p += crc_offset;
3f0d4560
AK
2556
2557 status = be_cmd_get_flash_crc(adapter, flashed_crc,
f510fc64 2558 (image_size - 4));
fa9a6fed
SB
2559 if (status) {
2560 dev_err(&adapter->pdev->dev,
2561 "could not get crc from flash, not flashing redboot\n");
2562 return false;
2563 }
2564
2565 /*update redboot only if crc does not match*/
2566 if (!memcmp(flashed_crc, p, 4))
2567 return false;
2568 else
2569 return true;
fa9a6fed
SB
2570}
2571
3f0d4560 2572static int be_flash_data(struct be_adapter *adapter,
84517482 2573 const struct firmware *fw,
3f0d4560
AK
2574 struct be_dma_mem *flash_cmd, int num_of_images)
2575
84517482 2576{
3f0d4560
AK
2577 int status = 0, i, filehdr_size = 0;
2578 u32 total_bytes = 0, flash_op;
84517482
AK
2579 int num_bytes;
2580 const u8 *p = fw->data;
2581 struct be_cmd_write_flashrom *req = flash_cmd->va;
215faf9c 2582 const struct flash_comp *pflashcomp;
9fe96934 2583 int num_comp;
3f0d4560 2584
215faf9c 2585 static const struct flash_comp gen3_flash_types[9] = {
3f0d4560
AK
2586 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2587 FLASH_IMAGE_MAX_SIZE_g3},
2588 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2589 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2590 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2591 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2592 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2593 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2594 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2595 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2596 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2597 FLASH_IMAGE_MAX_SIZE_g3},
2598 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2599 FLASH_IMAGE_MAX_SIZE_g3},
2600 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
9fe96934
SB
2601 FLASH_IMAGE_MAX_SIZE_g3},
2602 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2603 FLASH_NCSI_IMAGE_MAX_SIZE_g3}
3f0d4560 2604 };
215faf9c 2605 static const struct flash_comp gen2_flash_types[8] = {
3f0d4560
AK
2606 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2607 FLASH_IMAGE_MAX_SIZE_g2},
2608 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2609 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2610 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2611 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2612 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2613 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2614 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2615 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2616 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2617 FLASH_IMAGE_MAX_SIZE_g2},
2618 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2619 FLASH_IMAGE_MAX_SIZE_g2},
2620 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2621 FLASH_IMAGE_MAX_SIZE_g2}
2622 };
2623
2624 if (adapter->generation == BE_GEN3) {
2625 pflashcomp = gen3_flash_types;
2626 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 2627 num_comp = ARRAY_SIZE(gen3_flash_types);
3f0d4560
AK
2628 } else {
2629 pflashcomp = gen2_flash_types;
2630 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 2631 num_comp = ARRAY_SIZE(gen2_flash_types);
84517482 2632 }
9fe96934
SB
2633 for (i = 0; i < num_comp; i++) {
2634 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2635 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2636 continue;
3f0d4560
AK
2637 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2638 (!be_flash_redboot(adapter, fw->data,
fae21a4d
AK
2639 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2640 (num_of_images * sizeof(struct image_hdr)))))
3f0d4560
AK
2641 continue;
2642 p = fw->data;
2643 p += filehdr_size + pflashcomp[i].offset
2644 + (num_of_images * sizeof(struct image_hdr));
2645 if (p + pflashcomp[i].size > fw->data + fw->size)
84517482 2646 return -1;
3f0d4560
AK
2647 total_bytes = pflashcomp[i].size;
2648 while (total_bytes) {
2649 if (total_bytes > 32*1024)
2650 num_bytes = 32*1024;
2651 else
2652 num_bytes = total_bytes;
2653 total_bytes -= num_bytes;
2654
2655 if (!total_bytes)
2656 flash_op = FLASHROM_OPER_FLASH;
2657 else
2658 flash_op = FLASHROM_OPER_SAVE;
2659 memcpy(req->params.data_buf, p, num_bytes);
2660 p += num_bytes;
2661 status = be_cmd_write_flashrom(adapter, flash_cmd,
2662 pflashcomp[i].optype, flash_op, num_bytes);
2663 if (status) {
2664 dev_err(&adapter->pdev->dev,
2665 "cmd to write to flash rom failed.\n");
2666 return -1;
2667 }
84517482 2668 }
84517482 2669 }
84517482
AK
2670 return 0;
2671}
2672
3f0d4560
AK
2673static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2674{
2675 if (fhdr == NULL)
2676 return 0;
2677 if (fhdr->build[0] == '3')
2678 return BE_GEN3;
2679 else if (fhdr->build[0] == '2')
2680 return BE_GEN2;
2681 else
2682 return 0;
2683}
2684
485bf569
SN
2685static int lancer_fw_download(struct be_adapter *adapter,
2686 const struct firmware *fw)
84517482 2687{
485bf569
SN
2688#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
2689#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
84517482 2690 struct be_dma_mem flash_cmd;
485bf569
SN
2691 const u8 *data_ptr = NULL;
2692 u8 *dest_image_ptr = NULL;
2693 size_t image_size = 0;
2694 u32 chunk_size = 0;
2695 u32 data_written = 0;
2696 u32 offset = 0;
2697 int status = 0;
2698 u8 add_status = 0;
84517482 2699
485bf569 2700 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
d9efd2af 2701 dev_err(&adapter->pdev->dev,
485bf569
SN
2702 "FW Image not properly aligned. "
2703 "Length must be 4 byte aligned.\n");
2704 status = -EINVAL;
2705 goto lancer_fw_exit;
d9efd2af
SB
2706 }
2707
485bf569
SN
2708 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2709 + LANCER_FW_DOWNLOAD_CHUNK;
2710 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2711 &flash_cmd.dma, GFP_KERNEL);
2712 if (!flash_cmd.va) {
2713 status = -ENOMEM;
2714 dev_err(&adapter->pdev->dev,
2715 "Memory allocation failure while flashing\n");
2716 goto lancer_fw_exit;
2717 }
84517482 2718
485bf569
SN
2719 dest_image_ptr = flash_cmd.va +
2720 sizeof(struct lancer_cmd_req_write_object);
2721 image_size = fw->size;
2722 data_ptr = fw->data;
2723
2724 while (image_size) {
2725 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2726
2727 /* Copy the image chunk content. */
2728 memcpy(dest_image_ptr, data_ptr, chunk_size);
2729
2730 status = lancer_cmd_write_object(adapter, &flash_cmd,
2731 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2732 &data_written, &add_status);
2733
2734 if (status)
2735 break;
2736
2737 offset += data_written;
2738 data_ptr += data_written;
2739 image_size -= data_written;
2740 }
2741
2742 if (!status) {
2743 /* Commit the FW written */
2744 status = lancer_cmd_write_object(adapter, &flash_cmd,
2745 0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2746 &data_written, &add_status);
2747 }
2748
2749 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2750 flash_cmd.dma);
2751 if (status) {
2752 dev_err(&adapter->pdev->dev,
2753 "Firmware load error. "
2754 "Status code: 0x%x Additional Status: 0x%x\n",
2755 status, add_status);
2756 goto lancer_fw_exit;
2757 }
2758
2759 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2760lancer_fw_exit:
2761 return status;
2762}
2763
2764static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
2765{
2766 struct flash_file_hdr_g2 *fhdr;
2767 struct flash_file_hdr_g3 *fhdr3;
2768 struct image_hdr *img_hdr_ptr = NULL;
2769 struct be_dma_mem flash_cmd;
2770 const u8 *p;
2771 int status = 0, i = 0, num_imgs = 0;
84517482
AK
2772
2773 p = fw->data;
3f0d4560 2774 fhdr = (struct flash_file_hdr_g2 *) p;
84517482 2775
84517482 2776 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2b7bcebf
IV
2777 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2778 &flash_cmd.dma, GFP_KERNEL);
84517482
AK
2779 if (!flash_cmd.va) {
2780 status = -ENOMEM;
2781 dev_err(&adapter->pdev->dev,
2782 "Memory allocation failure while flashing\n");
485bf569 2783 goto be_fw_exit;
84517482
AK
2784 }
2785
3f0d4560
AK
2786 if ((adapter->generation == BE_GEN3) &&
2787 (get_ufigen_type(fhdr) == BE_GEN3)) {
2788 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
8b93b710
AK
2789 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2790 for (i = 0; i < num_imgs; i++) {
3f0d4560
AK
2791 img_hdr_ptr = (struct image_hdr *) (fw->data +
2792 (sizeof(struct flash_file_hdr_g3) +
8b93b710
AK
2793 i * sizeof(struct image_hdr)));
2794 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2795 status = be_flash_data(adapter, fw, &flash_cmd,
2796 num_imgs);
3f0d4560
AK
2797 }
2798 } else if ((adapter->generation == BE_GEN2) &&
2799 (get_ufigen_type(fhdr) == BE_GEN2)) {
2800 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2801 } else {
2802 dev_err(&adapter->pdev->dev,
2803 "UFI and Interface are not compatible for flashing\n");
2804 status = -1;
84517482
AK
2805 }
2806
2b7bcebf
IV
2807 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2808 flash_cmd.dma);
84517482
AK
2809 if (status) {
2810 dev_err(&adapter->pdev->dev, "Firmware load error\n");
485bf569 2811 goto be_fw_exit;
84517482
AK
2812 }
2813
af901ca1 2814 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
84517482 2815
485bf569
SN
2816be_fw_exit:
2817 return status;
2818}
2819
2820int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2821{
2822 const struct firmware *fw;
2823 int status;
2824
2825 if (!netif_running(adapter->netdev)) {
2826 dev_err(&adapter->pdev->dev,
2827 "Firmware load not allowed (interface is down)\n");
2828 return -1;
2829 }
2830
2831 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2832 if (status)
2833 goto fw_exit;
2834
2835 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2836
2837 if (lancer_chip(adapter))
2838 status = lancer_fw_download(adapter, fw);
2839 else
2840 status = be_fw_download(adapter, fw);
2841
84517482
AK
2842fw_exit:
2843 release_firmware(fw);
2844 return status;
2845}
2846
/* net_device_ops table: wires the stack's netdev callbacks (open/close,
 * xmit, rx-mode, MAC/VLAN/VF configuration) to this driver's handlers.
 */
static struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_multicast_list,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config
};
2863
/* One-time netdev initialization: advertise offload features, set default
 * flow-control state, install the netdev/ethtool ops and register the
 * NAPI contexts for all RX queues and the combined TX/MCC event queue.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	/* user-togglable offloads */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* everything above is enabled by default; VLAN rx strip/filter are
	 * always on and not togglable
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->flags |= IFF_MULTICAST;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	/* one NAPI context per RX queue ... */
	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
			BE_NAPI_WEIGHT);

	/* ... plus one shared by TX completions and the MCC queue */
	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}
2901
2902static void be_unmap_pci_bars(struct be_adapter *adapter)
2903{
8788fdc2
SP
2904 if (adapter->csr)
2905 iounmap(adapter->csr);
2906 if (adapter->db)
2907 iounmap(adapter->db);
ba343c77 2908 if (adapter->pcicfg && be_physfn(adapter))
8788fdc2 2909 iounmap(adapter->pcicfg);
6b7c5b94
SP
2910}
2911
/* ioremap the PCI BARs this adapter needs. Lancer uses only BAR 0 (as the
 * doorbell space). On BE chips the PF maps the CSR BAR (2), and the
 * doorbell/pcicfg BAR numbers depend on chip generation and PF/VF role;
 * a VF derives its pcicfg from the doorbell mapping at a fixed offset.
 * Returns 0 on success, -ENOMEM on any mapping failure (all prior
 * mappings are undone).
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int pcicfg_reg, db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
				       pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	/* BAR layout differs between BE2 and later generations */
	if (adapter->generation == BE_GEN2) {
		pcicfg_reg = 1;
		db_reg = 4;
	} else {
		pcicfg_reg = 0;
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(
				pci_resource_start(adapter->pdev, pcicfg_reg),
				pci_resource_len(adapter->pdev, pcicfg_reg));
		if (addr == NULL)
			goto pci_map_err;
		adapter->pcicfg = addr;
	} else
		/* VF: pcicfg lives at a fixed offset inside the db BAR */
		adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
2965
2966
2967static void be_ctrl_cleanup(struct be_adapter *adapter)
2968{
8788fdc2 2969 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
6b7c5b94
SP
2970
2971 be_unmap_pci_bars(adapter);
2972
2973 if (mem->va)
2b7bcebf
IV
2974 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2975 mem->dma);
e7b909a6 2976
5b8821b7 2977 mem = &adapter->rx_filter;
e7b909a6 2978 if (mem->va)
2b7bcebf
IV
2979 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2980 mem->dma);
6b7c5b94
SP
2981}
2982
/* Set up the control path to the adapter: map PCI BARs, allocate the
 * 16-byte-aligned mailbox used for MCC/mbox commands and the RX_FILTER
 * command DMA buffer, and initialize the associated locks/completion.
 * Returns 0 on success or a negative errno; on failure everything
 * allocated so far is released via the goto chain.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* over-allocate by 16 so the usable region can be 16-byte aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	/* mbox_mem is the aligned view into the raw allocation above */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	/* DMA buffer reused for every RX_FILTER command */
	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3035
3036static void be_stats_cleanup(struct be_adapter *adapter)
3037{
3abcdeda 3038 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94
SP
3039
3040 if (cmd->va)
2b7bcebf
IV
3041 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3042 cmd->va, cmd->dma);
6b7c5b94
SP
3043}
3044
/* Allocate the DMA buffer for the stats command, sized for the request
 * layout this chip uses (v0 on BE2, Lancer pport stats on Lancer, v1
 * otherwise). Returns 0 on success, -1 on allocation failure.
 */
static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (adapter->generation == BE_GEN2) {
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	} else {
		if (lancer_chip(adapter))
			cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
		else
			cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	}
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}
3064
/* PCI remove callback: tear down in strict reverse order of be_probe() —
 * stop the worker, unregister the netdev, release adapter/stat/control
 * resources, then disable SR-IOV, MSI-X and the PCI device itself.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	/* make sure the deferred worker cannot run concurrently with teardown */
	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	kfree(adapter->vf_cfg);
	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	/* adapter lives inside the netdev's private area; free last */
	free_netdev(adapter->netdev);
}
3093
2243e2e9 3094static int be_get_config(struct be_adapter *adapter)
6b7c5b94 3095{
6b7c5b94 3096 int status;
2243e2e9 3097 u8 mac[ETH_ALEN];
6b7c5b94 3098
2243e2e9 3099 status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
6b7c5b94
SP
3100 if (status)
3101 return status;
3102
3abcdeda
SP
3103 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3104 &adapter->function_mode, &adapter->function_caps);
43a04fdc
SP
3105 if (status)
3106 return status;
3107
2243e2e9 3108 memset(mac, 0, ETH_ALEN);
ba343c77 3109
12f4d0a8
ME
3110 /* A default permanent address is given to each VF for Lancer*/
3111 if (be_physfn(adapter) || lancer_chip(adapter)) {
ba343c77 3112 status = be_cmd_mac_addr_query(adapter, mac,
2243e2e9 3113 MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
ca9e4988 3114
ba343c77
SB
3115 if (status)
3116 return status;
ca9e4988 3117
ba343c77
SB
3118 if (!is_valid_ether_addr(mac))
3119 return -EADDRNOTAVAIL;
3120
3121 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3122 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3123 }
6b7c5b94 3124
3486be29 3125 if (adapter->function_mode & 0x400)
82903e4b
AK
3126 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3127 else
3128 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3129
9e1453c5
AK
3130 status = be_cmd_get_cntl_attributes(adapter);
3131 if (status)
3132 return status;
3133
3c8def97
SP
3134 if ((num_vfs && adapter->sriov_enabled) ||
3135 (adapter->function_mode & 0x400) ||
3136 lancer_chip(adapter) || !be_physfn(adapter)) {
3137 adapter->num_tx_qs = 1;
3138 netif_set_real_num_tx_queues(adapter->netdev,
3139 adapter->num_tx_qs);
3140 } else {
3141 adapter->num_tx_qs = MAX_TX_QS;
3142 }
3143
2243e2e9 3144 return 0;
6b7c5b94
SP
3145}
3146
/* Determine the chip generation (BE2/BE3/Lancer family) from the PCI
 * device ID; for OC_DEVICE_ID3/4 also validate the SLI_INTF register and
 * record the SLI family. Returns 0, or -EINVAL if SLI_INTF is invalid.
 */
static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		/* only interface type 2 with a valid signature is supported */
		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}
3181
37eed1cb
PR
3182static int lancer_wait_ready(struct be_adapter *adapter)
3183{
3184#define SLIPORT_READY_TIMEOUT 500
3185 u32 sliport_status;
3186 int status = 0, i;
3187
3188 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3189 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3190 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3191 break;
3192
3193 msleep(20);
3194 }
3195
3196 if (i == SLIPORT_READY_TIMEOUT)
3197 status = -1;
3198
3199 return status;
3200}
3201
/* Wait for the Lancer SLI port to come ready; if it reports an error with
 * the reset-needed bit set, trigger a port reset via SLIPORT_CONTROL and
 * wait for it to recover. Returns 0 when the port is ready and error-free,
 * -1 otherwise.
 */
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;
	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			/* initiate the port reset */
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
							SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			/* error without reset-needed (or vice versa):
			 * not recoverable from here
			 */
			status = -1;
		}
	}
	return status;
}
3229
6b7c5b94
SP
3230static int __devinit be_probe(struct pci_dev *pdev,
3231 const struct pci_device_id *pdev_id)
3232{
3233 int status = 0;
3234 struct be_adapter *adapter;
3235 struct net_device *netdev;
6b7c5b94
SP
3236
3237 status = pci_enable_device(pdev);
3238 if (status)
3239 goto do_none;
3240
3241 status = pci_request_regions(pdev, DRV_NAME);
3242 if (status)
3243 goto disable_dev;
3244 pci_set_master(pdev);
3245
3c8def97 3246 netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
6b7c5b94
SP
3247 if (netdev == NULL) {
3248 status = -ENOMEM;
3249 goto rel_reg;
3250 }
3251 adapter = netdev_priv(netdev);
3252 adapter->pdev = pdev;
3253 pci_set_drvdata(pdev, adapter);
fe6d2a38
SP
3254
3255 status = be_dev_family_check(adapter);
63657b9c 3256 if (status)
fe6d2a38
SP
3257 goto free_netdev;
3258
6b7c5b94 3259 adapter->netdev = netdev;
2243e2e9 3260 SET_NETDEV_DEV(netdev, &pdev->dev);
6b7c5b94 3261
2b7bcebf 3262 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
6b7c5b94
SP
3263 if (!status) {
3264 netdev->features |= NETIF_F_HIGHDMA;
3265 } else {
2b7bcebf 3266 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
6b7c5b94
SP
3267 if (status) {
3268 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3269 goto free_netdev;
3270 }
3271 }
3272
ba343c77 3273 be_sriov_enable(adapter);
48f5a191
AK
3274 if (adapter->sriov_enabled) {
3275 adapter->vf_cfg = kcalloc(num_vfs,
3276 sizeof(struct be_vf_cfg), GFP_KERNEL);
3277
3278 if (!adapter->vf_cfg)
3279 goto free_netdev;
3280 }
ba343c77 3281
6b7c5b94
SP
3282 status = be_ctrl_init(adapter);
3283 if (status)
48f5a191 3284 goto free_vf_cfg;
6b7c5b94 3285
37eed1cb
PR
3286 if (lancer_chip(adapter)) {
3287 status = lancer_test_and_set_rdy_state(adapter);
3288 if (status) {
3289 dev_err(&pdev->dev, "Adapter in non recoverable error\n");
48f5a191 3290 goto ctrl_clean;
37eed1cb
PR
3291 }
3292 }
3293
2243e2e9 3294 /* sync up with fw's ready state */
ba343c77
SB
3295 if (be_physfn(adapter)) {
3296 status = be_cmd_POST(adapter);
3297 if (status)
3298 goto ctrl_clean;
ba343c77 3299 }
6b7c5b94 3300
2243e2e9
SP
3301 /* tell fw we're ready to fire cmds */
3302 status = be_cmd_fw_init(adapter);
6b7c5b94 3303 if (status)
2243e2e9
SP
3304 goto ctrl_clean;
3305
a4b4dfab
AK
3306 status = be_cmd_reset_function(adapter);
3307 if (status)
3308 goto ctrl_clean;
556ae191 3309
2243e2e9
SP
3310 status = be_stats_init(adapter);
3311 if (status)
3312 goto ctrl_clean;
3313
3314 status = be_get_config(adapter);
6b7c5b94
SP
3315 if (status)
3316 goto stats_clean;
6b7c5b94 3317
b9ab82c7
SP
3318 /* The INTR bit may be set in the card when probed by a kdump kernel
3319 * after a crash.
3320 */
3321 if (!lancer_chip(adapter))
3322 be_intr_set(adapter, false);
3323
3abcdeda
SP
3324 be_msix_enable(adapter);
3325
6b7c5b94 3326 INIT_DELAYED_WORK(&adapter->work, be_worker);
6b7c5b94 3327
5fb379ee
SP
3328 status = be_setup(adapter);
3329 if (status)
3abcdeda 3330 goto msix_disable;
2243e2e9 3331
3abcdeda 3332 be_netdev_init(netdev);
6b7c5b94
SP
3333 status = register_netdev(netdev);
3334 if (status != 0)
5fb379ee 3335 goto unsetup;
6b7c5b94 3336
e6319365 3337 if (be_physfn(adapter) && adapter->sriov_enabled) {
d0381c42 3338 u8 mac_speed;
d0381c42
AK
3339 u16 vf, lnk_speed;
3340
12f4d0a8
ME
3341 if (!lancer_chip(adapter)) {
3342 status = be_vf_eth_addr_config(adapter);
3343 if (status)
3344 goto unreg_netdev;
3345 }
d0381c42
AK
3346
3347 for (vf = 0; vf < num_vfs; vf++) {
ea172a01
SP
3348 status = be_cmd_link_status_query(adapter, &mac_speed,
3349 &lnk_speed, vf + 1);
d0381c42
AK
3350 if (!status)
3351 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
3352 else
3353 goto unreg_netdev;
3354 }
e6319365
AK
3355 }
3356
c4ca2374 3357 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
34b1ef04 3358
f203af70 3359 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
6b7c5b94
SP
3360 return 0;
3361
e6319365
AK
3362unreg_netdev:
3363 unregister_netdev(netdev);
5fb379ee
SP
3364unsetup:
3365 be_clear(adapter);
3abcdeda
SP
3366msix_disable:
3367 be_msix_disable(adapter);
6b7c5b94
SP
3368stats_clean:
3369 be_stats_cleanup(adapter);
3370ctrl_clean:
3371 be_ctrl_cleanup(adapter);
48f5a191
AK
3372free_vf_cfg:
3373 kfree(adapter->vf_cfg);
6b7c5b94 3374free_netdev:
ba343c77 3375 be_sriov_disable(adapter);
fe6d2a38 3376 free_netdev(netdev);
8d56ff11 3377 pci_set_drvdata(pdev, NULL);
6b7c5b94
SP
3378rel_reg:
3379 pci_release_regions(pdev);
3380disable_dev:
3381 pci_disable_device(pdev);
3382do_none:
c4ca2374 3383 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
6b7c5b94
SP
3384 return status;
3385}
3386
/* PM suspend handler: stop the worker, optionally arm wake-on-lan, close
 * and detach the netdev, save the flow-control settings, release adapter
 * resources and put the PCI device into the target power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	/* capture current flow-control state into adapter->tx_fc/rx_fc;
	 * NOTE(review): presumably re-applied by be_setup() on resume —
	 * confirm.
	 */
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
3411
/* PM resume handler: re-enable the PCI device, restore its state, bring
 * MSI-X and firmware back up, re-run be_setup() and reopen the interface,
 * then restart the periodic worker.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	/* WoL was armed in be_suspend(); disarm now that we're awake */
	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}
3447
/*
 * An FLR will stop BE from DMAing any data.
 */
/* PCI shutdown handler: quiesce the device before reboot/poweroff — stop
 * the worker, detach the netdev, arm WoL if requested, reset the function
 * (FLR) so no DMA continues, then disable the PCI device.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
3469
/* EEH/AER error_detected callback: mark the adapter errored, detach and
 * close the netdev and release adapter resources. Requests a slot reset
 * unless the channel failure is permanent.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* flag checked elsewhere to short-circuit hardware access */
	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}
3496
/* EEH slot_reset callback: re-enable the device after the slot reset,
 * restore PCI state and verify the firmware via POST. Returns RECOVERED
 * on success, DISCONNECT if the device cannot be brought back.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}
3520
/* EEH resume callback: after a successful slot reset, re-initialize the
 * firmware and adapter, reopen the interface if it was running, and
 * reattach the netdev. Logs an error if any step fails.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
3550
/* PCI error-recovery (EEH/AER) callbacks */
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
3556
/* PCI driver registration: device table, lifecycle and PM entry points */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
3567
3568static int __init be_init_module(void)
3569{
8e95a202
JP
3570 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3571 rx_frag_size != 2048) {
6b7c5b94
SP
3572 printk(KERN_WARNING DRV_NAME
3573 " : Module param rx_frag_size must be 2048/4096/8192."
3574 " Using 2048\n");
3575 rx_frag_size = 2048;
3576 }
6b7c5b94
SP
3577
3578 return pci_register_driver(&be_driver);
3579}
3580module_init(be_init_module);
3581
/* Module exit point: unregister the PCI driver (triggers be_remove()) */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);
This page took 0.50821 seconds and 5 git commands to generate.