be2net: In case of UE, do not dump registers for Lancer
deliverable/linux.git: drivers/net/benet/be_main.c
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
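
/*
 * Note: each string above names the hardware block behind the
 * corresponding bit of the UE (Unrecoverable Error) status low/high
 * CSRs. be_detect_dump_ue(), at the bottom of this file, reads those
 * CSRs from PCI config space and indexes these tables when logging.
 */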

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (adapter->eeh_err)
		return;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	iowrite32(reg, addr);
}
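
/*
 * The *_notify() helpers below each build a 32-bit doorbell word --
 * ring id in the low bits, counts and flags in the upper bits -- and
 * write it to the doorbell BAR (adapter->db). The wmb() ensures queue
 * entries are globally visible in memory before hardware is notified.
 */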

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
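
/*
 * Usage example: be_cq_notify(adapter, cq->id, true, work_done) acks
 * 'work_done' completion entries and re-arms the CQ interrupt, while
 * arm=false only acks them (see be_poll_rx() later in this file).
 */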

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	status = be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id, 0);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
			adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}

void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
	struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct net_device_stats *dev_stats = &adapter->netdev->stats;
	struct be_erx_stats *erx_stats = &hw_stats->erx;
	struct be_rx_obj *rxo;
	int i;

	memset(dev_stats, 0, sizeof(*dev_stats));
	for_all_rx_queues(adapter, rxo, i) {
		dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
		dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
		dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		/* no space in linux buffers: best possible approximation */
		dev_stats->rx_dropped +=
			erx_stats->rx_drops_no_fragments[rxo->q.id];
	}

	dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
	dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

	/* bad pkts received */
	dev_stats->rx_errors = port_stats->rx_crc_errors +
		port_stats->rx_alignment_symbol_errors +
		port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long +
		port_stats->rx_dropped_too_small +
		port_stats->rx_dropped_too_short +
		port_stats->rx_dropped_header_too_small +
		port_stats->rx_dropped_tcp_length +
		port_stats->rx_dropped_runt +
		port_stats->rx_tcp_checksum_errs +
		port_stats->rx_ip_checksum_errs +
		port_stats->rx_udp_checksum_errs;

	/* detailed rx errors */
	dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long;

	dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
				port_stats->rx_input_fifo_overflow +
				rxf_stats->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
	struct net_device *netdev = adapter->netdev;

	/* If link came up or went down */
	if (adapter->link_up != link_up) {
		adapter->link_speed = -1;
		if (link_up) {
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		} else {
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		}
		adapter->link_up = link_up;
	}
}

/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->prev_rx_frags = stats->rx_frags;
	eqd = stats->rx_fps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}

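/*
 * be_calc_rate() returns Mbits/sec. Worked example (ticks are jiffies,
 * HZ per second): 250,000,000 bytes over 2*HZ ticks -> 125,000,000
 * bytes/sec -> 1,000,000,000 bits/sec -> 1000 Mbits/sec.
 */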
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
	u64 rate = bytes;

	do_div(rate, ticks / HZ);
	rate <<= 3;			/* bytes/sec -> bits/sec */
	do_div(rate, 1000000ul);	/* bits/sec -> Mbits/sec */

	return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around? */
	if (time_before(now, stats->be_tx_jiffies)) {
		stats->be_tx_jiffies = now;
		return;
	}

	/* Update tx rate once in two seconds */
	if ((now - stats->be_tx_jiffies) > 2 * HZ) {
		stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
						  - stats->be_tx_bytes_prev,
						 now - stats->be_tx_jiffies);
		stats->be_tx_jiffies = now;
		stats->be_tx_bytes_prev = stats->be_tx_bytes;
	}
}

static void be_tx_stats_update(struct be_adapter *adapter,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	stats->be_tx_reqs++;
	stats->be_tx_wrbs += wrb_cnt;
	stats->be_tx_bytes += copied;
	stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
		bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}
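
/*
 * Example for the above (assuming a BE2/BE3, i.e. !lancer_chip): an skb
 * with linear data and two page frags needs 1 + 2 = 3 data WRBs plus
 * the header WRB, i.e. cnt = 4 (even), so no dummy WRB is added; with
 * one page frag cnt would be 3 and a dummy WRB pads it to 4.
 */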

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = dma_map_page(dev, frag->page, frag->page_offset,
				       frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
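
/*
 * Note: the descriptor layout produced above is one header WRB (filled
 * in last, once the total copied length is known), followed by one WRB
 * per DMA-mapped fragment, optionally followed by a dummy WRB when
 * wrb_cnt_for_skb() requested one.
 */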

static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(tx_obj->sent_skb_list[start]);
		tx_obj->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_queue(netdev);
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(adapter, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;
	vlan_group_set_device(adapter->vlan_grp, vid, NULL);

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
				&adapter->mc_cmd_mem);
		goto done;
	}

	be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
		&adapter->mc_cmd_mem);
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	status = be_cmd_pmac_add(adapter, mac,
				adapter->vf_cfg[vf].vf_if_handle,
				&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}
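
/*
 * Note: 'rate' above is in Mbits/sec and is capped at 10000 (10 Gbps);
 * the rate / 10 passed to be_cmd_set_qos() suggests the command takes
 * units of 10 Mbps, but that is an inference from this call site only.
 */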

static void be_rx_rate_update(struct be_rx_obj *rxo)
{
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update the rate once in two seconds */
	if ((now - stats->rx_jiffies) < 2 * HZ)
		return;

	stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
				now - stats->rx_jiffies);
	stats->rx_jiffies = now;
	stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = &rxo->stats;

	stats->rx_compl++;
	stats->rx_frags += rxcp->num_rcvd;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rxcp_err++;
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throwaway the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
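
/*
 * Note: every RX buffer posted to the card is an rx_frag_size slice of
 * a "big page" (see be_post_rx_frags() below); the coalescing above
 * folds consecutive fragments that landed in the same physical page
 * back into a single skb fragment slot.
 */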

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (unlikely(rxcp->vlanf)) {
		if (!adapter->vlan_grp || adapter->vlans_added == 0) {
			kfree_skb(skb);
			return;
		}
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
					rxcp->vlan_tag);
	} else {
		netif_receive_skb(skb);
	}
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->truesize += rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (likely(!rxcp->vlanf))
		napi_gro_frags(&eq_obj->napi);
	else
		vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp,
				rxcp->vlan_tag);
}

static void be_parse_rx_compl_v1(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
					       vlan_tag, compl);
	}
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
					       vlan_tag, compl);
	}
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & 0x400) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (((adapter->pvid & VLAN_VID_MASK) ==
		     (rxcp->vlan_tag & VLAN_VID_MASK)) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rxo->stats.rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
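
/*
 * Example for the above, assuming 4 KB pages and rx_frag_size = 2048:
 * big_page_size is 4096, so each page yields two frags; the page's
 * final frag is marked last_page_user so get_rx_page_info() releases
 * the DMA mapping only once all frags of that page are consumed.
 */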

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	atomic_sub(num_wrbs, &txq->used);

	kfree_skb(sent_skb);
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	be_eq_notify(adapter, eq_obj->q.id, true, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}
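
/*
 * Note: event_handle() above rings the EQ doorbell with rearm=true even
 * when num == 0, so a spurious interrupt still leaves the EQ armed.
 */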

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
}

static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			be_tx_compl_process(adapter, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			cmpl = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		be_tx_compl_process(adapter, end_idx);
	}
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->tx_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
	be_queue_free(adapter, q);

	q = &adapter->tx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}
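
/*
 * Creation order below is EQ first, then the CQ attached to it, then
 * the work queue itself; the goto chain unwinds in the reverse order.
 */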

static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;
	/* Alloc Tx Event queue */
	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
		return -1;

	/* Ask BE to create Tx Event queue */
	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto tx_eq_free;

	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	/* Alloc TX eth compl queue */
	cq = &adapter->tx_obj.cq;
	if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
		goto tx_eq_destroy;

	/* Ask BE to create Tx eth compl queue */
	if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
		goto tx_cq_free;

	/* Alloc TX eth queue */
	q = &adapter->tx_obj.q;
	if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
		goto tx_cq_destroy;

	/* Ask BE to create Tx eth queue */
	if (be_cmd_txq_create(adapter, q, cq))
		goto tx_q_free;
	return 0;

tx_q_free:
	be_queue_free(adapter, q);
tx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
	be_queue_free(adapter, cq);
tx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
	be_queue_free(adapter, eq);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}
		be_queue_free(adapter, q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created) {
			be_eq_clean(adapter, &rxo->rx_eq);
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		}
		be_queue_free(adapter, q);
	}
}

static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
	if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
		!adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			"No support for multiple RX queues\n");
		return 1;
	}
}
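
/*
 * Note: the 0x400 bit tested in function_mode above and in
 * be_rx_compl_get() is an undocumented magic constant here; it
 * presumably flags a multi-channel (FLEX10-style) function mode in
 * which RSS queues are not used.
 */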

static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				msix_enabled(adapter) ?
					adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			"Can create only %d RX queues", adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		/* Rx Q */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;

		rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
			BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
			(i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
		if (rc)
			goto err;
	}

	if (be_multi_rxq(adapter)) {
		u8 rsstable[MAX_RSS_QS];

		for_all_rss_queues(adapter, rxo, i)
			rsstable[i] = rxo->rss_id;

		rc = be_cmd_rss_config(adapter, rsstable,
			adapter->num_rx_qs - 1);
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}

static bool event_peek(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
	if (!eqe->evt)
		return false;
	else
		return true;
}
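
/*
 * INTx handling: on Lancer the EQs are peeked directly (the CEV_ISR
 * register read in the else-branch is apparently not usable there),
 * while on BE2/BE3 the ISR bitmap is read and each EQ's eq_idx bit is
 * tested to decide which event queues to service.
 */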

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0, rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq);
		}

		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_rx_obj *rxo = dev;
	struct be_adapter *adapter = rxo->adapter;

	event_handle(adapter, &rxo->rx_eq);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq);

	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return (rxcp->tcpf && !rxcp->err) ? true : false;
}
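
/*
 * Note for be_poll_rx() below: a completion with pkt_size == 0 is a
 * flush completion (generated while an RXQ is being drained/destroyed)
 * and only needs its posted buffers discarded, not an skb.
 */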
1743
49b05221 1744static int be_poll_rx(struct napi_struct *napi, int budget)
6b7c5b94
SP
1745{
1746 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
3abcdeda
SP
1747 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1748 struct be_adapter *adapter = rxo->adapter;
1749 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 1750 struct be_rx_compl_info *rxcp;
6b7c5b94
SP
1751 u32 work_done;
1752
3abcdeda 1753 rxo->stats.rx_polls++;
6b7c5b94 1754 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 1755 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
1756 if (!rxcp)
1757 break;
1758
e80d9da6 1759 /* Ignore flush completions */
009dd872 1760 if (rxcp->num_rcvd && rxcp->pkt_size) {
2e588f84 1761 if (do_gro(rxcp))
64642811
SP
1762 be_rx_compl_process_gro(adapter, rxo, rxcp);
1763 else
1764 be_rx_compl_process(adapter, rxo, rxcp);
009dd872
PR
1765 } else if (rxcp->pkt_size == 0) {
1766 be_rx_compl_discard(adapter, rxo, rxcp);
64642811 1767 }
009dd872 1768
2e588f84 1769 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
1770 }
1771
6b7c5b94 1772 /* Refill the queue */
3abcdeda 1773 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1829b086 1774 be_post_rx_frags(rxo, GFP_ATOMIC);
6b7c5b94
SP
1775
1776 /* All consumed */
1777 if (work_done < budget) {
1778 napi_complete(napi);
8788fdc2 1779 be_cq_notify(adapter, rx_cq->id, true, work_done);
6b7c5b94
SP
1780 } else {
1781 /* More to be consumed; continue with interrupts disabled */
8788fdc2 1782 be_cq_notify(adapter, rx_cq->id, false, work_done);
6b7c5b94
SP
1783 }
1784 return work_done;
1785}
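/*
 * NAPI re-arm pattern used above: when the CQ drains before the budget
 * is spent, napi_complete() is called and be_cq_notify() is passed
 * true, re-arming CQ interrupts; otherwise only the processed entries
 * are acked (false) and the core schedules another poll.
 */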
1786
f31e50a8
SP
1787/* As TX and MCC share the same EQ, check for both TX and MCC completions.
1788 * For TX/MCC we don't honour budget; consume everything
1789 */
1790static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
6b7c5b94 1791{
f31e50a8
SP
1792 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1793 struct be_adapter *adapter =
1794 container_of(tx_eq, struct be_adapter, tx_eq);
5fb379ee
SP
1795 struct be_queue_info *txq = &adapter->tx_obj.q;
1796 struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
6b7c5b94 1797 struct be_eth_tx_compl *txcp;
f31e50a8 1798 int tx_compl = 0, mcc_compl, status = 0;
6b7c5b94
SP
1799 u16 end_idx;
1800
5fb379ee 1801 while ((txcp = be_tx_compl_get(tx_cq))) {
6b7c5b94 1802 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
f31e50a8 1803 wrb_index, txcp);
6b7c5b94 1804 be_tx_compl_process(adapter, end_idx);
f31e50a8 1805 tx_compl++;
6b7c5b94
SP
1806 }
1807
f31e50a8
SP
1808 mcc_compl = be_process_mcc(adapter, &status);
1809
1810 napi_complete(napi);
1811
1812 if (mcc_compl) {
1813 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1814 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1815 }
1816
1817 if (tx_compl) {
1818 be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
5fb379ee
SP
1819
1820 /* As Tx wrbs have been freed up, wake up netdev queue if
1821 * it was stopped due to lack of tx wrbs.
1822 */
1823 if (netif_queue_stopped(adapter->netdev) &&
6b7c5b94 1824 atomic_read(&txq->used) < txq->len / 2) {
5fb379ee
SP
1825 netif_wake_queue(adapter->netdev);
1826 }
1827
3abcdeda
SP
1828 tx_stats(adapter)->be_tx_events++;
1829 tx_stats(adapter)->be_tx_compl += tx_compl;
6b7c5b94 1830 }
6b7c5b94
SP
1831
1832 return 1;
1833}
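/*
 * Unlike be_poll_rx(), this poll routine deliberately ignores 'budget':
 * it drains every TX and MCC completion, completes NAPI unconditionally
 * and always reports one unit of work, per the comment above.
 */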
1834
d053de91 1835void be_detect_dump_ue(struct be_adapter *adapter)
7c185276
AK
1836{
1837 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1838 u32 i;
1839
1840 pci_read_config_dword(adapter->pdev,
1841 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1842 pci_read_config_dword(adapter->pdev,
1843 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1844 pci_read_config_dword(adapter->pdev,
1845 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1846 pci_read_config_dword(adapter->pdev,
1847 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1848
1849 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1850 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1851
d053de91
AK
1852 if (ue_status_lo || ue_status_hi) {
1853 adapter->ue_detected = true;
7acc2087 1854 adapter->eeh_err = true;
d053de91
AK
1855 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1856 }
1857
7c185276
AK
1858 if (ue_status_lo) {
1859 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1860 if (ue_status_lo & 1)
1861 dev_err(&adapter->pdev->dev,
1862 "UE: %s bit set\n", ue_status_low_desc[i]);
1863 }
1864 }
1865 if (ue_status_hi) {
1866 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1867 if (ue_status_hi & 1)
1868 dev_err(&adapter->pdev->dev,
1869 "UE: %s bit set\n", ue_status_hi_desc[i]);
1870 }
1871 }
1872
1873}
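/*
 * Worked example for the decode loops above: a post-mask low word of
 * 0x21 (bits 0 and 5 set) reports "CEV" and "MPU" from
 * ue_status_low_desc. A hypothetical helper that factors out the
 * shared bit-walk could look like this sketch:
 */
static void be_dump_ue_bits(struct be_adapter *adapter, u32 status,
			char **desc)
{
	u32 i;

	for (i = 0; status; status >>= 1, i++) {
		if (status & 1)
			dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", desc[i]);
	}
}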
1874
ea1dae11
SP
1875static void be_worker(struct work_struct *work)
1876{
1877 struct be_adapter *adapter =
1878 container_of(work, struct be_adapter, work.work);
3abcdeda
SP
1879 struct be_rx_obj *rxo;
1880 int i;
ea1dae11 1881
16da8250
SP
1882 if (!adapter->ue_detected && !lancer_chip(adapter))
1883 be_detect_dump_ue(adapter);
1884
f203af70
SK
1885 /* When interrupts are not yet enabled, just reap any pending
1886 * MCC completions */
1887 if (!netif_running(adapter->netdev)) {
1888 int mcc_compl, status = 0;
1889
1890 mcc_compl = be_process_mcc(adapter, &status);
1891
1892 if (mcc_compl) {
1893 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1894 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
1895 }
9b037f38 1896
f203af70
SK
1897 goto reschedule;
1898 }
1899
b2aebe6d 1900 if (!adapter->stats_cmd_sent)
3abcdeda 1901 be_cmd_get_stats(adapter, &adapter->stats_cmd);
ea1dae11 1902
4097f663 1903 be_tx_rate_update(adapter);
4097f663 1904
3abcdeda
SP
1905 for_all_rx_queues(adapter, rxo, i) {
1906 be_rx_rate_update(rxo);
1907 be_rx_eqd_update(adapter, rxo);
1908
1909 if (rxo->rx_post_starved) {
1910 rxo->rx_post_starved = false;
1829b086 1911 be_post_rx_frags(rxo, GFP_KERNEL);
3abcdeda 1912 }
ea1dae11
SP
1913 }
1914
f203af70 1915reschedule:
e74fbd03 1916 adapter->work_counter++;
ea1dae11
SP
1917 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
1918}
1919
8d56ff11
SP
1920static void be_msix_disable(struct be_adapter *adapter)
1921{
ac6a0c4a 1922 if (msix_enabled(adapter)) {
8d56ff11 1923 pci_disable_msix(adapter->pdev);
ac6a0c4a 1924 adapter->num_msix_vec = 0;
3abcdeda
SP
1925 }
1926}
1927
6b7c5b94
SP
1928static void be_msix_enable(struct be_adapter *adapter)
1929{
3abcdeda 1930#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
ac6a0c4a 1931 int i, status, num_vec;
6b7c5b94 1932
ac6a0c4a 1933 num_vec = be_num_rxqs_want(adapter) + 1;
3abcdeda 1934
ac6a0c4a 1935 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
1936 adapter->msix_entries[i].entry = i;
1937
ac6a0c4a 1938 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
3abcdeda
SP
1939 if (status == 0) {
1940 goto done;
1941 } else if (status >= BE_MIN_MSIX_VECTORS) {
ac6a0c4a 1942 num_vec = status;
3abcdeda 1943 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
ac6a0c4a 1944 num_vec) == 0)
3abcdeda 1945 goto done;
3abcdeda
SP
1946 }
1947 return;
1948done:
ac6a0c4a
SP
1949 adapter->num_msix_vec = num_vec;
1950 return;
6b7c5b94
SP
1951}
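/*
 * A minimal sketch of the negotiation above, assuming the legacy
 * pci_enable_msix() semantics: 0 means all requested vectors were
 * granted, a positive return is the number of vectors actually
 * available (worth one retry), and a negative value is a hard error.
 * The helper name is hypothetical.
 */
static int be_try_enable_msix(struct pci_dev *pdev,
			struct msix_entry *entries, int want, int min)
{
	int ret = pci_enable_msix(pdev, entries, want);

	if (ret > 0 && ret >= min)
		ret = pci_enable_msix(pdev, entries, ret);

	return ret;
}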
1952
ba343c77
SB
1953static void be_sriov_enable(struct be_adapter *adapter)
1954{
344dbf10 1955 be_check_sriov_fn_type(adapter);
6dedec81 1956#ifdef CONFIG_PCI_IOV
ba343c77 1957 if (be_physfn(adapter) && num_vfs) {
81be8f0a
AK
1958 int status, pos;
1959 u16 nvfs;
1960
1961 pos = pci_find_ext_capability(adapter->pdev,
1962 PCI_EXT_CAP_ID_SRIOV);
1963 pci_read_config_word(adapter->pdev,
1964 pos + PCI_SRIOV_TOTAL_VF, &nvfs);
1965
1966 if (num_vfs > nvfs) {
1967 dev_info(&adapter->pdev->dev,
1968 "Device supports %d VFs and not %d\n",
1969 nvfs, num_vfs);
1970 num_vfs = nvfs;
1971 }
6dedec81 1972
ba343c77
SB
1973 status = pci_enable_sriov(adapter->pdev, num_vfs);
1974 adapter->sriov_enabled = status ? false : true;
1975 }
1976#endif
ba343c77
SB
1977}
1978
1979static void be_sriov_disable(struct be_adapter *adapter)
1980{
1981#ifdef CONFIG_PCI_IOV
1982 if (adapter->sriov_enabled) {
1983 pci_disable_sriov(adapter->pdev);
1984 adapter->sriov_enabled = false;
1985 }
1986#endif
1987}
1988
fe6d2a38
SP
1989static inline int be_msix_vec_get(struct be_adapter *adapter,
1990 struct be_eq_obj *eq_obj)
6b7c5b94 1991{
ecd62107 1992 return adapter->msix_entries[eq_obj->eq_idx].vector;
6b7c5b94
SP
1993}
1994
b628bde2
SP
1995static int be_request_irq(struct be_adapter *adapter,
1996 struct be_eq_obj *eq_obj,
3abcdeda 1997 void *handler, char *desc, void *context)
6b7c5b94
SP
1998{
1999 struct net_device *netdev = adapter->netdev;
b628bde2
SP
2000 int vec;
2001
2002 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
fe6d2a38 2003 vec = be_msix_vec_get(adapter, eq_obj);
3abcdeda 2004 return request_irq(vec, handler, 0, eq_obj->desc, context);
b628bde2
SP
2005}
2006
3abcdeda
SP
2007static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2008 void *context)
b628bde2 2009{
fe6d2a38 2010 int vec = be_msix_vec_get(adapter, eq_obj);
3abcdeda 2011 free_irq(vec, context);
b628bde2 2012}
6b7c5b94 2013
b628bde2
SP
2014static int be_msix_register(struct be_adapter *adapter)
2015{
3abcdeda
SP
2016 struct be_rx_obj *rxo;
2017 int status, i;
2018 char qname[10];
b628bde2 2019
3abcdeda
SP
2020 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2021 adapter);
6b7c5b94
SP
2022 if (status)
2023 goto err;
2024
3abcdeda
SP
2025 for_all_rx_queues(adapter, rxo, i) {
2026 sprintf(qname, "rxq%d", i);
2027 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2028 qname, rxo);
2029 if (status)
2030 goto err_msix;
2031 }
b628bde2 2032
6b7c5b94 2033 return 0;
b628bde2 2034
3abcdeda
SP
2035err_msix:
2036 be_free_irq(adapter, &adapter->tx_eq, adapter);
2037
2038 for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2039 be_free_irq(adapter, &rxo->rx_eq, rxo);
2040
6b7c5b94
SP
2041err:
2042 dev_warn(&adapter->pdev->dev,
2043 "MSIX Request IRQ failed - err %d\n", status);
ac6a0c4a 2044 be_msix_disable(adapter);
6b7c5b94
SP
2045 return status;
2046}
2047
2048static int be_irq_register(struct be_adapter *adapter)
2049{
2050 struct net_device *netdev = adapter->netdev;
2051 int status;
2052
ac6a0c4a 2053 if (msix_enabled(adapter)) {
6b7c5b94
SP
2054 status = be_msix_register(adapter);
2055 if (status == 0)
2056 goto done;
ba343c77
SB
2057 /* INTx is not supported for VF */
2058 if (!be_physfn(adapter))
2059 return status;
6b7c5b94
SP
2060 }
2061
2062 /* INTx */
2063 netdev->irq = adapter->pdev->irq;
2064 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2065 adapter);
2066 if (status) {
2067 dev_err(&adapter->pdev->dev,
2068 "INTx request IRQ failed - err %d\n", status);
2069 return status;
2070 }
2071done:
2072 adapter->isr_registered = true;
2073 return 0;
2074}
2075
2076static void be_irq_unregister(struct be_adapter *adapter)
2077{
2078 struct net_device *netdev = adapter->netdev;
3abcdeda
SP
2079 struct be_rx_obj *rxo;
2080 int i;
6b7c5b94
SP
2081
2082 if (!adapter->isr_registered)
2083 return;
2084
2085 /* INTx */
ac6a0c4a 2086 if (!msix_enabled(adapter)) {
6b7c5b94
SP
2087 free_irq(netdev->irq, adapter);
2088 goto done;
2089 }
2090
2091 /* MSIx */
3abcdeda
SP
2092 be_free_irq(adapter, &adapter->tx_eq, adapter);
2093
2094 for_all_rx_queues(adapter, rxo, i)
2095 be_free_irq(adapter, &rxo->rx_eq, rxo);
2096
6b7c5b94
SP
2097done:
2098 adapter->isr_registered = false;
6b7c5b94
SP
2099}
2100
889cd4b2
SP
2101static int be_close(struct net_device *netdev)
2102{
2103 struct be_adapter *adapter = netdev_priv(netdev);
3abcdeda 2104 struct be_rx_obj *rxo;
889cd4b2 2105 struct be_eq_obj *tx_eq = &adapter->tx_eq;
3abcdeda 2106 int vec, i;
889cd4b2 2107
889cd4b2
SP
2108 be_async_mcc_disable(adapter);
2109
889cd4b2
SP
2110 netif_carrier_off(netdev);
2111 adapter->link_up = false;
2112
fe6d2a38
SP
2113 if (!lancer_chip(adapter))
2114 be_intr_set(adapter, false);
889cd4b2 2115
63fcb27f
PR
2116 for_all_rx_queues(adapter, rxo, i)
2117 napi_disable(&rxo->rx_eq.napi);
2118
2119 napi_disable(&tx_eq->napi);
2120
2121 if (lancer_chip(adapter)) {
2122 be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
2123 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2124 for_all_rx_queues(adapter, rxo, i)
2125 be_cq_notify(adapter, rxo->cq.id, false, 0);
2126 }
2127
ac6a0c4a 2128 if (msix_enabled(adapter)) {
fe6d2a38 2129 vec = be_msix_vec_get(adapter, tx_eq);
889cd4b2 2130 synchronize_irq(vec);
3abcdeda
SP
2131
2132 for_all_rx_queues(adapter, rxo, i) {
fe6d2a38 2133 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
3abcdeda
SP
2134 synchronize_irq(vec);
2135 }
889cd4b2
SP
2136 } else {
2137 synchronize_irq(netdev->irq);
2138 }
2139 be_irq_unregister(adapter);
2140
889cd4b2
SP
2141 /* Wait for all pending tx completions to arrive so that
2142 * all tx skbs are freed.
2143 */
2144 be_tx_compl_clean(adapter);
2145
2146 return 0;
2147}
2148
6b7c5b94
SP
2149static int be_open(struct net_device *netdev)
2150{
2151 struct be_adapter *adapter = netdev_priv(netdev);
6b7c5b94 2152 struct be_eq_obj *tx_eq = &adapter->tx_eq;
3abcdeda 2153 struct be_rx_obj *rxo;
a8f447bd 2154 bool link_up;
3abcdeda 2155 int status, i;
0388f251
SB
2156 u8 mac_speed;
2157 u16 link_speed;
5fb379ee 2158
3abcdeda 2159 for_all_rx_queues(adapter, rxo, i) {
1829b086 2160 be_post_rx_frags(rxo, GFP_KERNEL);
3abcdeda
SP
2161 napi_enable(&rxo->rx_eq.napi);
2162 }
5fb379ee
SP
2163 napi_enable(&tx_eq->napi);
2164
2165 be_irq_register(adapter);
2166
fe6d2a38
SP
2167 if (!lancer_chip(adapter))
2168 be_intr_set(adapter, true);
5fb379ee
SP
2169
2170 /* The evt queues are created in unarmed state; arm them */
3abcdeda
SP
2171 for_all_rx_queues(adapter, rxo, i) {
2172 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2173 be_cq_notify(adapter, rxo->cq.id, true, 0);
2174 }
8788fdc2 2175 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
5fb379ee 2176
7a1e9b20
SP
2177 /* Now that interrupts are on we can process async mcc */
2178 be_async_mcc_enable(adapter);
2179
0388f251 2180 status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
187e8756 2181 &link_speed, 0);
a8f447bd 2182 if (status)
889cd4b2 2183 goto err;
a8f447bd 2184 be_link_status_update(adapter, link_up);
5fb379ee 2185
889cd4b2 2186 if (be_physfn(adapter)) {
1da87b7f 2187 status = be_vid_config(adapter, false, 0);
889cd4b2
SP
2188 if (status)
2189 goto err;
4f2aa89c 2190
ba343c77
SB
2191 status = be_cmd_set_flow_control(adapter,
2192 adapter->tx_fc, adapter->rx_fc);
2193 if (status)
889cd4b2 2194 goto err;
ba343c77 2195 }
4f2aa89c 2196
889cd4b2
SP
2197 return 0;
2198err:
2199 be_close(adapter->netdev);
2200 return -EIO;
5fb379ee
SP
2201}
2202
71d8d1b5
AK
2203static int be_setup_wol(struct be_adapter *adapter, bool enable)
2204{
2205 struct be_dma_mem cmd;
2206 int status = 0;
2207 u8 mac[ETH_ALEN];
2208
2209 memset(mac, 0, ETH_ALEN);
2210
2211 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2b7bcebf
IV
2212 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2213 GFP_KERNEL);
71d8d1b5
AK
2214 if (cmd.va == NULL)
2215 return -1;
2216 memset(cmd.va, 0, cmd.size);
2217
2218 if (enable) {
2219 status = pci_write_config_dword(adapter->pdev,
2220 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2221 if (status) {
2222 dev_err(&adapter->pdev->dev,
2381a55c 2223 "Could not enable Wake-on-LAN\n");
2b7bcebf
IV
2224 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2225 cmd.dma);
71d8d1b5
AK
2226 return status;
2227 }
2228 status = be_cmd_enable_magic_wol(adapter,
2229 adapter->netdev->dev_addr, &cmd);
2230 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2231 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2232 } else {
2233 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2234 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2235 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2236 }
2237
2b7bcebf 2238 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
2239 return status;
2240}
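/*
 * Flow summary: enabling WoL sets the PM control bits in PCI config
 * space, programs the netdev MAC as the magic-packet filter via
 * be_cmd_enable_magic_wol() and arms wake from D3hot and D3cold;
 * disabling programs an all-zero MAC and disarms both wake states.
 */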
2241
6d87f5c3
AK
2242/*
2243 * Generate a seed MAC address from the PF MAC address using jhash.
2244 * MAC addresses for VFs are assigned incrementally starting from the seed.
2245 * These addresses are programmed in the ASIC by the PF and the VF driver
2246 * queries for the MAC address during its probe.
2247 */
2248static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2249{
2250 u32 vf = 0;
3abcdeda 2251 int status = 0;
6d87f5c3
AK
2252 u8 mac[ETH_ALEN];
2253
2254 be_vf_eth_addr_generate(adapter, mac);
2255
2256 for (vf = 0; vf < num_vfs; vf++) {
2257 status = be_cmd_pmac_add(adapter, mac,
2258 adapter->vf_cfg[vf].vf_if_handle,
f8617e08
AK
2259 &adapter->vf_cfg[vf].vf_pmac_id,
2260 vf + 1);
6d87f5c3
AK
2261 if (status)
2262 dev_err(&adapter->pdev->dev,
2263 "Mac address add failed for VF %d\n", vf);
2264 else
2265 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2266
2267 mac[5] += 1;
2268 }
2269 return status;
2270}
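/*
 * Note that only mac[5] is bumped per VF, so all VF addresses share the
 * seed's first five octets; the u8 increment wraps at 0xff without a
 * carry into mac[4], which is presumably acceptable for the small
 * num_vfs values involved.
 */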
2271
2272static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2273{
2274 u32 vf;
2275
2276 for (vf = 0; vf < num_vfs; vf++) {
2277 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2278 be_cmd_pmac_del(adapter,
2279 adapter->vf_cfg[vf].vf_if_handle,
f8617e08 2280 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
6d87f5c3
AK
2281 }
2282}
2283
5fb379ee
SP
2284static int be_setup(struct be_adapter *adapter)
2285{
5fb379ee 2286 struct net_device *netdev = adapter->netdev;
ba343c77 2287 u32 cap_flags, en_flags, vf = 0;
6b7c5b94 2288 int status;
ba343c77
SB
2289 u8 mac[ETH_ALEN];
2290
f21b538c
PR
2291 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2292 BE_IF_FLAGS_BROADCAST |
2293 BE_IF_FLAGS_MULTICAST;
6b7c5b94 2294
ba343c77
SB
2295 if (be_physfn(adapter)) {
2296 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2297 BE_IF_FLAGS_PROMISCUOUS |
2298 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2299 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
3abcdeda 2300
ac6a0c4a 2301 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
3abcdeda
SP
2302 cap_flags |= BE_IF_FLAGS_RSS;
2303 en_flags |= BE_IF_FLAGS_RSS;
2304 }
ba343c77 2305 }
73d540f2
SP
2306
2307 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2308 netdev->dev_addr, false/* pmac_invalid */,
ba343c77 2309 &adapter->if_handle, &adapter->pmac_id, 0);
6b7c5b94
SP
2310 if (status != 0)
2311 goto do_none;
2312
ba343c77 2313 if (be_physfn(adapter)) {
c99ac3e7
AK
2314 if (adapter->sriov_enabled) {
2315 while (vf < num_vfs) {
2316 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2317 BE_IF_FLAGS_BROADCAST;
2318 status = be_cmd_if_create(adapter, cap_flags,
2319 en_flags, mac, true,
64600ea5 2320 &adapter->vf_cfg[vf].vf_if_handle,
ba343c77 2321 NULL, vf+1);
c99ac3e7
AK
2322 if (status) {
2323 dev_err(&adapter->pdev->dev,
2324 "Interface Create failed for VF %d\n",
2325 vf);
2326 goto if_destroy;
2327 }
2328 adapter->vf_cfg[vf].vf_pmac_id =
2329 BE_INVALID_PMAC_ID;
2330 vf++;
ba343c77 2331 }
84e5b9f7 2332 }
c99ac3e7 2333 } else {
ba343c77
SB
2334 status = be_cmd_mac_addr_query(adapter, mac,
2335 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2336 if (!status) {
2337 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2338 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2339 }
2340 }
2341
6b7c5b94
SP
2342 status = be_tx_queues_create(adapter);
2343 if (status != 0)
2344 goto if_destroy;
2345
2346 status = be_rx_queues_create(adapter);
2347 if (status != 0)
2348 goto tx_qs_destroy;
2349
5fb379ee
SP
2350 status = be_mcc_queues_create(adapter);
2351 if (status != 0)
2352 goto rx_qs_destroy;
6b7c5b94 2353
0dffc83e
AK
2354 adapter->link_speed = -1;
2355
6b7c5b94
SP
2356 return 0;
2357
5fb379ee
SP
2358rx_qs_destroy:
2359 be_rx_queues_destroy(adapter);
6b7c5b94
SP
2360tx_qs_destroy:
2361 be_tx_queues_destroy(adapter);
2362if_destroy:
c99ac3e7
AK
2363 if (be_physfn(adapter) && adapter->sriov_enabled)
2364 for (vf = 0; vf < num_vfs; vf++)
2365 if (adapter->vf_cfg[vf].vf_if_handle)
2366 be_cmd_if_destroy(adapter,
658681f7
AK
2367 adapter->vf_cfg[vf].vf_if_handle,
2368 vf + 1);
2369 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
6b7c5b94
SP
2370do_none:
2371 return status;
2372}
2373
5fb379ee
SP
2374static int be_clear(struct be_adapter *adapter)
2375{
7ab8b0b4
AK
2376 int vf;
2377
c99ac3e7 2378 if (be_physfn(adapter) && adapter->sriov_enabled)
6d87f5c3
AK
2379 be_vf_eth_addr_rem(adapter);
2380
1a8887d8 2381 be_mcc_queues_destroy(adapter);
5fb379ee
SP
2382 be_rx_queues_destroy(adapter);
2383 be_tx_queues_destroy(adapter);
1f5db833 2384 adapter->eq_next_idx = 0;
5fb379ee 2385
7ab8b0b4
AK
2386 if (be_physfn(adapter) && adapter->sriov_enabled)
2387 for (vf = 0; vf < num_vfs; vf++)
2388 if (adapter->vf_cfg[vf].vf_if_handle)
2389 be_cmd_if_destroy(adapter,
2390 adapter->vf_cfg[vf].vf_if_handle,
2391 vf + 1);
2392
658681f7 2393 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
5fb379ee 2394
2243e2e9
SP
2395 /* tell fw we're done with firing cmds */
2396 be_cmd_fw_clean(adapter);
5fb379ee
SP
2397 return 0;
2398}
2399
6b7c5b94 2400
84517482 2401#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
fa9a6fed 2402static bool be_flash_redboot(struct be_adapter *adapter,
3f0d4560
AK
2403 const u8 *p, u32 img_start, int image_size,
2404 int hdr_size)
fa9a6fed
SB
2405{
2406 u32 crc_offset;
2407 u8 flashed_crc[4];
2408 int status;
3f0d4560
AK
2409
2410 crc_offset = hdr_size + img_start + image_size - 4;
2411
fa9a6fed 2412 p += crc_offset;
3f0d4560
AK
2413
2414 status = be_cmd_get_flash_crc(adapter, flashed_crc,
f510fc64 2415 (image_size - 4));
fa9a6fed
SB
2416 if (status) {
2417 dev_err(&adapter->pdev->dev,
2418 "could not get crc from flash, not flashing redboot\n");
2419 return false;
2420 }
2421
2422 /* update redboot only if CRC does not match */
2423 if (!memcmp(flashed_crc, p, 4))
2424 return false;
2425 else
2426 return true;
fa9a6fed
SB
2427}
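/*
 * Layout assumed by the check above: a flash component stores its CRC
 * in its last four bytes, so crc_offset points at the trailing CRC of
 * the candidate image within the UFI file, while be_cmd_get_flash_crc()
 * fetches the CRC of the copy already resident in flash.
 */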
2428
3f0d4560 2429static int be_flash_data(struct be_adapter *adapter,
84517482 2430 const struct firmware *fw,
3f0d4560
AK
2431 struct be_dma_mem *flash_cmd, int num_of_images)
2432
84517482 2433{
3f0d4560
AK
2434 int status = 0, i, filehdr_size = 0;
2435 u32 total_bytes = 0, flash_op;
84517482
AK
2436 int num_bytes;
2437 const u8 *p = fw->data;
2438 struct be_cmd_write_flashrom *req = flash_cmd->va;
215faf9c 2439 const struct flash_comp *pflashcomp;
9fe96934 2440 int num_comp;
3f0d4560 2441
215faf9c 2442 static const struct flash_comp gen3_flash_types[9] = {
3f0d4560
AK
2443 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2444 FLASH_IMAGE_MAX_SIZE_g3},
2445 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2446 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2447 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2448 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2449 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2450 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2451 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2452 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2453 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2454 FLASH_IMAGE_MAX_SIZE_g3},
2455 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2456 FLASH_IMAGE_MAX_SIZE_g3},
2457 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
9fe96934
SB
2458 FLASH_IMAGE_MAX_SIZE_g3},
2459 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2460 FLASH_NCSI_IMAGE_MAX_SIZE_g3}
3f0d4560 2461 };
215faf9c 2462 static const struct flash_comp gen2_flash_types[8] = {
3f0d4560
AK
2463 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2464 FLASH_IMAGE_MAX_SIZE_g2},
2465 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2466 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2467 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2468 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2469 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2470 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2471 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2472 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2473 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2474 FLASH_IMAGE_MAX_SIZE_g2},
2475 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2476 FLASH_IMAGE_MAX_SIZE_g2},
2477 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2478 FLASH_IMAGE_MAX_SIZE_g2}
2479 };
2480
2481 if (adapter->generation == BE_GEN3) {
2482 pflashcomp = gen3_flash_types;
2483 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 2484 num_comp = ARRAY_SIZE(gen3_flash_types);
3f0d4560
AK
2485 } else {
2486 pflashcomp = gen2_flash_types;
2487 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 2488 num_comp = ARRAY_SIZE(gen2_flash_types);
84517482 2489 }
9fe96934
SB
2490 for (i = 0; i < num_comp; i++) {
2491 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2492 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2493 continue;
3f0d4560
AK
2494 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2495 (!be_flash_redboot(adapter, fw->data,
fae21a4d
AK
2496 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2497 (num_of_images * sizeof(struct image_hdr)))))
3f0d4560
AK
2498 continue;
2499 p = fw->data;
2500 p += filehdr_size + pflashcomp[i].offset
2501 + (num_of_images * sizeof(struct image_hdr));
2502 if (p + pflashcomp[i].size > fw->data + fw->size)
84517482 2503 return -1;
3f0d4560
AK
2504 total_bytes = pflashcomp[i].size;
2505 while (total_bytes) {
2506 if (total_bytes > 32*1024)
2507 num_bytes = 32*1024;
2508 else
2509 num_bytes = total_bytes;
2510 total_bytes -= num_bytes;
2511
2512 if (!total_bytes)
2513 flash_op = FLASHROM_OPER_FLASH;
2514 else
2515 flash_op = FLASHROM_OPER_SAVE;
2516 memcpy(req->params.data_buf, p, num_bytes);
2517 p += num_bytes;
2518 status = be_cmd_write_flashrom(adapter, flash_cmd,
2519 pflashcomp[i].optype, flash_op, num_bytes);
2520 if (status) {
2521 dev_err(&adapter->pdev->dev,
2522 "cmd to write to flash rom failed.\n");
2523 return -1;
2524 }
2525 yield();
84517482 2526 }
84517482 2527 }
84517482
AK
2528 return 0;
2529}
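/*
 * Worked example for the write loop above: a 100 KB (102400 byte)
 * component goes out as three 32 KB FLASHROM_OPER_SAVE chunks (98304
 * bytes buffered) followed by one 4096-byte FLASHROM_OPER_FLASH chunk,
 * which commits the accumulated data to flash.
 */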
2530
3f0d4560
AK
2531static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2532{
2533 if (fhdr == NULL)
2534 return 0;
2535 if (fhdr->build[0] == '3')
2536 return BE_GEN3;
2537 else if (fhdr->build[0] == '2')
2538 return BE_GEN2;
2539 else
2540 return 0;
2541}
2542
84517482
AK
2543int be_load_fw(struct be_adapter *adapter, u8 *func)
2544{
2545 char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
2546 const struct firmware *fw;
3f0d4560
AK
2547 struct flash_file_hdr_g2 *fhdr;
2548 struct flash_file_hdr_g3 *fhdr3;
2549 struct image_hdr *img_hdr_ptr = NULL;
84517482 2550 struct be_dma_mem flash_cmd;
8b93b710 2551 int status, i = 0, num_imgs = 0;
84517482 2552 const u8 *p;
84517482 2553
d9efd2af
SB
2554 if (!netif_running(adapter->netdev)) {
2555 dev_err(&adapter->pdev->dev,
2556 "Firmware load not allowed (interface is down)\n");
2557 return -EPERM;
2558 }
2559
84517482
AK
2560 strcpy(fw_file, func);
2561
2562 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2563 if (status)
2564 goto fw_exit;
2565
2566 p = fw->data;
3f0d4560 2567 fhdr = (struct flash_file_hdr_g2 *) p;
84517482
AK
2568 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2569
84517482 2570 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2b7bcebf
IV
2571 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2572 &flash_cmd.dma, GFP_KERNEL);
84517482
AK
2573 if (!flash_cmd.va) {
2574 status = -ENOMEM;
2575 dev_err(&adapter->pdev->dev,
2576 "Memory allocation failure while flashing\n");
2577 goto fw_exit;
2578 }
2579
3f0d4560
AK
2580 if ((adapter->generation == BE_GEN3) &&
2581 (get_ufigen_type(fhdr) == BE_GEN3)) {
2582 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
8b93b710
AK
2583 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2584 for (i = 0; i < num_imgs; i++) {
3f0d4560
AK
2585 img_hdr_ptr = (struct image_hdr *) (fw->data +
2586 (sizeof(struct flash_file_hdr_g3) +
8b93b710
AK
2587 i * sizeof(struct image_hdr)));
2588 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2589 status = be_flash_data(adapter, fw, &flash_cmd,
2590 num_imgs);
3f0d4560
AK
2591 }
2592 } else if ((adapter->generation == BE_GEN2) &&
2593 (get_ufigen_type(fhdr) == BE_GEN2)) {
2594 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2595 } else {
2596 dev_err(&adapter->pdev->dev,
2597 "UFI and Interface are not compatible for flashing\n");
2598 status = -1;
84517482
AK
2599 }
2600
2b7bcebf
IV
2601 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2602 flash_cmd.dma);
84517482
AK
2603 if (status) {
2604 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2605 goto fw_exit;
2606 }
2607
af901ca1 2608 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
84517482
AK
2609
2610fw_exit:
2611 release_firmware(fw);
2612 return status;
2613}
2614
6b7c5b94
SP
2615static struct net_device_ops be_netdev_ops = {
2616 .ndo_open = be_open,
2617 .ndo_stop = be_close,
2618 .ndo_start_xmit = be_xmit,
6b7c5b94
SP
2619 .ndo_set_rx_mode = be_set_multicast_list,
2620 .ndo_set_mac_address = be_mac_addr_set,
2621 .ndo_change_mtu = be_change_mtu,
2622 .ndo_validate_addr = eth_validate_addr,
2623 .ndo_vlan_rx_register = be_vlan_register,
2624 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
2625 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
64600ea5 2626 .ndo_set_vf_mac = be_set_vf_mac,
1da87b7f 2627 .ndo_set_vf_vlan = be_set_vf_vlan,
e1d18735 2628 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
64600ea5 2629 .ndo_get_vf_config = be_get_vf_config
6b7c5b94
SP
2630};
2631
2632static void be_netdev_init(struct net_device *netdev)
2633{
2634 struct be_adapter *adapter = netdev_priv(netdev);
3abcdeda
SP
2635 struct be_rx_obj *rxo;
2636 int i;
6b7c5b94 2637
6332c8d3 2638 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
8b8ddc68
MM
2639 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
2640 NETIF_F_HW_VLAN_TX;
2641 if (be_multi_rxq(adapter))
2642 netdev->hw_features |= NETIF_F_RXHASH;
6332c8d3
MM
2643
2644 netdev->features |= netdev->hw_features |
8b8ddc68 2645 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
4b972914 2646
79032644
MM
2647 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
2648 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
51c59870 2649
fe6d2a38
SP
2650 if (lancer_chip(adapter))
2651 netdev->vlan_features |= NETIF_F_TSO6;
2652
6b7c5b94
SP
2653 netdev->flags |= IFF_MULTICAST;
2654
9e90c961
AK
2655 /* Default settings for Rx and Tx flow control */
2656 adapter->rx_fc = true;
2657 adapter->tx_fc = true;
2658
c190e3c8
AK
2659 netif_set_gso_max_size(netdev, 65535);
2660
6b7c5b94
SP
2661 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2662
2663 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2664
3abcdeda
SP
2665 for_all_rx_queues(adapter, rxo, i)
2666 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2667 BE_NAPI_WEIGHT);
2668
5fb379ee 2669 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
6b7c5b94 2670 BE_NAPI_WEIGHT);
6b7c5b94
SP
2671}
2672
2673static void be_unmap_pci_bars(struct be_adapter *adapter)
2674{
8788fdc2
SP
2675 if (adapter->csr)
2676 iounmap(adapter->csr);
2677 if (adapter->db)
2678 iounmap(adapter->db);
ba343c77 2679 if (adapter->pcicfg && be_physfn(adapter))
8788fdc2 2680 iounmap(adapter->pcicfg);
6b7c5b94
SP
2681}
2682
2683static int be_map_pci_bars(struct be_adapter *adapter)
2684{
2685 u8 __iomem *addr;
ba343c77 2686 int pcicfg_reg, db_reg;
6b7c5b94 2687
fe6d2a38
SP
2688 if (lancer_chip(adapter)) {
2689 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2690 pci_resource_len(adapter->pdev, 0));
2691 if (addr == NULL)
2692 return -ENOMEM;
2693 adapter->db = addr;
2694 return 0;
2695 }
2696
ba343c77
SB
2697 if (be_physfn(adapter)) {
2698 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2699 pci_resource_len(adapter->pdev, 2));
2700 if (addr == NULL)
2701 return -ENOMEM;
2702 adapter->csr = addr;
2703 }
6b7c5b94 2704
ba343c77 2705 if (adapter->generation == BE_GEN2) {
7b139c83 2706 pcicfg_reg = 1;
ba343c77
SB
2707 db_reg = 4;
2708 } else {
7b139c83 2709 pcicfg_reg = 0;
ba343c77
SB
2710 if (be_physfn(adapter))
2711 db_reg = 4;
2712 else
2713 db_reg = 0;
2714 }
2715 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2716 pci_resource_len(adapter->pdev, db_reg));
6b7c5b94
SP
2717 if (addr == NULL)
2718 goto pci_map_err;
ba343c77
SB
2719 adapter->db = addr;
2720
2721 if (be_physfn(adapter)) {
2722 addr = ioremap_nocache(
2723 pci_resource_start(adapter->pdev, pcicfg_reg),
2724 pci_resource_len(adapter->pdev, pcicfg_reg));
2725 if (addr == NULL)
2726 goto pci_map_err;
2727 adapter->pcicfg = addr;
2728 } else
2729 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
6b7c5b94
SP
2730
2731 return 0;
2732pci_map_err:
2733 be_unmap_pci_bars(adapter);
2734 return -ENOMEM;
2735}
2736
2737
2738static void be_ctrl_cleanup(struct be_adapter *adapter)
2739{
8788fdc2 2740 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
6b7c5b94
SP
2741
2742 be_unmap_pci_bars(adapter);
2743
2744 if (mem->va)
2b7bcebf
IV
2745 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2746 mem->dma);
e7b909a6
SP
2747
2748 mem = &adapter->mc_cmd_mem;
2749 if (mem->va)
2b7bcebf
IV
2750 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2751 mem->dma);
6b7c5b94
SP
2752}
2753
6b7c5b94
SP
2754static int be_ctrl_init(struct be_adapter *adapter)
2755{
8788fdc2
SP
2756 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
2757 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
e7b909a6 2758 struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
6b7c5b94 2759 int status;
6b7c5b94
SP
2760
2761 status = be_map_pci_bars(adapter);
2762 if (status)
e7b909a6 2763 goto done;
6b7c5b94
SP
2764
2765 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2b7bcebf
IV
2766 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
2767 mbox_mem_alloc->size,
2768 &mbox_mem_alloc->dma,
2769 GFP_KERNEL);
6b7c5b94 2770 if (!mbox_mem_alloc->va) {
e7b909a6
SP
2771 status = -ENOMEM;
2772 goto unmap_pci_bars;
6b7c5b94 2773 }
e7b909a6 2774
6b7c5b94
SP
2775 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
2776 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
2777 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
2778 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
e7b909a6
SP
2779
2780 mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
2b7bcebf
IV
2781 mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
2782 mc_cmd_mem->size, &mc_cmd_mem->dma,
2783 GFP_KERNEL);
e7b909a6
SP
2784 if (mc_cmd_mem->va == NULL) {
2785 status = -ENOMEM;
2786 goto free_mbox;
2787 }
2788 memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
2789
2984961c 2790 mutex_init(&adapter->mbox_lock);
8788fdc2
SP
2791 spin_lock_init(&adapter->mcc_lock);
2792 spin_lock_init(&adapter->mcc_cq_lock);
a8f447bd 2793
dd131e76 2794 init_completion(&adapter->flash_compl);
cf588477 2795 pci_save_state(adapter->pdev);
6b7c5b94 2796 return 0;
e7b909a6
SP
2797
2798free_mbox:
2b7bcebf
IV
2799 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
2800 mbox_mem_alloc->va, mbox_mem_alloc->dma);
e7b909a6
SP
2801
2802unmap_pci_bars:
2803 be_unmap_pci_bars(adapter);
2804
2805done:
2806 return status;
6b7c5b94
SP
2807}
2808
2809static void be_stats_cleanup(struct be_adapter *adapter)
2810{
3abcdeda 2811 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94
SP
2812
2813 if (cmd->va)
2b7bcebf
IV
2814 dma_free_coherent(&adapter->pdev->dev, cmd->size,
2815 cmd->va, cmd->dma);
6b7c5b94
SP
2816}
2817
2818static int be_stats_init(struct be_adapter *adapter)
2819{
3abcdeda 2820 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94
SP
2821
2822 cmd->size = sizeof(struct be_cmd_req_get_stats);
2b7bcebf
IV
2823 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
2824 GFP_KERNEL);
6b7c5b94
SP
2825 if (cmd->va == NULL)
2826 return -1;
d291b9af 2827 memset(cmd->va, 0, cmd->size);
6b7c5b94
SP
2828 return 0;
2829}
2830
2831static void __devexit be_remove(struct pci_dev *pdev)
2832{
2833 struct be_adapter *adapter = pci_get_drvdata(pdev);
8d56ff11 2834
6b7c5b94
SP
2835 if (!adapter)
2836 return;
2837
f203af70
SK
2838 cancel_delayed_work_sync(&adapter->work);
2839
6b7c5b94
SP
2840 unregister_netdev(adapter->netdev);
2841
5fb379ee
SP
2842 be_clear(adapter);
2843
6b7c5b94
SP
2844 be_stats_cleanup(adapter);
2845
2846 be_ctrl_cleanup(adapter);
2847
48f5a191 2848 kfree(adapter->vf_cfg);
ba343c77
SB
2849 be_sriov_disable(adapter);
2850
8d56ff11 2851 be_msix_disable(adapter);
6b7c5b94
SP
2852
2853 pci_set_drvdata(pdev, NULL);
2854 pci_release_regions(pdev);
2855 pci_disable_device(pdev);
2856
2857 free_netdev(adapter->netdev);
2858}
2859
2243e2e9 2860static int be_get_config(struct be_adapter *adapter)
6b7c5b94 2861{
6b7c5b94 2862 int status;
2243e2e9 2863 u8 mac[ETH_ALEN];
6b7c5b94 2864
2243e2e9 2865 status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
6b7c5b94
SP
2866 if (status)
2867 return status;
2868
3abcdeda
SP
2869 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2870 &adapter->function_mode, &adapter->function_caps);
43a04fdc
SP
2871 if (status)
2872 return status;
2873
2243e2e9 2874 memset(mac, 0, ETH_ALEN);
ba343c77
SB
2875
2876 if (be_physfn(adapter)) {
2877 status = be_cmd_mac_addr_query(adapter, mac,
2243e2e9 2878 MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
ca9e4988 2879
ba343c77
SB
2880 if (status)
2881 return status;
ca9e4988 2882
ba343c77
SB
2883 if (!is_valid_ether_addr(mac))
2884 return -EADDRNOTAVAIL;
2885
2886 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2887 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2888 }
6b7c5b94 2889
3486be29 2890 if (adapter->function_mode & 0x400)
82903e4b
AK
2891 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
2892 else
2893 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2894
9e1453c5
AK
2895 status = be_cmd_get_cntl_attributes(adapter);
2896 if (status)
2897 return status;
2898
2e588f84 2899 be_cmd_check_native_mode(adapter);
2243e2e9 2900 return 0;
6b7c5b94
SP
2901}
2902
fe6d2a38
SP
2903static int be_dev_family_check(struct be_adapter *adapter)
2904{
2905 struct pci_dev *pdev = adapter->pdev;
2906 u32 sli_intf = 0, if_type;
2907
2908 switch (pdev->device) {
2909 case BE_DEVICE_ID1:
2910 case OC_DEVICE_ID1:
2911 adapter->generation = BE_GEN2;
2912 break;
2913 case BE_DEVICE_ID2:
2914 case OC_DEVICE_ID2:
2915 adapter->generation = BE_GEN3;
2916 break;
2917 case OC_DEVICE_ID3:
2918 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
2919 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
2920 SLI_INTF_IF_TYPE_SHIFT;
2921
2922 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
2923 if_type != 0x02) {
2924 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
2925 return -EINVAL;
2926 }
2927 if (num_vfs > 0) {
2928 dev_err(&pdev->dev, "VFs not supported\n");
2929 return -EINVAL;
2930 }
2931 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
2932 SLI_INTF_FAMILY_SHIFT);
2933 adapter->generation = BE_GEN3;
2934 break;
2935 default:
2936 adapter->generation = 0;
2937 }
2938 return 0;
2939}
2940
37eed1cb
PR
2941static int lancer_wait_ready(struct be_adapter *adapter)
2942{
2943#define SLIPORT_READY_TIMEOUT 500
2944 u32 sliport_status;
2945 int status = 0, i;
2946
2947 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
2948 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2949 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
2950 break;
2951
2952 msleep(20);
2953 }
2954
2955 if (i == SLIPORT_READY_TIMEOUT)
2956 status = -1;
2957
2958 return status;
2959}
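/*
 * Timeout arithmetic: SLIPORT_READY_TIMEOUT (500) polls with a 20 ms
 * sleep between them gives the firmware roughly ten seconds to set
 * SLIPORT_STATUS_RDY_MASK before -1 is returned.
 */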
2960
2961static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
2962{
2963 int status;
2964 u32 sliport_status, err, reset_needed;
2965 status = lancer_wait_ready(adapter);
2966 if (!status) {
2967 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2968 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
2969 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
2970 if (err && reset_needed) {
2971 iowrite32(SLI_PORT_CONTROL_IP_MASK,
2972 adapter->db + SLIPORT_CONTROL_OFFSET);
2973
2974 /* check adapter has corrected the error */
2975 status = lancer_wait_ready(adapter);
2976 sliport_status = ioread32(adapter->db +
2977 SLIPORT_STATUS_OFFSET);
2978 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
2979 SLIPORT_STATUS_RN_MASK);
2980 if (status || sliport_status)
2981 status = -1;
2982 } else if (err || reset_needed) {
2983 status = -1;
2984 }
2985 }
2986 return status;
2987}
2988
6b7c5b94
SP
2989static int __devinit be_probe(struct pci_dev *pdev,
2990 const struct pci_device_id *pdev_id)
2991{
2992 int status = 0;
2993 struct be_adapter *adapter;
2994 struct net_device *netdev;
6b7c5b94
SP
2995
2996 status = pci_enable_device(pdev);
2997 if (status)
2998 goto do_none;
2999
3000 status = pci_request_regions(pdev, DRV_NAME);
3001 if (status)
3002 goto disable_dev;
3003 pci_set_master(pdev);
3004
3005 netdev = alloc_etherdev(sizeof(struct be_adapter));
3006 if (netdev == NULL) {
3007 status = -ENOMEM;
3008 goto rel_reg;
3009 }
3010 adapter = netdev_priv(netdev);
3011 adapter->pdev = pdev;
3012 pci_set_drvdata(pdev, adapter);
fe6d2a38
SP
3013
3014 status = be_dev_family_check(adapter);
63657b9c 3015 if (status)
fe6d2a38
SP
3016 goto free_netdev;
3017
6b7c5b94 3018 adapter->netdev = netdev;
2243e2e9 3019 SET_NETDEV_DEV(netdev, &pdev->dev);
6b7c5b94 3020
2b7bcebf 3021 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
6b7c5b94
SP
3022 if (!status) {
3023 netdev->features |= NETIF_F_HIGHDMA;
3024 } else {
2b7bcebf 3025 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
6b7c5b94
SP
3026 if (status) {
3027 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3028 goto free_netdev;
3029 }
3030 }
3031
ba343c77 3032 be_sriov_enable(adapter);
48f5a191
AK
3033 if (adapter->sriov_enabled) {
3034 adapter->vf_cfg = kcalloc(num_vfs,
3035 sizeof(struct be_vf_cfg), GFP_KERNEL);
3036
3037 if (!adapter->vf_cfg)
3038 goto free_netdev;
3039 }
ba343c77 3040
6b7c5b94
SP
3041 status = be_ctrl_init(adapter);
3042 if (status)
48f5a191 3043 goto free_vf_cfg;
6b7c5b94 3044
37eed1cb
PR
3045 if (lancer_chip(adapter)) {
3046 status = lancer_test_and_set_rdy_state(adapter);
3047 if (status) {
3048 dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
48f5a191 3049 goto ctrl_clean;
37eed1cb
PR
3050 }
3051 }
3052
2243e2e9 3053 /* sync up with fw's ready state */
ba343c77
SB
3054 if (be_physfn(adapter)) {
3055 status = be_cmd_POST(adapter);
3056 if (status)
3057 goto ctrl_clean;
ba343c77 3058 }
6b7c5b94 3059
2243e2e9
SP
3060 /* tell fw we're ready to fire cmds */
3061 status = be_cmd_fw_init(adapter);
6b7c5b94 3062 if (status)
2243e2e9
SP
3063 goto ctrl_clean;
3064
a4b4dfab
AK
3065 status = be_cmd_reset_function(adapter);
3066 if (status)
3067 goto ctrl_clean;
556ae191 3068
2243e2e9
SP
3069 status = be_stats_init(adapter);
3070 if (status)
3071 goto ctrl_clean;
3072
3073 status = be_get_config(adapter);
6b7c5b94
SP
3074 if (status)
3075 goto stats_clean;
6b7c5b94 3076
3abcdeda
SP
3077 be_msix_enable(adapter);
3078
6b7c5b94 3079 INIT_DELAYED_WORK(&adapter->work, be_worker);
6b7c5b94 3080
5fb379ee
SP
3081 status = be_setup(adapter);
3082 if (status)
3abcdeda 3083 goto msix_disable;
2243e2e9 3084
3abcdeda 3085 be_netdev_init(netdev);
6b7c5b94
SP
3086 status = register_netdev(netdev);
3087 if (status != 0)
5fb379ee 3088 goto unsetup;
63a76944 3089 netif_carrier_off(netdev);
6b7c5b94 3090
e6319365 3091 if (be_physfn(adapter) && adapter->sriov_enabled) {
d0381c42
AK
3092 u8 mac_speed;
3093 bool link_up;
3094 u16 vf, lnk_speed;
3095
e6319365
AK
3096 status = be_vf_eth_addr_config(adapter);
3097 if (status)
3098 goto unreg_netdev;
d0381c42
AK
3099
3100 for (vf = 0; vf < num_vfs; vf++) {
3101 status = be_cmd_link_status_query(adapter, &link_up,
3102 &mac_speed, &lnk_speed, vf + 1);
3103 if (!status)
3104 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
3105 else
3106 goto unreg_netdev;
3107 }
e6319365
AK
3108 }
3109
c4ca2374 3110 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
f203af70 3111 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
6b7c5b94
SP
3112 return 0;
3113
e6319365
AK
3114unreg_netdev:
3115 unregister_netdev(netdev);
5fb379ee
SP
3116unsetup:
3117 be_clear(adapter);
3abcdeda
SP
3118msix_disable:
3119 be_msix_disable(adapter);
6b7c5b94
SP
3120stats_clean:
3121 be_stats_cleanup(adapter);
3122ctrl_clean:
3123 be_ctrl_cleanup(adapter);
48f5a191
AK
3124free_vf_cfg:
3125 kfree(adapter->vf_cfg);
6b7c5b94 3126free_netdev:
ba343c77 3127 be_sriov_disable(adapter);
fe6d2a38 3128 free_netdev(netdev);
8d56ff11 3129 pci_set_drvdata(pdev, NULL);
6b7c5b94
SP
3130rel_reg:
3131 pci_release_regions(pdev);
3132disable_dev:
3133 pci_disable_device(pdev);
3134do_none:
c4ca2374 3135 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
6b7c5b94
SP
3136 return status;
3137}
3138
3139static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3140{
3141 struct be_adapter *adapter = pci_get_drvdata(pdev);
3142 struct net_device *netdev = adapter->netdev;
3143
a4ca055f 3144 cancel_delayed_work_sync(&adapter->work);
71d8d1b5
AK
3145 if (adapter->wol)
3146 be_setup_wol(adapter, true);
3147
6b7c5b94
SP
3148 netif_device_detach(netdev);
3149 if (netif_running(netdev)) {
3150 rtnl_lock();
3151 be_close(netdev);
3152 rtnl_unlock();
3153 }
9e90c961 3154 be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
9b0365f1 3155 be_clear(adapter);
6b7c5b94 3156
a4ca055f 3157 be_msix_disable(adapter);
6b7c5b94
SP
3158 pci_save_state(pdev);
3159 pci_disable_device(pdev);
3160 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3161 return 0;
3162}
3163
3164static int be_resume(struct pci_dev *pdev)
3165{
3166 int status = 0;
3167 struct be_adapter *adapter = pci_get_drvdata(pdev);
3168 struct net_device *netdev = adapter->netdev;
3169
3170 netif_device_detach(netdev);
3171
3172 status = pci_enable_device(pdev);
3173 if (status)
3174 return status;
3175
3176 pci_set_power_state(pdev, 0);
3177 pci_restore_state(pdev);
3178
a4ca055f 3179 be_msix_enable(adapter);
2243e2e9
SP
3180 /* tell fw we're ready to fire cmds */
3181 status = be_cmd_fw_init(adapter);
3182 if (status)
3183 return status;
3184
9b0365f1 3185 be_setup(adapter);
6b7c5b94
SP
3186 if (netif_running(netdev)) {
3187 rtnl_lock();
3188 be_open(netdev);
3189 rtnl_unlock();
3190 }
3191 netif_device_attach(netdev);
71d8d1b5
AK
3192
3193 if (adapter->wol)
3194 be_setup_wol(adapter, false);
a4ca055f
AK
3195
3196 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
6b7c5b94
SP
3197 return 0;
3198}
3199
82456b03
SP
3200/*
3201 * An FLR will stop BE from DMAing any data.
3202 */
3203static void be_shutdown(struct pci_dev *pdev)
3204{
3205 struct be_adapter *adapter = pci_get_drvdata(pdev);
82456b03 3206
2d5d4154
AK
3207 if (!adapter)
3208 return;
82456b03 3209
0f4a6828 3210 cancel_delayed_work_sync(&adapter->work);
a4ca055f 3211
2d5d4154 3212 netif_device_detach(adapter->netdev);
82456b03 3213
82456b03
SP
3214 if (adapter->wol)
3215 be_setup_wol(adapter, true);
3216
57841869
AK
3217 be_cmd_reset_function(adapter);
3218
82456b03 3219 pci_disable_device(pdev);
82456b03
SP
3220}
3221
cf588477
SP
3222static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3223 pci_channel_state_t state)
3224{
3225 struct be_adapter *adapter = pci_get_drvdata(pdev);
3226 struct net_device *netdev = adapter->netdev;
3227
3228 dev_err(&adapter->pdev->dev, "EEH error detected\n");
3229
3230 adapter->eeh_err = true;
3231
3232 netif_device_detach(netdev);
3233
3234 if (netif_running(netdev)) {
3235 rtnl_lock();
3236 be_close(netdev);
3237 rtnl_unlock();
3238 }
3239 be_clear(adapter);
3240
3241 if (state == pci_channel_io_perm_failure)
3242 return PCI_ERS_RESULT_DISCONNECT;
3243
3244 pci_disable_device(pdev);
3245
3246 return PCI_ERS_RESULT_NEED_RESET;
3247}
3248
3249static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3250{
3251 struct be_adapter *adapter = pci_get_drvdata(pdev);
3252 int status;
3253
3254 dev_info(&adapter->pdev->dev, "EEH reset\n");
3255 adapter->eeh_err = false;
3256
3257 status = pci_enable_device(pdev);
3258 if (status)
3259 return PCI_ERS_RESULT_DISCONNECT;
3260
3261 pci_set_master(pdev);
3262 pci_set_power_state(pdev, 0);
3263 pci_restore_state(pdev);
3264
3265 /* Check if card is ok and fw is ready */
3266 status = be_cmd_POST(adapter);
3267 if (status)
3268 return PCI_ERS_RESULT_DISCONNECT;
3269
3270 return PCI_ERS_RESULT_RECOVERED;
3271}
3272
3273static void be_eeh_resume(struct pci_dev *pdev)
3274{
3275 int status = 0;
3276 struct be_adapter *adapter = pci_get_drvdata(pdev);
3277 struct net_device *netdev = adapter->netdev;
3278
3279 dev_info(&adapter->pdev->dev, "EEH resume\n");
3280
3281 pci_save_state(pdev);
3282
3283 /* tell fw we're ready to fire cmds */
3284 status = be_cmd_fw_init(adapter);
3285 if (status)
3286 goto err;
3287
3288 status = be_setup(adapter);
3289 if (status)
3290 goto err;
3291
3292 if (netif_running(netdev)) {
3293 status = be_open(netdev);
3294 if (status)
3295 goto err;
3296 }
3297 netif_device_attach(netdev);
3298 return;
3299err:
3300 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
cf588477
SP
3301}
3302
3303static struct pci_error_handlers be_eeh_handlers = {
3304 .error_detected = be_eeh_err_detected,
3305 .slot_reset = be_eeh_reset,
3306 .resume = be_eeh_resume,
3307};
3308
6b7c5b94
SP
3309static struct pci_driver be_driver = {
3310 .name = DRV_NAME,
3311 .id_table = be_dev_ids,
3312 .probe = be_probe,
3313 .remove = be_remove,
3314 .suspend = be_suspend,
cf588477 3315 .resume = be_resume,
82456b03 3316 .shutdown = be_shutdown,
cf588477 3317 .err_handler = &be_eeh_handlers
6b7c5b94
SP
3318};
3319
3320static int __init be_init_module(void)
3321{
8e95a202
JP
3322 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3323 rx_frag_size != 2048) {
6b7c5b94
SP
3324 printk(KERN_WARNING DRV_NAME
3325 " : Module param rx_frag_size must be 2048/4096/8192."
3326 " Using 2048\n");
3327 rx_frag_size = 2048;
3328 }
6b7c5b94
SP
3329
3330 return pci_register_driver(&be_driver);
3331}
3332module_init(be_init_module);
3333
3334static void __exit be_exit_module(void)
3335{
3336 pci_unregister_driver(&be_driver);
3337}
3338module_exit(be_exit_module);