drivers/net/*/: Use static const
drivers/net/benet/be_main.c

/*
 * Copyright (C) 2005 - 2010 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, uint, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

static inline bool be_multi_rxq(struct be_adapter *adapter)
{
	return (adapter->num_rx_qs > 1);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (adapter->eeh_err)
		return;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	iowrite32(reg, addr);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
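
#if 0	/* Standalone sketch, not driver code.
	 * The four notify routines above all follow the same pattern: pack a
	 * ring id and a count into one 32-bit word and post it with a single
	 * MMIO write. The shift/mask values below are illustrative stand-ins
	 * for the DB_* constants from be_hw.h, not the real register layout.
	 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_RING_ID_MASK	0x3FF	/* assumed: low 10 bits = ring id */
#define DEMO_NUM_POSTED_SHIFT	24	/* assumed: count lives in the high byte */

static uint32_t demo_pack_doorbell(uint16_t qid, uint16_t posted)
{
	uint32_t val = 0;

	val |= qid & DEMO_RING_ID_MASK;
	val |= (uint32_t)posted << DEMO_NUM_POSTED_SHIFT;
	return val;	/* the driver then does iowrite32(val, db + offset) */
}

int main(void)
{
	/* ring 5, 64 buffers posted -> one doorbell write */
	printf("doorbell word: 0x%08x\n", demo_pack_doorbell(5, 64));
	return 0;
}
#endif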

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
			adapter->if_handle, &adapter->pmac_id);
netdev_addr:
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}

void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
	struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct net_device_stats *dev_stats = &adapter->netdev->stats;
	struct be_erx_stats *erx_stats = &hw_stats->erx;
	struct be_rx_obj *rxo;
	int i;

	memset(dev_stats, 0, sizeof(*dev_stats));
	for_all_rx_queues(adapter, rxo, i) {
		dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
		dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
		dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		/* no space in linux buffers: best possible approximation */
		dev_stats->rx_dropped +=
			erx_stats->rx_drops_no_fragments[rxo->q.id];
	}

	dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
	dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

	/* bad pkts received */
	dev_stats->rx_errors = port_stats->rx_crc_errors +
		port_stats->rx_alignment_symbol_errors +
		port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long +
		port_stats->rx_dropped_too_small +
		port_stats->rx_dropped_too_short +
		port_stats->rx_dropped_header_too_small +
		port_stats->rx_dropped_tcp_length +
		port_stats->rx_dropped_runt +
		port_stats->rx_tcp_checksum_errs +
		port_stats->rx_ip_checksum_errs +
		port_stats->rx_udp_checksum_errs;

	/* detailed rx errors */
	dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long;

	dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
		port_stats->rx_input_fifo_overflow +
		rxf_stats->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
	struct net_device *netdev = adapter->netdev;

	/* If link came up or went down */
	if (adapter->link_up != link_up) {
		adapter->link_speed = -1;
		if (link_up) {
			netif_start_queue(netdev);
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		} else {
			netif_stop_queue(netdev);
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		}
		adapter->link_up = link_up;
	}
}

/* Update the EQ delay on BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->prev_rx_frags = stats->rx_frags;
	eqd = stats->rx_fps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}
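
#if 0	/* Standalone sketch, not driver code.
	 * The adaptive-coalescing math above, isolated: measure RX frags/sec,
	 * derive a candidate delay, clamp it into [min_eqd, max_eqd], and drop
	 * to zero delay when traffic is light. Constants mirror the function;
	 * the jiffies bookkeeping is omitted.
	 */
#include <stdint.h>
#include <stdio.h>

static uint32_t demo_eqd(uint32_t rx_fps, uint32_t min_eqd, uint32_t max_eqd)
{
	uint32_t eqd = (rx_fps / 110000) << 3;

	if (eqd > max_eqd)
		eqd = max_eqd;
	if (eqd < min_eqd)
		eqd = min_eqd;
	if (eqd < 10)
		eqd = 0;	/* too little traffic: interrupt immediately */
	return eqd;
}

int main(void)
{
	printf("1M frags/s  -> eqd %u\n", demo_eqd(1000000, 0, 120));	/* 72 */
	printf("10M frags/s -> eqd %u\n", demo_eqd(10000000, 0, 120));	/* clamped to 120 */
	return 0;
}
#endif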

static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
	u64 rate = bytes;

	do_div(rate, ticks / HZ);
	rate <<= 3;			/* bytes/sec -> bits/sec */
	do_div(rate, 1000000ul);	/* bits/sec -> Mbits/sec */

	return rate;
}
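
#if 0	/* Standalone sketch, not driver code.
	 * Worked example for be_calc_rate(): bytes/ticks -> bytes/sec,
	 * << 3 -> bits/sec, / 1e6 -> Mbits/sec. Plain 64-bit division stands
	 * in for do_div(), and HZ is assumed to be 1000 here.
	 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_HZ 1000

static uint32_t demo_calc_rate(uint64_t bytes, unsigned long ticks)
{
	uint64_t rate = bytes;

	rate /= ticks / DEMO_HZ;	/* bytes per second */
	rate <<= 3;			/* bits per second */
	rate /= 1000000ul;		/* megabits per second */
	return (uint32_t)rate;
}

int main(void)
{
	/* 250 MB in 2 seconds ~= 1000 Mbit/s */
	printf("%u Mbps\n", demo_calc_rate(250000000ull, 2 * DEMO_HZ));
	return 0;
}
#endif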

static void be_tx_rate_update(struct be_adapter *adapter)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around? */
	if (time_before(now, stats->be_tx_jiffies)) {
		stats->be_tx_jiffies = now;
		return;
	}

	/* Update tx rate once in two seconds */
	if ((now - stats->be_tx_jiffies) > 2 * HZ) {
		stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
						  - stats->be_tx_bytes_prev,
						 now - stats->be_tx_jiffies);
		stats->be_tx_jiffies = now;
		stats->be_tx_bytes_prev = stats->be_tx_bytes;
	}
}

static void be_tx_stats_update(struct be_adapter *adapter,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	stats->be_tx_reqs++;
	stats->be_tx_wrbs += wrb_cnt;
	stats->be_tx_bytes += copied;
	stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
								bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}
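
#if 0	/* Standalone sketch, not driver code.
	 * The WRB-count rule above, isolated: one WRB for a non-empty linear
	 * area, one per page fragment, one for the header WRB, plus a dummy
	 * WRB on non-Lancer chips to keep the total even.
	 */
#include <stdbool.h>
#include <stdio.h>

static unsigned demo_wrb_cnt(bool has_linear_data, unsigned nr_frags,
			     bool lancer, bool *dummy)
{
	unsigned cnt = has_linear_data ? 1 : 0;

	cnt += nr_frags;
	cnt++;				/* header wrb */
	*dummy = !lancer && (cnt & 1);	/* pad odd counts to even */
	if (*dummy)
		cnt++;
	return cnt;
}

int main(void)
{
	bool dummy;
	unsigned cnt = demo_wrb_cnt(true, 2, false, &dummy);

	/* linear data + 2 frags + hdr = 4, already even: no dummy */
	printf("wrbs=%u dummy=%d\n", cnt, dummy);
	return 0;
}
#endif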

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
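
#if 0	/* Standalone sketch, not driver code.
	 * The 802.1p remap in wrb_fill_hdr() above: if the priority requested
	 * by the stack is not set in the adapter's allowed-priority bitmap,
	 * keep the VLAN id but substitute a recommended priority. Masks follow
	 * the standard 802.1Q tag layout (priority in bits 15:13); the
	 * recommended priority is assumed to be passed in already shifted.
	 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_VLAN_PRIO_MASK	0xE000
#define DEMO_VLAN_PRIO_SHIFT	13

static uint16_t demo_remap_prio(uint16_t vlan_tag, uint8_t prio_bmap,
				uint16_t recommended_prio)
{
	uint8_t prio = (vlan_tag & DEMO_VLAN_PRIO_MASK) >> DEMO_VLAN_PRIO_SHIFT;

	if (!(prio_bmap & (1 << prio)))
		vlan_tag = (vlan_tag & ~DEMO_VLAN_PRIO_MASK) | recommended_prio;
	return vlan_tag;
}

int main(void)
{
	/* vid 100 with priority 5; only priorities 0 and 3 allowed (0x09) */
	uint16_t tag = (5 << DEMO_VLAN_PRIO_SHIFT) | 100;

	printf("0x%04x\n", demo_remap_prio(tag, 0x09, 3 << DEMO_VLAN_PRIO_SHIFT));
	return 0;
}
#endif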

static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			pci_unmap_single(pdev, dma, wrb->frag_len,
				PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pdev, dma, wrb->frag_len,
				PCI_DMA_TODEVICE);
	}
}

static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct pci_dev *pdev = adapter->pdev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = pci_map_single(pdev, skb->data, len,
					 PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = pci_map_page(pdev, frag->page,
				       frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(pdev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(tx_obj->sent_skb_list[start]);
		tx_obj->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_queue(netdev);
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(adapter, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;
	vlan_group_set_device(adapter->vlan_grp, vid, NULL);

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
				&adapter->mc_cmd_mem);
		goto done;
	}

	be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
		&adapter->mc_cmd_mem);
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id);

	status = be_cmd_pmac_add(adapter, mac,
				adapter->vf_cfg[vf].vf_if_handle,
				&adapter->vf_cfg[vf].vf_pmac_id);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}

static void be_rx_rate_update(struct be_rx_obj *rxo)
{
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update the rate once in two seconds */
	if ((now - stats->rx_jiffies) < 2 * HZ)
		return;

	stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
				now - stats->rx_jiffies);
	stats->rx_jiffies = now;
	stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		u32 pktsize, u16 numfrags, u8 pkt_type)
{
	struct be_rx_stats *stats = &rxo->stats;

	stats->rx_compl++;
	stats->rx_frags += numfrags;
	stats->rx_bytes += pktsize;
	stats->rx_pkts++;
	if (pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
}

static inline bool csum_passed(struct be_eth_rx_compl *rxcp)
{
	u8 l4_cksm, ipv6, ipcksm;

	l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
	ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
	ipv6 = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);

	/* Ignore ipcksm for ipv6 pkts */
	return l4_cksm && (ipcksm || ipv6);
}
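
#if 0	/* Standalone sketch, not driver code.
	 * The rule above as a truth table: the L4 checksum must pass, and the
	 * IP header checksum must pass too unless the frame is IPv6, which
	 * carries no header checksum.
	 */
#include <stdbool.h>
#include <stdio.h>

static bool demo_csum_passed(bool l4_ok, bool ip_ok, bool is_ipv6)
{
	return l4_ok && (ip_ok || is_ipv6);
}

int main(void)
{
	printf("v4, l4+ip ok: %d\n", demo_csum_passed(true, true, false));	/* 1 */
	printf("v4, ip bad  : %d\n", demo_csum_passed(true, false, false));	/* 0 */
	printf("v6, l4 ok   : %d\n", demo_csum_passed(true, false, true));	/* 1 */
	return 0;
}
#endif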

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		pci_unmap_page(adapter->pdev, dma_unmap_addr(rx_page_info, bus),
			adapter->big_page_size, PCI_DMA_FROMDEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_eth_rx_compl *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, num_rcvd;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	/* Skip out-of-buffer compl(lancer) or flush compl(BE) */
	if (likely(rxq_idx != rxo->last_frag_index && num_rcvd != 0)) {

		rxo->last_frag_index = rxq_idx;

		for (i = 0; i < num_rcvd; i++) {
			page_info = get_rx_page_info(adapter, rxo, rxq_idx);
			put_page(page_info->page);
			memset(page_info, 0, sizeof(*page_info));
			index_inc(&rxq_idx, rxq->len);
		}
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
			u16 num_rcvd)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, j;
	u32 pktsize, hdr_len, curr_frag_len, size;
	u8 *start;
	u8 pkt_type;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

	page_info = get_rx_page_info(adapter, rxo, rxq_idx);

	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(pktsize, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (pktsize <= rx_frag_size) {
		BUG_ON(num_rcvd != 1);
		goto done;
	}

	/* More frags present for this completion */
	size = pktsize;
	for (i = 1, j = 0; i < num_rcvd; i++) {
		size -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		page_info = get_rx_page_info(adapter, rxo, rxq_idx);

		curr_frag_len = min(size, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);

done:
	be_rx_stats_update(rxo, pktsize, num_rcvd, pkt_type);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_eth_rx_compl *rxcp)
{
	struct sk_buff *skb;
	u32 vlanf, vid;
	u16 num_rcvd;
	u8 vtm;

	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd);

	if (likely(adapter->rx_csum && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, adapter->netdev);

	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);

	/* vlanf could be wrongly set in some cards.
	 * ignore if vtm is not set */
	if ((adapter->function_mode & 0x400) && !vtm)
		vlanf = 0;

	if (unlikely(vlanf)) {
		if (!adapter->vlan_grp || adapter->vlans_added == 0) {
			kfree_skb(skb);
			return;
		}
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		if (!lancer_chip(adapter))
			vid = swab16(vid);
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
	} else {
		netif_receive_skb(skb);
	}
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_eth_rx_compl *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
	u16 i, rxq_idx = 0, vid, j;
	u8 vtm;
	u8 pkt_type;

	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
	pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
	pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

	/* vlanf could be wrongly set in some cards.
	 * ignore if vtm is not set */
	if ((adapter->function_mode & 0x400) && !vtm)
		vlanf = 0;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = pkt_size;
	for (i = 0, j = -1; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = pkt_size;
	skb->data_len = pkt_size;
	skb->truesize += pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (likely(!vlanf)) {
		napi_gro_frags(&eq_obj->napi);
	} else {
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		if (!lancer_chip(adapter))
			vid = swab16(vid);

		if (!adapter->vlan_grp || adapter->vlans_added == 0)
			return;

		vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
	}

	be_rx_stats_update(rxo, pkt_size, num_rcvd, pkt_type);
}

static struct be_eth_rx_compl *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *rxcp = queue_tail_node(&rxo->cq);

	if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(rxcp, sizeof(*rxcp));

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

/* To reset the valid bit, we need to reset the whole word as
 * when walking the queue the valid entries are little-endian
 * and invalid entries are host endian
 */
static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
{
	rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
}

static inline struct page *be_alloc_pages(u32 size)
{
	gfp_t alloc_flags = GFP_ATOMIC;
	u32 order = get_order(size);
	if (order > 0)
		alloc_flags |= __GFP_COMP;
	return alloc_pages(alloc_flags, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size);
			if (unlikely(!pagep)) {
				rxo->stats.rx_post_fail++;
				break;
			}
			page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
						adapter->big_page_size,
						PCI_DMA_FROMDEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
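
#if 0	/* Standalone sketch, not driver code.
	 * Back-of-the-envelope view of the posting scheme above: one "big
	 * page" is carved into rx_frag_size pieces, and only the consumer of
	 * the last piece unmaps the page (last_page_user). Figures assume the
	 * module default rx_frag_size = 2048 and a 4 KB base page, so
	 * big_page_size = (1 << get_order(2048)) * 4096 = 4096.
	 */
#include <stdio.h>

int main(void)
{
	unsigned rx_frag_size = 2048;	/* module parameter default */
	unsigned big_page_size = 4096;
	unsigned frags = big_page_size / rx_frag_size;

	printf("%u frags per big page; frag %u sets last_page_user\n",
	       frags, frags - 1);
	return 0;
}
#endif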

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr &&
					skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	atomic_sub(num_wrbs, &txq->used);

	kfree_skb(sent_skb);
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	be_eq_notify(adapter, eq_obj->q.id, true, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_eth_rx_compl *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_rx_compl_reset(rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
}

static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			be_tx_compl_process(adapter, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			cmpl = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		be_tx_compl_process(adapter, end_idx);
	}
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->tx_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
	be_queue_free(adapter, q);

	q = &adapter->tx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;
	/* Alloc Tx Event queue */
	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
		return -1;

	/* Ask BE to create Tx Event queue */
	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto tx_eq_free;

	adapter->tx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;

	/* Alloc TX eth compl queue */
	cq = &adapter->tx_obj.cq;
	if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
		goto tx_eq_destroy;

	/* Ask BE to create Tx eth compl queue */
	if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
		goto tx_cq_free;

	/* Alloc TX eth queue */
	q = &adapter->tx_obj.q;
	if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
		goto tx_cq_destroy;

	/* Ask BE to create Tx eth queue */
	if (be_cmd_txq_create(adapter, q, cq))
		goto tx_q_free;
	return 0;

tx_q_free:
	be_queue_free(adapter, q);
tx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
	be_queue_free(adapter, cq);
tx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
	be_queue_free(adapter, eq);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}
		be_queue_free(adapter, q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created) {
			be_eq_clean(adapter, &rxo->rx_eq);
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		}
		be_queue_free(adapter, q);
	}
}

static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		/* Init last_frag_index so that the frag index in the first
		 * completion will never match */
		rxo->last_frag_index = 0xffff;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;
		/* Rx Q */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;

		rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
			BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
			(i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
		if (rc)
			goto err;
	}

	if (be_multi_rxq(adapter)) {
		u8 rsstable[MAX_RSS_QS];

		for_all_rss_queues(adapter, rxo, i)
			rsstable[i] = rxo->rss_id;

		rc = be_cmd_rss_config(adapter, rsstable,
			adapter->num_rx_qs - 1);
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}

static bool event_peek(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
	if (!eqe->evt)
		return false;
	else
		return true;
}

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0, rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq);
		}

		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.msix_vec_idx & isr))
			event_handle(adapter, &adapter->tx_eq);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.msix_vec_idx & isr))
				event_handle(adapter, &rxo->rx_eq);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_rx_obj *rxo = dev;
	struct be_adapter *adapter = rxo->adapter;

	event_handle(adapter, &rxo->rx_eq);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq);

	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_obj *rxo,
			struct be_eth_rx_compl *rxcp, u8 err)
{
	int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);

	if (err)
		rxo->stats.rxcp_err++;

	return (tcp_frame && !err) ? true : false;
}

static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_eth_rx_compl *rxcp;
	u32 work_done;
	u16 frag_index, num_rcvd;
	u8 err;

	rxo->stats.rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
		frag_index = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx,
								rxcp);
		num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags,
								rxcp);

		/* Skip out-of-buffer compl(lancer) or flush compl(BE) */
		if (likely(frag_index != rxo->last_frag_index &&
				num_rcvd != 0)) {
			rxo->last_frag_index = frag_index;

			if (do_gro(rxo, rxcp, err))
				be_rx_compl_process_gro(adapter, rxo, rxcp);
			else
				be_rx_compl_process(adapter, rxo, rxcp);
		}

		be_rx_compl_reset(rxcp);
	}

	/* Refill the queue */
	if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}

/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_eth_tx_compl *txcp;
	int tx_compl = 0, mcc_compl, status = 0;
	u16 end_idx;

	while ((txcp = be_tx_compl_get(tx_cq))) {
		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
				wrb_index, txcp);
		be_tx_compl_process(adapter, end_idx);
		tx_compl++;
	}

	mcc_compl = be_process_mcc(adapter, &status);

	napi_complete(napi);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	if (tx_compl) {
		be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);

		/* As Tx wrbs have been freed up, wake up netdev queue if
		 * it was stopped due to lack of tx wrbs.
		 */
		if (netif_queue_stopped(adapter->netdev) &&
			atomic_read(&txq->used) < txq->len / 2) {
			netif_wake_queue(adapter->netdev);
		}

		tx_stats(adapter)->be_tx_events++;
		tx_stats(adapter)->be_tx_compl += tx_compl;
	}

	return 1;
}
d053de91 1813void be_detect_dump_ue(struct be_adapter *adapter)
7c185276
AK
1814{
1815 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1816 u32 i;
1817
1818 pci_read_config_dword(adapter->pdev,
1819 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1820 pci_read_config_dword(adapter->pdev,
1821 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1822 pci_read_config_dword(adapter->pdev,
1823 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1824 pci_read_config_dword(adapter->pdev,
1825 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1826
1827 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1828 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1829
d053de91
AK
1830 if (ue_status_lo || ue_status_hi) {
1831 adapter->ue_detected = true;
1832 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1833 }
1834
7c185276
AK
1835 if (ue_status_lo) {
1836 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1837 if (ue_status_lo & 1)
1838 dev_err(&adapter->pdev->dev,
1839 "UE: %s bit set\n", ue_status_low_desc[i]);
1840 }
1841 }
1842 if (ue_status_hi) {
1843 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1844 if (ue_status_hi & 1)
1845 dev_err(&adapter->pdev->dev,
1846 "UE: %s bit set\n", ue_status_hi_desc[i]);
1847 }
1848 }
1849
1850}
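
After masking off the bits the firmware marks as don't-care, the UE dump above walks each status word bit by bit: shift right, and whenever the low bit is set print the matching description string. A self-contained sketch of that decode idiom follows; the table is truncated to four entries for illustration and is not the driver's real table.

#include <stdint.h>
#include <stdio.h>

static const char *desc[] = { "CEV", "CTX", "DBUF", "ERX" };

/* Print the name of every set bit, mirroring the shift-and-test
 * loop used for the UE status registers. */
static void dump_bits(uint32_t status)
{
	uint32_t i;

	for (i = 0; status; status >>= 1, i++)
		if (status & 1)
			printf("UE: %s bit set\n", desc[i]);
}

int main(void)
{
	dump_bits(0x5);   /* bits 0 and 2 -> "CEV" and "DBUF" */
	return 0;
}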
1851
ea1dae11
SP
1852static void be_worker(struct work_struct *work)
1853{
1854 struct be_adapter *adapter =
1855 container_of(work, struct be_adapter, work.work);
3abcdeda
SP
1856 struct be_rx_obj *rxo;
1857 int i;
ea1dae11 1858
f203af70
SK
1859 /* when interrupts are not yet enabled, just reap any pending
1860 * mcc completions */
1861 if (!netif_running(adapter->netdev)) {
1862 int mcc_compl, status = 0;
1863
1864 mcc_compl = be_process_mcc(adapter, &status);
1865
1866 if (mcc_compl) {
1867 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1868 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
1869 }
1870 goto reschedule;
1871 }
1872
0fc48c37 1873 if (!adapter->stats_ioctl_sent)
3abcdeda 1874 be_cmd_get_stats(adapter, &adapter->stats_cmd);
ea1dae11 1875
4097f663 1876 be_tx_rate_update(adapter);
4097f663 1877
3abcdeda
SP
1878 for_all_rx_queues(adapter, rxo, i) {
1879 be_rx_rate_update(rxo);
1880 be_rx_eqd_update(adapter, rxo);
1881
1882 if (rxo->rx_post_starved) {
1883 rxo->rx_post_starved = false;
1884 be_post_rx_frags(rxo);
1885 }
ea1dae11 1886 }
fe6d2a38 1887 if (!adapter->ue_detected && !lancer_chip(adapter))
d053de91 1888 be_detect_dump_ue(adapter);
ea1dae11 1889
f203af70 1890reschedule:
ea1dae11
SP
1891 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
1892}
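
be_worker() always re-queues itself one second out, whether it ran the full body or only the cheap MCC reap that is safe before interrupts are enabled. A toy tick function showing that shape, with printf standing in for the real work:

#include <stdbool.h>
#include <stdio.h>

static bool iface_running;

/* One tick of the periodic worker: only the cheap MCC reap while the
 * interface is down, the full body otherwise; always "reschedule". */
static void worker_tick(void)
{
	if (!iface_running) {
		printf("reap pending MCC completions only\n");
		goto reschedule;
	}
	printf("stats refresh, rate/EQD update, starvation refill\n");
reschedule:
	printf("re-armed for +1000ms\n");
}

int main(void)
{
	worker_tick();           /* interface still down */
	iface_running = true;
	worker_tick();           /* full body */
	return 0;
}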
1893
8d56ff11
SP
1894static void be_msix_disable(struct be_adapter *adapter)
1895{
1896 if (adapter->msix_enabled) {
1897 pci_disable_msix(adapter->pdev);
1898 adapter->msix_enabled = false;
1899 }
1900}
1901
3abcdeda
SP
1902static int be_num_rxqs_get(struct be_adapter *adapter)
1903{
1904 if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1905 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1906 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1907 } else {
1908 dev_warn(&adapter->pdev->dev,
1909 "No support for multiple RX queues\n");
1910 return 1;
1911 }
1912}
1913
6b7c5b94
SP
1914static void be_msix_enable(struct be_adapter *adapter)
1915{
3abcdeda 1916#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
6b7c5b94
SP
1917 int i, status;
1918
3abcdeda
SP
1919 adapter->num_rx_qs = be_num_rxqs_get(adapter);
1920
1921 for (i = 0; i < (adapter->num_rx_qs + 1); i++)
6b7c5b94
SP
1922 adapter->msix_entries[i].entry = i;
1923
1924 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
3abcdeda
SP
1925 adapter->num_rx_qs + 1);
1926 if (status == 0) {
1927 goto done;
1928 } else if (status >= BE_MIN_MSIX_VECTORS) {
1929 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
1930 status) == 0) {
1931 adapter->num_rx_qs = status - 1;
1932 dev_warn(&adapter->pdev->dev,
1933 "Could alloc only %d MSIx vectors. "
1934 "Using %d RX Qs\n", status, adapter->num_rx_qs);
1935 goto done;
1936 }
1937 }
1938 return;
1939done:
1940 adapter->msix_enabled = true;
6b7c5b94
SP
1941}
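
In this kernel generation pci_enable_msix() returns 0 on success or, on a shortfall, the number of vectors the platform could still grant; the code above retries with that smaller count and shrinks num_rx_qs to match. A sketch of the retry policy against a fake allocator — enable_vectors and the available count are invented for illustration:

#include <stdio.h>

#define MIN_VECTORS 2          /* 1 RX + 1 TX, the floor used above */
static int available = 5;      /* pretend the platform has 5 vectors */

/* Mimics the old pci_enable_msix() contract: 0 on success, or the
 * number of vectors that *could* be granted when 'want' is too big. */
static int enable_vectors(int want)
{
	return want <= available ? 0 : available;
}

int main(void)
{
	int want = 9, rc;          /* e.g. 8 RX queues + 1 TX */

	rc = enable_vectors(want);
	if (rc >= MIN_VECTORS && enable_vectors(rc) == 0)
		want = rc;             /* retry succeeded with fewer vectors */
	else if (rc != 0)
		want = 0;              /* give up; fall back to INTx */

	printf("vectors in use: %d -> %d RX queues\n",
	       want, want ? want - 1 : 0);
	return 0;
}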
1942
ba343c77
SB
1943static void be_sriov_enable(struct be_adapter *adapter)
1944{
344dbf10 1945 be_check_sriov_fn_type(adapter);
6dedec81 1946#ifdef CONFIG_PCI_IOV
ba343c77 1947 if (be_physfn(adapter) && num_vfs) {
6dedec81
AK
1948 int status;
1949
ba343c77
SB
1950 status = pci_enable_sriov(adapter->pdev, num_vfs);
1951 adapter->sriov_enabled = status ? false : true;
1952 }
1953#endif
ba343c77
SB
1954}
1955
1956static void be_sriov_disable(struct be_adapter *adapter)
1957{
1958#ifdef CONFIG_PCI_IOV
1959 if (adapter->sriov_enabled) {
1960 pci_disable_sriov(adapter->pdev);
1961 adapter->sriov_enabled = false;
1962 }
1963#endif
1964}
1965
fe6d2a38
SP
1966static inline int be_msix_vec_get(struct be_adapter *adapter,
1967 struct be_eq_obj *eq_obj)
6b7c5b94 1968{
fe6d2a38 1969 return adapter->msix_entries[eq_obj->msix_vec_idx].vector;
6b7c5b94
SP
1970}
1971
b628bde2
SP
1972static int be_request_irq(struct be_adapter *adapter,
1973 struct be_eq_obj *eq_obj,
3abcdeda 1974 void *handler, char *desc, void *context)
6b7c5b94
SP
1975{
1976 struct net_device *netdev = adapter->netdev;
b628bde2
SP
1977 int vec;
1978
1979 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
fe6d2a38 1980 vec = be_msix_vec_get(adapter, eq_obj);
3abcdeda 1981 return request_irq(vec, handler, 0, eq_obj->desc, context);
b628bde2
SP
1982}
1983
3abcdeda
SP
1984static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
1985 void *context)
b628bde2 1986{
fe6d2a38 1987 int vec = be_msix_vec_get(adapter, eq_obj);
3abcdeda 1988 free_irq(vec, context);
b628bde2 1989}
6b7c5b94 1990
b628bde2
SP
1991static int be_msix_register(struct be_adapter *adapter)
1992{
3abcdeda
SP
1993 struct be_rx_obj *rxo;
1994 int status, i;
1995 char qname[10];
b628bde2 1996
3abcdeda
SP
1997 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
1998 adapter);
6b7c5b94
SP
1999 if (status)
2000 goto err;
2001
3abcdeda
SP
2002 for_all_rx_queues(adapter, rxo, i) {
2003 sprintf(qname, "rxq%d", i);
2004 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2005 qname, rxo);
2006 if (status)
2007 goto err_msix;
2008 }
b628bde2 2009
6b7c5b94 2010 return 0;
b628bde2 2011
3abcdeda
SP
2012err_msix:
2013 be_free_irq(adapter, &adapter->tx_eq, adapter);
2014
2015 for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2016 be_free_irq(adapter, &rxo->rx_eq, rxo);
2017
6b7c5b94
SP
2018err:
2019 dev_warn(&adapter->pdev->dev,
2020 "MSIX Request IRQ failed - err %d\n", status);
2021 pci_disable_msix(adapter->pdev);
2022 adapter->msix_enabled = false;
2023 return status;
2024}
2025
2026static int be_irq_register(struct be_adapter *adapter)
2027{
2028 struct net_device *netdev = adapter->netdev;
2029 int status;
2030
2031 if (adapter->msix_enabled) {
2032 status = be_msix_register(adapter);
2033 if (status == 0)
2034 goto done;
ba343c77
SB
2035 /* INTx is not supported for VF */
2036 if (!be_physfn(adapter))
2037 return status;
6b7c5b94
SP
2038 }
2039
2040 /* INTx */
2041 netdev->irq = adapter->pdev->irq;
2042 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2043 adapter);
2044 if (status) {
2045 dev_err(&adapter->pdev->dev,
2046 "INTx request IRQ failed - err %d\n", status);
2047 return status;
2048 }
2049done:
2050 adapter->isr_registered = true;
2051 return 0;
2052}
2053
2054static void be_irq_unregister(struct be_adapter *adapter)
2055{
2056 struct net_device *netdev = adapter->netdev;
3abcdeda
SP
2057 struct be_rx_obj *rxo;
2058 int i;
6b7c5b94
SP
2059
2060 if (!adapter->isr_registered)
2061 return;
2062
2063 /* INTx */
2064 if (!adapter->msix_enabled) {
2065 free_irq(netdev->irq, adapter);
2066 goto done;
2067 }
2068
2069 /* MSIx */
3abcdeda
SP
2070 be_free_irq(adapter, &adapter->tx_eq, adapter);
2071
2072 for_all_rx_queues(adapter, rxo, i)
2073 be_free_irq(adapter, &rxo->rx_eq, rxo);
2074
6b7c5b94
SP
2075done:
2076 adapter->isr_registered = false;
6b7c5b94
SP
2077}
2078
889cd4b2
SP
2079static int be_close(struct net_device *netdev)
2080{
2081 struct be_adapter *adapter = netdev_priv(netdev);
3abcdeda 2082 struct be_rx_obj *rxo;
889cd4b2 2083 struct be_eq_obj *tx_eq = &adapter->tx_eq;
3abcdeda 2084 int vec, i;
889cd4b2 2085
889cd4b2
SP
2086 be_async_mcc_disable(adapter);
2087
2088 netif_stop_queue(netdev);
2089 netif_carrier_off(netdev);
2090 adapter->link_up = false;
2091
fe6d2a38
SP
2092 if (!lancer_chip(adapter))
2093 be_intr_set(adapter, false);
889cd4b2
SP
2094
2095 if (adapter->msix_enabled) {
fe6d2a38 2096 vec = be_msix_vec_get(adapter, tx_eq);
889cd4b2 2097 synchronize_irq(vec);
3abcdeda
SP
2098
2099 for_all_rx_queues(adapter, rxo, i) {
fe6d2a38 2100 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
3abcdeda
SP
2101 synchronize_irq(vec);
2102 }
889cd4b2
SP
2103 } else {
2104 synchronize_irq(netdev->irq);
2105 }
2106 be_irq_unregister(adapter);
2107
3abcdeda
SP
2108 for_all_rx_queues(adapter, rxo, i)
2109 napi_disable(&rxo->rx_eq.napi);
2110
889cd4b2
SP
2111 napi_disable(&tx_eq->napi);
2112
2113 /* Wait for all pending tx completions to arrive so that
2114 * all tx skbs are freed.
2115 */
2116 be_tx_compl_clean(adapter);
2117
2118 return 0;
2119}
2120
6b7c5b94
SP
2121static int be_open(struct net_device *netdev)
2122{
2123 struct be_adapter *adapter = netdev_priv(netdev);
6b7c5b94 2124 struct be_eq_obj *tx_eq = &adapter->tx_eq;
3abcdeda 2125 struct be_rx_obj *rxo;
a8f447bd 2126 bool link_up;
3abcdeda 2127 int status, i;
0388f251
SB
2128 u8 mac_speed;
2129 u16 link_speed;
5fb379ee 2130
3abcdeda
SP
2131 for_all_rx_queues(adapter, rxo, i) {
2132 be_post_rx_frags(rxo);
2133 napi_enable(&rxo->rx_eq.napi);
2134 }
5fb379ee
SP
2135 napi_enable(&tx_eq->napi);
2136
2137 be_irq_register(adapter);
2138
fe6d2a38
SP
2139 if (!lancer_chip(adapter))
2140 be_intr_set(adapter, true);
5fb379ee
SP
2141
2142 /* The evt queues are created in unarmed state; arm them */
3abcdeda
SP
2143 for_all_rx_queues(adapter, rxo, i) {
2144 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2145 be_cq_notify(adapter, rxo->cq.id, true, 0);
2146 }
8788fdc2 2147 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
5fb379ee 2148
7a1e9b20
SP
2149 /* Now that interrupts are on we can process async mcc */
2150 be_async_mcc_enable(adapter);
2151
0388f251
SB
2152 status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
2153 &link_speed);
a8f447bd 2154 if (status)
889cd4b2 2155 goto err;
a8f447bd 2156 be_link_status_update(adapter, link_up);
5fb379ee 2157
889cd4b2 2158 if (be_physfn(adapter)) {
1da87b7f 2159 status = be_vid_config(adapter, false, 0);
889cd4b2
SP
2160 if (status)
2161 goto err;
4f2aa89c 2162
ba343c77
SB
2163 status = be_cmd_set_flow_control(adapter,
2164 adapter->tx_fc, adapter->rx_fc);
2165 if (status)
889cd4b2 2166 goto err;
ba343c77 2167 }
4f2aa89c 2168
889cd4b2
SP
2169 return 0;
2170err:
2171 be_close(adapter->netdev);
2172 return -EIO;
5fb379ee
SP
2173}
2174
71d8d1b5
AK
2175static int be_setup_wol(struct be_adapter *adapter, bool enable)
2176{
2177 struct be_dma_mem cmd;
2178 int status = 0;
2179 u8 mac[ETH_ALEN];
2180
2181 memset(mac, 0, ETH_ALEN);
2182
2183 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2184 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
2185 if (cmd.va == NULL)
2186 return -1;
2187 memset(cmd.va, 0, cmd.size);
2188
2189 if (enable) {
2190 status = pci_write_config_dword(adapter->pdev,
2191 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2192 if (status) {
2193 dev_err(&adapter->pdev->dev,
2381a55c 2194				"Could not enable Wake-on-LAN\n");
71d8d1b5
AK
2195 pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
2196 cmd.dma);
2197 return status;
2198 }
2199 status = be_cmd_enable_magic_wol(adapter,
2200 adapter->netdev->dev_addr, &cmd);
2201 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2202 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2203 } else {
2204 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2205 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2206 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2207 }
2208
2209 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
2210 return status;
2211}
2212
6d87f5c3
AK
2213/*
 2214 * Generate a seed MAC address from the PF MAC address using jhash.
 2215 * MAC addresses for VFs are assigned incrementally starting from the seed.
2216 * These addresses are programmed in the ASIC by the PF and the VF driver
2217 * queries for the MAC address during its probe.
2218 */
2219static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2220{
2221 u32 vf = 0;
3abcdeda 2222 int status = 0;
6d87f5c3
AK
2223 u8 mac[ETH_ALEN];
2224
2225 be_vf_eth_addr_generate(adapter, mac);
2226
2227 for (vf = 0; vf < num_vfs; vf++) {
2228 status = be_cmd_pmac_add(adapter, mac,
2229 adapter->vf_cfg[vf].vf_if_handle,
2230 &adapter->vf_cfg[vf].vf_pmac_id);
2231 if (status)
2232 dev_err(&adapter->pdev->dev,
2233 "Mac address add failed for VF %d\n", vf);
2234 else
2235 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2236
2237 mac[5] += 1;
2238 }
2239 return status;
2240}
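
Note that the loop above bumps only mac[5], so the addresses wrap at 0xff without carrying into mac[4]; with the 32-VF cap enforced at module load this stays safe unless the seed lands near the top of the octet. A standalone sketch of the increment — the seed below is made up; the driver derives its real seed from the PF MAC via jhash:

#include <stdio.h>

int main(void)
{
	/* A made-up seed; the driver derives it from the PF MAC via jhash. */
	unsigned char mac[6] = { 0x02, 0x00, 0xc9, 0x12, 0x34, 0xfd };
	int vf;

	for (vf = 0; vf < 3; vf++) {
		printf("VF %d -> %02x:%02x:%02x:%02x:%02x:%02x\n", vf,
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
		mac[5] += 1;   /* last octet only: wraps, never carries */
	}
	return 0;
}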
2241
2242static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2243{
2244 u32 vf;
2245
2246 for (vf = 0; vf < num_vfs; vf++) {
2247 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2248 be_cmd_pmac_del(adapter,
2249 adapter->vf_cfg[vf].vf_if_handle,
2250 adapter->vf_cfg[vf].vf_pmac_id);
2251 }
2252}
2253
5fb379ee
SP
2254static int be_setup(struct be_adapter *adapter)
2255{
5fb379ee 2256 struct net_device *netdev = adapter->netdev;
ba343c77 2257 u32 cap_flags, en_flags, vf = 0;
6b7c5b94 2258 int status;
ba343c77
SB
2259 u8 mac[ETH_ALEN];
2260
2261 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
6b7c5b94 2262
ba343c77
SB
2263 if (be_physfn(adapter)) {
2264 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2265 BE_IF_FLAGS_PROMISCUOUS |
2266 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2267 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
3abcdeda
SP
2268
2269 if (be_multi_rxq(adapter)) {
2270 cap_flags |= BE_IF_FLAGS_RSS;
2271 en_flags |= BE_IF_FLAGS_RSS;
2272 }
ba343c77 2273 }
73d540f2
SP
2274
2275 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2276 netdev->dev_addr, false/* pmac_invalid */,
ba343c77 2277 &adapter->if_handle, &adapter->pmac_id, 0);
6b7c5b94
SP
2278 if (status != 0)
2279 goto do_none;
2280
ba343c77
SB
2281 if (be_physfn(adapter)) {
2282 while (vf < num_vfs) {
2283 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
2284 | BE_IF_FLAGS_BROADCAST;
2285 status = be_cmd_if_create(adapter, cap_flags, en_flags,
64600ea5
AK
2286 mac, true,
2287 &adapter->vf_cfg[vf].vf_if_handle,
ba343c77
SB
2288 NULL, vf+1);
2289 if (status) {
2290 dev_err(&adapter->pdev->dev,
2291 "Interface Create failed for VF %d\n", vf);
2292 goto if_destroy;
2293 }
64600ea5 2294 adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
ba343c77 2295 vf++;
84e5b9f7 2296 }
ba343c77
SB
2297 } else if (!be_physfn(adapter)) {
2298 status = be_cmd_mac_addr_query(adapter, mac,
2299 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2300 if (!status) {
2301 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2302 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2303 }
2304 }
2305
6b7c5b94
SP
2306 status = be_tx_queues_create(adapter);
2307 if (status != 0)
2308 goto if_destroy;
2309
2310 status = be_rx_queues_create(adapter);
2311 if (status != 0)
2312 goto tx_qs_destroy;
2313
5fb379ee
SP
2314 status = be_mcc_queues_create(adapter);
2315 if (status != 0)
2316 goto rx_qs_destroy;
6b7c5b94 2317
6d87f5c3
AK
2318 if (be_physfn(adapter)) {
2319 status = be_vf_eth_addr_config(adapter);
2320 if (status)
2321 goto mcc_q_destroy;
2322 }
2323
0dffc83e
AK
2324 adapter->link_speed = -1;
2325
6b7c5b94
SP
2326 return 0;
2327
6d87f5c3
AK
2328mcc_q_destroy:
2329 if (be_physfn(adapter))
2330 be_vf_eth_addr_rem(adapter);
2331 be_mcc_queues_destroy(adapter);
5fb379ee
SP
2332rx_qs_destroy:
2333 be_rx_queues_destroy(adapter);
6b7c5b94
SP
2334tx_qs_destroy:
2335 be_tx_queues_destroy(adapter);
2336if_destroy:
ba343c77 2337 for (vf = 0; vf < num_vfs; vf++)
64600ea5
AK
2338 if (adapter->vf_cfg[vf].vf_if_handle)
2339 be_cmd_if_destroy(adapter,
2340 adapter->vf_cfg[vf].vf_if_handle);
8788fdc2 2341 be_cmd_if_destroy(adapter, adapter->if_handle);
6b7c5b94
SP
2342do_none:
2343 return status;
2344}
2345
5fb379ee
SP
2346static int be_clear(struct be_adapter *adapter)
2347{
6d87f5c3
AK
2348 if (be_physfn(adapter))
2349 be_vf_eth_addr_rem(adapter);
2350
1a8887d8 2351 be_mcc_queues_destroy(adapter);
5fb379ee
SP
2352 be_rx_queues_destroy(adapter);
2353 be_tx_queues_destroy(adapter);
2354
8788fdc2 2355 be_cmd_if_destroy(adapter, adapter->if_handle);
5fb379ee 2356
2243e2e9
SP
2357 /* tell fw we're done with firing cmds */
2358 be_cmd_fw_clean(adapter);
5fb379ee
SP
2359 return 0;
2360}
2361
6b7c5b94 2362
84517482 2363#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
fa9a6fed 2364static bool be_flash_redboot(struct be_adapter *adapter,
3f0d4560
AK
2365 const u8 *p, u32 img_start, int image_size,
2366 int hdr_size)
fa9a6fed
SB
2367{
2368 u32 crc_offset;
2369 u8 flashed_crc[4];
2370 int status;
3f0d4560
AK
2371
2372 crc_offset = hdr_size + img_start + image_size - 4;
2373
fa9a6fed 2374 p += crc_offset;
3f0d4560
AK
2375
2376 status = be_cmd_get_flash_crc(adapter, flashed_crc,
f510fc64 2377 (image_size - 4));
fa9a6fed
SB
2378 if (status) {
2379 dev_err(&adapter->pdev->dev,
2380 "could not get crc from flash, not flashing redboot\n");
2381 return false;
2382 }
2383
 2384	/* update redboot only if CRC does not match */
2385 if (!memcmp(flashed_crc, p, 4))
2386 return false;
2387 else
2388 return true;
fa9a6fed
SB
2389}
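
be_flash_redboot() reads the 4-byte CRC currently stored in flash and compares it with the CRC carried in the last four bytes of the new image; only a mismatch triggers a reflash. The decision reduces to a memcmp, sketched here with dummy bytes in place of real CRC math:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Flash only if the 4-byte CRC at the image tail differs from the
 * CRC currently stored in flash. */
static bool needs_flash(const unsigned char flashed_crc[4],
			const unsigned char *img, int img_size)
{
	return memcmp(flashed_crc, img + img_size - 4, 4) != 0;
}

int main(void)
{
	unsigned char img[8]  = { 1, 2, 3, 4, 0xde, 0xad, 0xbe, 0xef };
	unsigned char same[4] = { 0xde, 0xad, 0xbe, 0xef };
	unsigned char diff[4] = { 0, 0, 0, 0 };

	printf("same crc -> flash? %d\n", needs_flash(same, img, 8)); /* 0 */
	printf("diff crc -> flash? %d\n", needs_flash(diff, img, 8)); /* 1 */
	return 0;
}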
2390
3f0d4560 2391static int be_flash_data(struct be_adapter *adapter,
84517482 2392 const struct firmware *fw,
3f0d4560
AK
2393 struct be_dma_mem *flash_cmd, int num_of_images)
2394
84517482 2395{
3f0d4560
AK
2396 int status = 0, i, filehdr_size = 0;
2397 u32 total_bytes = 0, flash_op;
84517482
AK
2398 int num_bytes;
2399 const u8 *p = fw->data;
2400 struct be_cmd_write_flashrom *req = flash_cmd->va;
215faf9c 2401 const struct flash_comp *pflashcomp;
9fe96934 2402 int num_comp;
3f0d4560 2403
215faf9c 2404 static const struct flash_comp gen3_flash_types[9] = {
3f0d4560
AK
2405 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2406 FLASH_IMAGE_MAX_SIZE_g3},
2407 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2408 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2409 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2410 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2411 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2412 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2413 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2414 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2415 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2416 FLASH_IMAGE_MAX_SIZE_g3},
2417 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2418 FLASH_IMAGE_MAX_SIZE_g3},
2419 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
9fe96934
SB
2420 FLASH_IMAGE_MAX_SIZE_g3},
2421 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2422 FLASH_NCSI_IMAGE_MAX_SIZE_g3}
3f0d4560 2423 };
215faf9c 2424 static const struct flash_comp gen2_flash_types[8] = {
3f0d4560
AK
2425 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2426 FLASH_IMAGE_MAX_SIZE_g2},
2427 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2428 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2429 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2430 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2431 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2432 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2433 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2434 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2435 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2436 FLASH_IMAGE_MAX_SIZE_g2},
2437 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2438 FLASH_IMAGE_MAX_SIZE_g2},
2439 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2440 FLASH_IMAGE_MAX_SIZE_g2}
2441 };
2442
2443 if (adapter->generation == BE_GEN3) {
2444 pflashcomp = gen3_flash_types;
2445 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 2446 num_comp = ARRAY_SIZE(gen3_flash_types);
3f0d4560
AK
2447 } else {
2448 pflashcomp = gen2_flash_types;
2449 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 2450 num_comp = ARRAY_SIZE(gen2_flash_types);
84517482 2451 }
9fe96934
SB
2452 for (i = 0; i < num_comp; i++) {
2453 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2454 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2455 continue;
3f0d4560
AK
2456 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2457 (!be_flash_redboot(adapter, fw->data,
2458 pflashcomp[i].offset, pflashcomp[i].size,
2459 filehdr_size)))
2460 continue;
2461 p = fw->data;
2462 p += filehdr_size + pflashcomp[i].offset
2463 + (num_of_images * sizeof(struct image_hdr));
2464 if (p + pflashcomp[i].size > fw->data + fw->size)
84517482 2465 return -1;
3f0d4560
AK
2466 total_bytes = pflashcomp[i].size;
2467 while (total_bytes) {
2468 if (total_bytes > 32*1024)
2469 num_bytes = 32*1024;
2470 else
2471 num_bytes = total_bytes;
2472 total_bytes -= num_bytes;
2473
2474 if (!total_bytes)
2475 flash_op = FLASHROM_OPER_FLASH;
2476 else
2477 flash_op = FLASHROM_OPER_SAVE;
2478 memcpy(req->params.data_buf, p, num_bytes);
2479 p += num_bytes;
2480 status = be_cmd_write_flashrom(adapter, flash_cmd,
2481 pflashcomp[i].optype, flash_op, num_bytes);
2482 if (status) {
2483 dev_err(&adapter->pdev->dev,
2484 "cmd to write to flash rom failed.\n");
2485 return -1;
2486 }
2487 yield();
84517482 2488 }
84517482 2489 }
84517482
AK
2490 return 0;
2491}
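
be_flash_data() streams each component to the card in 32 KB slices: every slice but the last goes down with the SAVE opcode, and the final slice switches to FLASH, which commits the image. A sketch of just the slicing arithmetic; the opcode names and component size are illustrative:

#include <stdio.h>

enum { OPER_SAVE, OPER_FLASH };       /* illustrative opcodes */
#define CHUNK (32 * 1024)

int main(void)
{
	int total = 100 * 1024, num, op;  /* a 100 KB component */

	while (total) {
		num = total > CHUNK ? CHUNK : total;
		total -= num;
		/* Only the last slice commits the image. */
		op = total ? OPER_SAVE : OPER_FLASH;
		printf("write %6d bytes, op=%s\n", num,
		       op == OPER_FLASH ? "FLASH" : "SAVE");
	}
	return 0;
}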
2492
3f0d4560
AK
2493static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2494{
2495 if (fhdr == NULL)
2496 return 0;
2497 if (fhdr->build[0] == '3')
2498 return BE_GEN3;
2499 else if (fhdr->build[0] == '2')
2500 return BE_GEN2;
2501 else
2502 return 0;
2503}
2504
84517482
AK
2505int be_load_fw(struct be_adapter *adapter, u8 *func)
2506{
2507 char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
2508 const struct firmware *fw;
3f0d4560
AK
2509 struct flash_file_hdr_g2 *fhdr;
2510 struct flash_file_hdr_g3 *fhdr3;
2511 struct image_hdr *img_hdr_ptr = NULL;
84517482 2512 struct be_dma_mem flash_cmd;
8b93b710 2513 int status, i = 0, num_imgs = 0;
84517482 2514 const u8 *p;
84517482 2515
d9efd2af
SB
2516 if (!netif_running(adapter->netdev)) {
2517 dev_err(&adapter->pdev->dev,
2518 "Firmware load not allowed (interface is down)\n");
2519 return -EPERM;
2520 }
2521
84517482
AK
2522 strcpy(fw_file, func);
2523
2524 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2525 if (status)
2526 goto fw_exit;
2527
2528 p = fw->data;
3f0d4560 2529 fhdr = (struct flash_file_hdr_g2 *) p;
84517482
AK
2530 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2531
84517482
AK
2532 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2533 flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
2534 &flash_cmd.dma);
2535 if (!flash_cmd.va) {
2536 status = -ENOMEM;
2537 dev_err(&adapter->pdev->dev,
2538 "Memory allocation failure while flashing\n");
2539 goto fw_exit;
2540 }
2541
3f0d4560
AK
2542 if ((adapter->generation == BE_GEN3) &&
2543 (get_ufigen_type(fhdr) == BE_GEN3)) {
2544 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
8b93b710
AK
2545 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2546 for (i = 0; i < num_imgs; i++) {
3f0d4560
AK
2547 img_hdr_ptr = (struct image_hdr *) (fw->data +
2548 (sizeof(struct flash_file_hdr_g3) +
8b93b710
AK
2549 i * sizeof(struct image_hdr)));
2550 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2551 status = be_flash_data(adapter, fw, &flash_cmd,
2552 num_imgs);
3f0d4560
AK
2553 }
2554 } else if ((adapter->generation == BE_GEN2) &&
2555 (get_ufigen_type(fhdr) == BE_GEN2)) {
2556 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2557 } else {
2558 dev_err(&adapter->pdev->dev,
2559 "UFI and Interface are not compatible for flashing\n");
2560 status = -1;
84517482
AK
2561 }
2562
2563 pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
2564 flash_cmd.dma);
2565 if (status) {
2566 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2567 goto fw_exit;
2568 }
2569
af901ca1 2570 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
84517482
AK
2571
2572fw_exit:
2573 release_firmware(fw);
2574 return status;
2575}
2576
6b7c5b94
SP
2577static struct net_device_ops be_netdev_ops = {
2578 .ndo_open = be_open,
2579 .ndo_stop = be_close,
2580 .ndo_start_xmit = be_xmit,
6b7c5b94
SP
2581 .ndo_set_rx_mode = be_set_multicast_list,
2582 .ndo_set_mac_address = be_mac_addr_set,
2583 .ndo_change_mtu = be_change_mtu,
2584 .ndo_validate_addr = eth_validate_addr,
2585 .ndo_vlan_rx_register = be_vlan_register,
2586 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
2587 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
64600ea5 2588 .ndo_set_vf_mac = be_set_vf_mac,
1da87b7f 2589 .ndo_set_vf_vlan = be_set_vf_vlan,
e1d18735 2590 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
64600ea5 2591 .ndo_get_vf_config = be_get_vf_config
6b7c5b94
SP
2592};
2593
2594static void be_netdev_init(struct net_device *netdev)
2595{
2596 struct be_adapter *adapter = netdev_priv(netdev);
3abcdeda
SP
2597 struct be_rx_obj *rxo;
2598 int i;
6b7c5b94
SP
2599
2600 netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
79032644
MM
2601 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
2602 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
49e4b847 2603 NETIF_F_GRO | NETIF_F_TSO6;
6b7c5b94 2604
79032644
MM
2605 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
2606 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
51c59870 2607
fe6d2a38
SP
2608 if (lancer_chip(adapter))
2609 netdev->vlan_features |= NETIF_F_TSO6;
2610
6b7c5b94
SP
2611 netdev->flags |= IFF_MULTICAST;
2612
728a9972
AK
2613 adapter->rx_csum = true;
2614
9e90c961
AK
2615 /* Default settings for Rx and Tx flow control */
2616 adapter->rx_fc = true;
2617 adapter->tx_fc = true;
2618
c190e3c8
AK
2619 netif_set_gso_max_size(netdev, 65535);
2620
6b7c5b94
SP
2621 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2622
2623 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2624
3abcdeda
SP
2625 for_all_rx_queues(adapter, rxo, i)
2626 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2627 BE_NAPI_WEIGHT);
2628
5fb379ee 2629 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
6b7c5b94
SP
2630 BE_NAPI_WEIGHT);
2631
6b7c5b94
SP
2632 netif_stop_queue(netdev);
2633}
2634
2635static void be_unmap_pci_bars(struct be_adapter *adapter)
2636{
8788fdc2
SP
2637 if (adapter->csr)
2638 iounmap(adapter->csr);
2639 if (adapter->db)
2640 iounmap(adapter->db);
ba343c77 2641 if (adapter->pcicfg && be_physfn(adapter))
8788fdc2 2642 iounmap(adapter->pcicfg);
6b7c5b94
SP
2643}
2644
2645static int be_map_pci_bars(struct be_adapter *adapter)
2646{
2647 u8 __iomem *addr;
ba343c77 2648 int pcicfg_reg, db_reg;
6b7c5b94 2649
fe6d2a38
SP
2650 if (lancer_chip(adapter)) {
2651 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2652 pci_resource_len(adapter->pdev, 0));
2653 if (addr == NULL)
2654 return -ENOMEM;
2655 adapter->db = addr;
2656 return 0;
2657 }
2658
ba343c77
SB
2659 if (be_physfn(adapter)) {
2660 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2661 pci_resource_len(adapter->pdev, 2));
2662 if (addr == NULL)
2663 return -ENOMEM;
2664 adapter->csr = addr;
2665 }
6b7c5b94 2666
ba343c77 2667 if (adapter->generation == BE_GEN2) {
7b139c83 2668 pcicfg_reg = 1;
ba343c77
SB
2669 db_reg = 4;
2670 } else {
7b139c83 2671 pcicfg_reg = 0;
ba343c77
SB
2672 if (be_physfn(adapter))
2673 db_reg = 4;
2674 else
2675 db_reg = 0;
2676 }
2677 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2678 pci_resource_len(adapter->pdev, db_reg));
6b7c5b94
SP
2679 if (addr == NULL)
2680 goto pci_map_err;
ba343c77
SB
2681 adapter->db = addr;
2682
2683 if (be_physfn(adapter)) {
2684 addr = ioremap_nocache(
2685 pci_resource_start(adapter->pdev, pcicfg_reg),
2686 pci_resource_len(adapter->pdev, pcicfg_reg));
2687 if (addr == NULL)
2688 goto pci_map_err;
2689 adapter->pcicfg = addr;
2690 } else
2691 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
6b7c5b94
SP
2692
2693 return 0;
2694pci_map_err:
2695 be_unmap_pci_bars(adapter);
2696 return -ENOMEM;
2697}
2698
2699
2700static void be_ctrl_cleanup(struct be_adapter *adapter)
2701{
8788fdc2 2702 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
6b7c5b94
SP
2703
2704 be_unmap_pci_bars(adapter);
2705
2706 if (mem->va)
2707 pci_free_consistent(adapter->pdev, mem->size,
2708 mem->va, mem->dma);
e7b909a6
SP
2709
2710 mem = &adapter->mc_cmd_mem;
2711 if (mem->va)
2712 pci_free_consistent(adapter->pdev, mem->size,
2713 mem->va, mem->dma);
6b7c5b94
SP
2714}
2715
6b7c5b94
SP
2716static int be_ctrl_init(struct be_adapter *adapter)
2717{
8788fdc2
SP
2718 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
2719 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
e7b909a6 2720 struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
6b7c5b94 2721 int status;
6b7c5b94
SP
2722
2723 status = be_map_pci_bars(adapter);
2724 if (status)
e7b909a6 2725 goto done;
6b7c5b94
SP
2726
2727 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2728 mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
2729 mbox_mem_alloc->size, &mbox_mem_alloc->dma);
2730 if (!mbox_mem_alloc->va) {
e7b909a6
SP
2731 status = -ENOMEM;
2732 goto unmap_pci_bars;
6b7c5b94 2733 }
e7b909a6 2734
6b7c5b94
SP
2735 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
2736 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
2737 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
2738 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
e7b909a6
SP
2739
2740 mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
2741 mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size,
2742 &mc_cmd_mem->dma);
2743 if (mc_cmd_mem->va == NULL) {
2744 status = -ENOMEM;
2745 goto free_mbox;
2746 }
2747 memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
2748
2984961c 2749 mutex_init(&adapter->mbox_lock);
8788fdc2
SP
2750 spin_lock_init(&adapter->mcc_lock);
2751 spin_lock_init(&adapter->mcc_cq_lock);
a8f447bd 2752
dd131e76 2753 init_completion(&adapter->flash_compl);
cf588477 2754 pci_save_state(adapter->pdev);
6b7c5b94 2755 return 0;
e7b909a6
SP
2756
2757free_mbox:
2758 pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
2759 mbox_mem_alloc->va, mbox_mem_alloc->dma);
2760
2761unmap_pci_bars:
2762 be_unmap_pci_bars(adapter);
2763
2764done:
2765 return status;
6b7c5b94
SP
2766}
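
The mailbox must start on a 16-byte boundary, so be_ctrl_init() over-allocates by 16 bytes and rounds both the CPU and DMA addresses up with PTR_ALIGN. The round-up arithmetic behind that macro, in a standalone userspace form:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Round 'p' up to the next multiple of 'a' (a power of two) --
 * the arithmetic behind the kernel's PTR_ALIGN(). */
static void *ptr_align(void *p, uintptr_t a)
{
	return (void *)(((uintptr_t)p + a - 1) & ~(a - 1));
}

int main(void)
{
	size_t size = 256;                     /* stand-in mailbox size */
	void *raw = malloc(size + 16);         /* over-allocate by 16 */
	void *aligned = ptr_align(raw, 16);

	printf("raw=%p aligned=%p\n", raw, aligned);
	free(raw);
	return 0;
}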
2767
2768static void be_stats_cleanup(struct be_adapter *adapter)
2769{
3abcdeda 2770 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94
SP
2771
2772 if (cmd->va)
2773 pci_free_consistent(adapter->pdev, cmd->size,
2774 cmd->va, cmd->dma);
2775}
2776
2777static int be_stats_init(struct be_adapter *adapter)
2778{
3abcdeda 2779 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94
SP
2780
2781 cmd->size = sizeof(struct be_cmd_req_get_stats);
2782 cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
2783 if (cmd->va == NULL)
2784 return -1;
d291b9af 2785 memset(cmd->va, 0, cmd->size);
6b7c5b94
SP
2786 return 0;
2787}
2788
2789static void __devexit be_remove(struct pci_dev *pdev)
2790{
2791 struct be_adapter *adapter = pci_get_drvdata(pdev);
8d56ff11 2792
6b7c5b94
SP
2793 if (!adapter)
2794 return;
2795
f203af70
SK
2796 cancel_delayed_work_sync(&adapter->work);
2797
6b7c5b94
SP
2798 unregister_netdev(adapter->netdev);
2799
5fb379ee
SP
2800 be_clear(adapter);
2801
6b7c5b94
SP
2802 be_stats_cleanup(adapter);
2803
2804 be_ctrl_cleanup(adapter);
2805
ba343c77
SB
2806 be_sriov_disable(adapter);
2807
8d56ff11 2808 be_msix_disable(adapter);
6b7c5b94
SP
2809
2810 pci_set_drvdata(pdev, NULL);
2811 pci_release_regions(pdev);
2812 pci_disable_device(pdev);
2813
2814 free_netdev(adapter->netdev);
2815}
2816
2243e2e9 2817static int be_get_config(struct be_adapter *adapter)
6b7c5b94 2818{
6b7c5b94 2819 int status;
2243e2e9 2820 u8 mac[ETH_ALEN];
6b7c5b94 2821
2243e2e9 2822 status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
6b7c5b94
SP
2823 if (status)
2824 return status;
2825
3abcdeda
SP
2826 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2827 &adapter->function_mode, &adapter->function_caps);
43a04fdc
SP
2828 if (status)
2829 return status;
2830
2243e2e9 2831 memset(mac, 0, ETH_ALEN);
ba343c77
SB
2832
2833 if (be_physfn(adapter)) {
2834 status = be_cmd_mac_addr_query(adapter, mac,
2243e2e9 2835 MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
ca9e4988 2836
ba343c77
SB
2837 if (status)
2838 return status;
ca9e4988 2839
ba343c77
SB
2840 if (!is_valid_ether_addr(mac))
2841 return -EADDRNOTAVAIL;
2842
2843 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2844 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2845 }
6b7c5b94 2846
3486be29 2847 if (adapter->function_mode & 0x400)
82903e4b
AK
2848 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
2849 else
2850 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2851
2243e2e9 2852 return 0;
6b7c5b94
SP
2853}
2854
fe6d2a38
SP
2855static int be_dev_family_check(struct be_adapter *adapter)
2856{
2857 struct pci_dev *pdev = adapter->pdev;
2858 u32 sli_intf = 0, if_type;
2859
2860 switch (pdev->device) {
2861 case BE_DEVICE_ID1:
2862 case OC_DEVICE_ID1:
2863 adapter->generation = BE_GEN2;
2864 break;
2865 case BE_DEVICE_ID2:
2866 case OC_DEVICE_ID2:
2867 adapter->generation = BE_GEN3;
2868 break;
2869 case OC_DEVICE_ID3:
2870 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
2871 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
2872 SLI_INTF_IF_TYPE_SHIFT;
2873
2874 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
2875 if_type != 0x02) {
2876 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
2877 return -EINVAL;
2878 }
2879 if (num_vfs > 0) {
2880 dev_err(&pdev->dev, "VFs not supported\n");
2881 return -EINVAL;
2882 }
2883 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
2884 SLI_INTF_FAMILY_SHIFT);
2885 adapter->generation = BE_GEN3;
2886 break;
2887 default:
2888 adapter->generation = 0;
2889 }
2890 return 0;
2891}
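
For the Lancer device ID the probe path validates SLI_INTF and then pulls the interface-type and family fields out by mask-and-shift. A sketch of the extraction with invented bit positions; the real masks and shifts live in the driver headers:

#include <stdint.h>
#include <stdio.h>

/* Illustrative layout only -- not the real SLI_INTF bit positions. */
#define IF_TYPE_MASK	0x0000f000
#define IF_TYPE_SHIFT	12
#define FAMILY_MASK	0x00000f00
#define FAMILY_SHIFT	8

int main(void)
{
	uint32_t sli_intf = 0x00002300;   /* if_type=2, family=3 */

	printf("if_type=%u family=%u\n",
	       (unsigned)((sli_intf & IF_TYPE_MASK) >> IF_TYPE_SHIFT),
	       (unsigned)((sli_intf & FAMILY_MASK) >> FAMILY_SHIFT));
	return 0;
}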
2892
6b7c5b94
SP
2893static int __devinit be_probe(struct pci_dev *pdev,
2894 const struct pci_device_id *pdev_id)
2895{
2896 int status = 0;
2897 struct be_adapter *adapter;
2898 struct net_device *netdev;
6b7c5b94
SP
2899
2900 status = pci_enable_device(pdev);
2901 if (status)
2902 goto do_none;
2903
2904 status = pci_request_regions(pdev, DRV_NAME);
2905 if (status)
2906 goto disable_dev;
2907 pci_set_master(pdev);
2908
2909 netdev = alloc_etherdev(sizeof(struct be_adapter));
2910 if (netdev == NULL) {
2911 status = -ENOMEM;
2912 goto rel_reg;
2913 }
2914 adapter = netdev_priv(netdev);
2915 adapter->pdev = pdev;
2916 pci_set_drvdata(pdev, adapter);
fe6d2a38
SP
2917
2918 status = be_dev_family_check(adapter);
63657b9c 2919 if (status)
fe6d2a38
SP
2920 goto free_netdev;
2921
6b7c5b94 2922 adapter->netdev = netdev;
2243e2e9 2923 SET_NETDEV_DEV(netdev, &pdev->dev);
6b7c5b94 2924
e930438c 2925 status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
6b7c5b94
SP
2926 if (!status) {
2927 netdev->features |= NETIF_F_HIGHDMA;
2928 } else {
e930438c 2929 status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6b7c5b94
SP
2930 if (status) {
2931 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
2932 goto free_netdev;
2933 }
2934 }
2935
ba343c77
SB
2936 be_sriov_enable(adapter);
2937
6b7c5b94
SP
2938 status = be_ctrl_init(adapter);
2939 if (status)
2940 goto free_netdev;
2941
2243e2e9 2942 /* sync up with fw's ready state */
ba343c77
SB
2943 if (be_physfn(adapter)) {
2944 status = be_cmd_POST(adapter);
2945 if (status)
2946 goto ctrl_clean;
ba343c77 2947 }
6b7c5b94 2948
2243e2e9
SP
2949 /* tell fw we're ready to fire cmds */
2950 status = be_cmd_fw_init(adapter);
6b7c5b94 2951 if (status)
2243e2e9
SP
2952 goto ctrl_clean;
2953
556ae191
SB
2954 if (be_physfn(adapter)) {
2955 status = be_cmd_reset_function(adapter);
2956 if (status)
2957 goto ctrl_clean;
2958 }
2959
2243e2e9
SP
2960 status = be_stats_init(adapter);
2961 if (status)
2962 goto ctrl_clean;
2963
2964 status = be_get_config(adapter);
6b7c5b94
SP
2965 if (status)
2966 goto stats_clean;
6b7c5b94 2967
3abcdeda
SP
2968 be_msix_enable(adapter);
2969
6b7c5b94 2970 INIT_DELAYED_WORK(&adapter->work, be_worker);
6b7c5b94 2971
5fb379ee
SP
2972 status = be_setup(adapter);
2973 if (status)
3abcdeda 2974 goto msix_disable;
2243e2e9 2975
3abcdeda 2976 be_netdev_init(netdev);
6b7c5b94
SP
2977 status = register_netdev(netdev);
2978 if (status != 0)
5fb379ee 2979 goto unsetup;
63a76944 2980 netif_carrier_off(netdev);
6b7c5b94 2981
c4ca2374 2982 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
f203af70 2983 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
6b7c5b94
SP
2984 return 0;
2985
5fb379ee
SP
2986unsetup:
2987 be_clear(adapter);
3abcdeda
SP
2988msix_disable:
2989 be_msix_disable(adapter);
6b7c5b94
SP
2990stats_clean:
2991 be_stats_cleanup(adapter);
2992ctrl_clean:
2993 be_ctrl_cleanup(adapter);
2994free_netdev:
ba343c77 2995 be_sriov_disable(adapter);
fe6d2a38 2996 free_netdev(netdev);
8d56ff11 2997 pci_set_drvdata(pdev, NULL);
6b7c5b94
SP
2998rel_reg:
2999 pci_release_regions(pdev);
3000disable_dev:
3001 pci_disable_device(pdev);
3002do_none:
c4ca2374 3003 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
6b7c5b94
SP
3004 return status;
3005}
3006
3007static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3008{
3009 struct be_adapter *adapter = pci_get_drvdata(pdev);
3010 struct net_device *netdev = adapter->netdev;
3011
71d8d1b5
AK
3012 if (adapter->wol)
3013 be_setup_wol(adapter, true);
3014
6b7c5b94
SP
3015 netif_device_detach(netdev);
3016 if (netif_running(netdev)) {
3017 rtnl_lock();
3018 be_close(netdev);
3019 rtnl_unlock();
3020 }
9e90c961 3021 be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
9b0365f1 3022 be_clear(adapter);
6b7c5b94
SP
3023
3024 pci_save_state(pdev);
3025 pci_disable_device(pdev);
3026 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3027 return 0;
3028}
3029
3030static int be_resume(struct pci_dev *pdev)
3031{
3032 int status = 0;
3033 struct be_adapter *adapter = pci_get_drvdata(pdev);
3034 struct net_device *netdev = adapter->netdev;
3035
3036 netif_device_detach(netdev);
3037
3038 status = pci_enable_device(pdev);
3039 if (status)
3040 return status;
3041
3042 pci_set_power_state(pdev, 0);
3043 pci_restore_state(pdev);
3044
2243e2e9
SP
3045 /* tell fw we're ready to fire cmds */
3046 status = be_cmd_fw_init(adapter);
3047 if (status)
3048 return status;
3049
9b0365f1 3050 be_setup(adapter);
6b7c5b94
SP
3051 if (netif_running(netdev)) {
3052 rtnl_lock();
3053 be_open(netdev);
3054 rtnl_unlock();
3055 }
3056 netif_device_attach(netdev);
71d8d1b5
AK
3057
3058 if (adapter->wol)
3059 be_setup_wol(adapter, false);
6b7c5b94
SP
3060 return 0;
3061}
3062
82456b03
SP
3063/*
3064 * An FLR will stop BE from DMAing any data.
3065 */
3066static void be_shutdown(struct pci_dev *pdev)
3067{
3068 struct be_adapter *adapter = pci_get_drvdata(pdev);
3069 struct net_device *netdev = adapter->netdev;
3070
3071 netif_device_detach(netdev);
3072
3073 be_cmd_reset_function(adapter);
3074
3075 if (adapter->wol)
3076 be_setup_wol(adapter, true);
3077
3078 pci_disable_device(pdev);
82456b03
SP
3079}
3080
cf588477
SP
3081static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3082 pci_channel_state_t state)
3083{
3084 struct be_adapter *adapter = pci_get_drvdata(pdev);
3085 struct net_device *netdev = adapter->netdev;
3086
3087 dev_err(&adapter->pdev->dev, "EEH error detected\n");
3088
3089 adapter->eeh_err = true;
3090
3091 netif_device_detach(netdev);
3092
3093 if (netif_running(netdev)) {
3094 rtnl_lock();
3095 be_close(netdev);
3096 rtnl_unlock();
3097 }
3098 be_clear(adapter);
3099
3100 if (state == pci_channel_io_perm_failure)
3101 return PCI_ERS_RESULT_DISCONNECT;
3102
3103 pci_disable_device(pdev);
3104
3105 return PCI_ERS_RESULT_NEED_RESET;
3106}
3107
3108static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3109{
3110 struct be_adapter *adapter = pci_get_drvdata(pdev);
3111 int status;
3112
3113 dev_info(&adapter->pdev->dev, "EEH reset\n");
3114 adapter->eeh_err = false;
3115
3116 status = pci_enable_device(pdev);
3117 if (status)
3118 return PCI_ERS_RESULT_DISCONNECT;
3119
3120 pci_set_master(pdev);
3121 pci_set_power_state(pdev, 0);
3122 pci_restore_state(pdev);
3123
3124 /* Check if card is ok and fw is ready */
3125 status = be_cmd_POST(adapter);
3126 if (status)
3127 return PCI_ERS_RESULT_DISCONNECT;
3128
3129 return PCI_ERS_RESULT_RECOVERED;
3130}
3131
3132static void be_eeh_resume(struct pci_dev *pdev)
3133{
3134 int status = 0;
3135 struct be_adapter *adapter = pci_get_drvdata(pdev);
3136 struct net_device *netdev = adapter->netdev;
3137
3138 dev_info(&adapter->pdev->dev, "EEH resume\n");
3139
3140 pci_save_state(pdev);
3141
3142 /* tell fw we're ready to fire cmds */
3143 status = be_cmd_fw_init(adapter);
3144 if (status)
3145 goto err;
3146
3147 status = be_setup(adapter);
3148 if (status)
3149 goto err;
3150
3151 if (netif_running(netdev)) {
3152 status = be_open(netdev);
3153 if (status)
3154 goto err;
3155 }
3156 netif_device_attach(netdev);
3157 return;
3158err:
3159 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
cf588477
SP
3160}
3161
3162static struct pci_error_handlers be_eeh_handlers = {
3163 .error_detected = be_eeh_err_detected,
3164 .slot_reset = be_eeh_reset,
3165 .resume = be_eeh_resume,
3166};
3167
6b7c5b94
SP
3168static struct pci_driver be_driver = {
3169 .name = DRV_NAME,
3170 .id_table = be_dev_ids,
3171 .probe = be_probe,
3172 .remove = be_remove,
3173 .suspend = be_suspend,
cf588477 3174 .resume = be_resume,
82456b03 3175 .shutdown = be_shutdown,
cf588477 3176 .err_handler = &be_eeh_handlers
6b7c5b94
SP
3177};
3178
3179static int __init be_init_module(void)
3180{
8e95a202
JP
3181 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3182 rx_frag_size != 2048) {
6b7c5b94
SP
3183 printk(KERN_WARNING DRV_NAME
3184 " : Module param rx_frag_size must be 2048/4096/8192."
3185 " Using 2048\n");
3186 rx_frag_size = 2048;
3187 }
6b7c5b94 3188
ba343c77
SB
3189 if (num_vfs > 32) {
3190 printk(KERN_WARNING DRV_NAME
3191 " : Module param num_vfs must not be greater than 32."
3192 "Using 32\n");
3193 num_vfs = 32;
3194 }
3195
6b7c5b94
SP
3196 return pci_register_driver(&be_driver);
3197}
3198module_init(be_init_module);
3199
3200static void __exit be_exit_module(void)
3201{
3202 pci_unregister_driver(&be_driver);
3203}
3204module_exit(be_exit_module);