be2net: don't rearm mcc cq when device is not open
drivers/net/benet/be_main.c

/*
 * Copyright (C) 2005 - 2009 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int rx_frag_size = 2048;
module_param(rx_frag_size, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

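/* Enable/disable interrupt generation to the host by toggling the hostintr
 * bit in the membar control register; a no-op once an EEH error has been
 * detected or when the bit already has the requested value.
 */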
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (adapter->eeh_err)
		return;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	iowrite32(reg, addr);
}

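/* The *_notify() helpers below ring the doorbell for a given queue id,
 * telling the controller how many entries were posted or consumed.
 */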
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
			adapter->if_handle, &adapter->pmac_id);
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}

void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va);
	struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct net_device_stats *dev_stats = &adapter->netdev->stats;
	struct be_erx_stats *erx_stats = &hw_stats->erx;

	dev_stats->rx_packets = port_stats->rx_total_frames;
	dev_stats->tx_packets = port_stats->tx_unicastframes +
		port_stats->tx_multicastframes + port_stats->tx_broadcastframes;
	dev_stats->rx_bytes = (u64) port_stats->rx_bytes_msd << 32 |
				(u64) port_stats->rx_bytes_lsd;
	dev_stats->tx_bytes = (u64) port_stats->tx_bytes_msd << 32 |
				(u64) port_stats->tx_bytes_lsd;

	/* bad pkts received */
	dev_stats->rx_errors = port_stats->rx_crc_errors +
		port_stats->rx_alignment_symbol_errors +
		port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long +
		port_stats->rx_dropped_too_small +
		port_stats->rx_dropped_too_short +
		port_stats->rx_dropped_header_too_small +
		port_stats->rx_dropped_tcp_length +
		port_stats->rx_dropped_runt +
		port_stats->rx_tcp_checksum_errs +
		port_stats->rx_ip_checksum_errs +
		port_stats->rx_udp_checksum_errs;

	/* no space in linux buffers: best possible approximation */
	dev_stats->rx_dropped =
		erx_stats->rx_drops_no_fragments[adapter->rx_obj.q.id];

	/* detailed rx errors */
	dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long;

	/* receive ring buffer overflow */
	dev_stats->rx_over_errors = 0;

	dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
					port_stats->rx_input_fifo_overflow +
					rxf_stats->rx_drops_no_pbuf;
	/* receiver missed packets */
	dev_stats->rx_missed_errors = 0;

	/* packet transmit problems */
	dev_stats->tx_errors = 0;

	/* no space available in linux */
	dev_stats->tx_dropped = 0;

	dev_stats->multicast = port_stats->rx_multicast_frames;
	dev_stats->collisions = 0;

	/* detailed tx_errors */
	dev_stats->tx_aborted_errors = 0;
	dev_stats->tx_carrier_errors = 0;
	dev_stats->tx_fifo_errors = 0;
	dev_stats->tx_heartbeat_errors = 0;
	dev_stats->tx_window_errors = 0;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
	struct net_device *netdev = adapter->netdev;

	/* If link came up or went down */
	if (adapter->link_up != link_up) {
		adapter->link_speed = -1;
		if (link_up) {
			netif_start_queue(netdev);
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		} else {
			netif_stop_queue(netdev);
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		}
		adapter->link_up = link_up;
	}
}

/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter)
{
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_drvr_stats *stats = &adapter->stats.drvr_stats;
	ulong now = jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	stats->be_rx_fps = (stats->be_rx_frags - stats->be_prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->be_prev_rx_frags = stats->be_rx_frags;
	eqd = stats->be_rx_fps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}

static struct net_device_stats *be_get_stats(struct net_device *dev)
{
	return &dev->stats;
}

static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
	u64 rate = bytes;

	do_div(rate, ticks / HZ);
	rate <<= 3;			/* bytes/sec -> bits/sec */
	do_div(rate, 1000000ul);	/* MB/Sec */

	return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around? */
	if (time_before(now, stats->be_tx_jiffies)) {
		stats->be_tx_jiffies = now;
		return;
	}

	/* Update tx rate once in two seconds */
	if ((now - stats->be_tx_jiffies) > 2 * HZ) {
		stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
						  - stats->be_tx_bytes_prev,
						 now - stats->be_tx_jiffies);
		stats->be_tx_jiffies = now;
		stats->be_tx_bytes_prev = stats->be_tx_bytes;
	}
}

static void be_tx_stats_update(struct be_adapter *adapter,
			u32 wrb_cnt, u32 copied, bool stopped)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);
	stats->be_tx_reqs++;
	stats->be_tx_wrbs += wrb_cnt;
	stats->be_tx_bytes += copied;
	if (stopped)
		stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (cnt & 1) {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	} else
		*dummy = false;
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

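/* Fill the per-request header wrb: checksum/LSO/vlan offload flags, the
 * number of wrbs in the request and the total frame length.
 */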
static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
		bool vlan, u32 wrb_cnt, u32 len)
{
	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_shinfo(skb)->gso_segs > 1 && skb_shinfo(skb)->gso_size) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag,
			hdr, vlan_tx_tag_get(skb));
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

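/* DMA-map the skb header and its frags, filling one wrb per mapping (plus
 * an optional dummy wrb to keep the wrb count even); returns the number of
 * bytes mapped.
 */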
static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	u64 busaddr;
	u32 i, copied = 0;
	struct pci_dev *pdev = adapter->pdev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;

	hdr = queue_head_node(txq);
	atomic_add(wrb_cnt, &txq->used);
	queue_head_inc(txq);

	if (skb->len > skb->data_len) {
		int len = skb->len - skb->data_len;
		busaddr = pci_map_single(pdev, skb->data, len,
					 PCI_DMA_TODEVICE);
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = pci_map_page(pdev, frag->page,
				       frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(hdr, first_skb, adapter->vlan_grp ? true : false,
		wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(tx_obj->sent_skb_list[start]);
		tx_obj->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_queue(netdev);
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(adapter, wrb_cnt, copied, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}
	return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;

	be_eq_notify(adapter, rx_eq->q.id, false, false, 0);
	be_eq_notify(adapter, tx_eq->q.id, false, false, 0);
	adapter->vlan_grp = grp;
	be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlan_tag[vid] = 1;
	adapter->vlans_added++;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlan_tag[vid] = 0;
	vlan_group_set_device(adapter->vlan_grp, vid, NULL);
	adapter->vlans_added--;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter);
}

static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_multicast_set(adapter, adapter->if_handle, NULL, 0,
				&adapter->mc_cmd_mem);
		goto done;
	}

	be_cmd_multicast_set(adapter, adapter->if_handle, netdev->mc_list,
		netdev_mc_count(netdev), &adapter->mc_cmd_mem);
done:
	return;
}

static void be_rx_rate_update(struct be_adapter *adapter)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around */
	if (time_before(now, stats->be_rx_jiffies)) {
		stats->be_rx_jiffies = now;
		return;
	}

	/* Update the rate once in two seconds */
	if ((now - stats->be_rx_jiffies) < 2 * HZ)
		return;

	stats->be_rx_rate = be_calc_rate(stats->be_rx_bytes
					  - stats->be_rx_bytes_prev,
					 now - stats->be_rx_jiffies);
	stats->be_rx_jiffies = now;
	stats->be_rx_bytes_prev = stats->be_rx_bytes;
}

static void be_rx_stats_update(struct be_adapter *adapter,
		u32 pktsize, u16 numfrags)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);

	stats->be_rx_compl++;
	stats->be_rx_frags += numfrags;
	stats->be_rx_bytes += pktsize;
}

static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
{
	u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;

	l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
	ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
	ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
	if (ip_version) {
		tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
		udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
	}
	ipv6_chk = (ip_version && (tcpf || udpf));

	return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
}

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &adapter->rx_obj.q;

	rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		pci_unmap_page(adapter->pdev, pci_unmap_addr(rx_page_info, bus),
			adapter->big_page_size, PCI_DMA_FROMDEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_eth_rx_compl *rxcp)
{
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, num_rcvd;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter,
			struct sk_buff *skb, struct be_eth_rx_compl *rxcp)
{
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, num_rcvd, j;
	u32 pktsize, hdr_len, curr_frag_len, size;
	u8 *start;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	page_info = get_rx_page_info(adapter, rxq_idx);

	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(pktsize, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (pktsize <= rx_frag_size) {
		BUG_ON(num_rcvd != 1);
		goto done;
	}

	/* More frags present for this completion */
	size = pktsize;
	for (i = 1, j = 0; i < num_rcvd; i++) {
		size -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		page_info = get_rx_page_info(adapter, rxq_idx);

		curr_frag_len = min(size, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);

done:
	be_rx_stats_update(adapter, pktsize, num_rcvd);
	return;
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_eth_rx_compl *rxcp)
{
	struct sk_buff *skb;
	u32 vlanf, vid;
	u8 vtm;

	skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		be_rx_compl_discard(adapter, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, skb, rxcp);

	if (do_pkt_csum(rxcp, adapter->rx_csum))
		skb->ip_summed = CHECKSUM_NONE;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, adapter->netdev);
	skb->dev = adapter->netdev;

	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);

	/* vlanf could be wrongly set in some cards.
	 * ignore if vtm is not set */
	if ((adapter->cap & 0x400) && !vtm)
		vlanf = 0;

	if (unlikely(vlanf)) {
		if (!adapter->vlan_grp || adapter->vlans_added == 0) {
			kfree_skb(skb);
			return;
		}
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		vid = be16_to_cpu(vid);
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
	} else {
		netif_receive_skb(skb);
	}

	return;
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
			struct be_eth_rx_compl *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_eq_obj *eq_obj = &adapter->rx_eq;
	u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
	u16 i, rxq_idx = 0, vid, j;
	u8 vtm;

	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
	pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);

	/* vlanf could be wrongly set in some cards.
	 * ignore if vtm is not set */
	if ((adapter->cap & 0x400) && !vtm)
		vlanf = 0;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxcp);
		return;
	}

	remaining = pkt_size;
	for (i = 0, j = -1; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = pkt_size;
	skb->data_len = pkt_size;
	skb->truesize += pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (likely(!vlanf)) {
		napi_gro_frags(&eq_obj->napi);
	} else {
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		vid = be16_to_cpu(vid);

		if (!adapter->vlan_grp || adapter->vlans_added == 0)
			return;

		vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
	}

	be_rx_stats_update(adapter, pkt_size, num_rcvd);
	return;
}

static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
{
	struct be_eth_rx_compl *rxcp = queue_tail_node(&adapter->rx_obj.cq);

	if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
		return NULL;

	be_dws_le_to_cpu(rxcp, sizeof(*rxcp));

	queue_tail_inc(&adapter->rx_obj.cq);
	return rxcp;
}

/* To reset the valid bit, we need to reset the whole word as
 * when walking the queue the valid entries are little-endian
 * and invalid entries are host endian
 */
static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
{
	rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
}

static inline struct page *be_alloc_pages(u32 size)
{
	gfp_t alloc_flags = GFP_ATOMIC;
	u32 order = get_order(size);
	if (order > 0)
		alloc_flags |= __GFP_COMP;
	return alloc_pages(alloc_flags, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_adapter *adapter)
{
	struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size);
			if (unlikely(!pagep)) {
				drvr_stats(adapter)->be_ethrx_post_fail++;
				break;
			}
			page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
						adapter->big_page_size,
						PCI_DMA_FROMDEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		pci_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		adapter->rx_post_starved = true;
	}

	return;
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

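/* Unmap the wrbs of the tx request that ended at last_index and free the
 * corresponding skb.
 */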
static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	u64 busaddr;
	u16 cur_index, num_wrbs = 0;

	cur_index = txq->tail;
	sent_skb = sent_skbs[cur_index];
	BUG_ON(!sent_skb);
	sent_skbs[cur_index] = NULL;
	wrb = queue_tail_node(txq);
	be_dws_le_to_cpu(wrb, sizeof(*wrb));
	busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
	if (busaddr != 0) {
		pci_unmap_single(adapter->pdev, busaddr,
			wrb->frag_len, PCI_DMA_TODEVICE);
	}
	num_wrbs++;
	queue_tail_inc(txq);

	while (cur_index != last_index) {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		be_dws_le_to_cpu(wrb, sizeof(*wrb));
		busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
		if (busaddr != 0) {
			pci_unmap_page(adapter->pdev, busaddr,
				wrb->frag_len, PCI_DMA_TODEVICE);
		}
		num_wrbs++;
		queue_tail_inc(txq);
	}

	atomic_sub(num_wrbs, &txq->used);

	kfree_skb(sent_skb);
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

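/* Drain the event queue, re-arm it crediting the popped entries, and
 * schedule napi if any events were present.
 */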
static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	be_eq_notify(adapter, eq_obj->q.id, true, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

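/* Discard any pending rx completions and release posted rx buffers that
 * were never filled by the hardware.
 */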
static void be_rx_q_clean(struct be_adapter *adapter)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
	struct be_eth_rx_compl *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(adapter)) != NULL) {
		be_rx_compl_discard(adapter, rxcp);
		be_rx_compl_reset(rxcp);
		be_cq_notify(adapter, rx_cq->id, true, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
}

static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			be_tx_compl_process(adapter, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			cmpl = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->tx_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
	be_queue_free(adapter, q);

	q = &adapter->tx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;
	/* Alloc Tx Event queue */
	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
		return -1;

	/* Ask BE to create Tx Event queue */
	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto tx_eq_free;
	/* Alloc TX eth compl queue */
	cq = &adapter->tx_obj.cq;
	if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
		goto tx_eq_destroy;

	/* Ask BE to create Tx eth compl queue */
	if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
		goto tx_cq_free;

	/* Alloc TX eth queue */
	q = &adapter->tx_obj.q;
	if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
		goto tx_cq_destroy;

	/* Ask BE to create Tx eth queue */
	if (be_cmd_txq_create(adapter, q, cq))
		goto tx_q_free;
	return 0;

tx_q_free:
	be_queue_free(adapter, q);
tx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
	be_queue_free(adapter, cq);
tx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
	be_queue_free(adapter, eq);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->rx_obj.q;
	if (q->created) {
		be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
		be_rx_q_clean(adapter);
	}
	be_queue_free(adapter, q);

	q = &adapter->rx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->rx_eq);

	q = &adapter->rx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	int rc;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	adapter->rx_eq.max_eqd = BE_MAX_EQD;
	adapter->rx_eq.min_eqd = 0;
	adapter->rx_eq.cur_eqd = 0;
	adapter->rx_eq.enable_aic = true;

	/* Alloc Rx Event queue */
	eq = &adapter->rx_eq.q;
	rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				sizeof(struct be_eq_entry));
	if (rc)
		return rc;

	/* Ask BE to create Rx Event queue */
	rc = be_cmd_eq_create(adapter, eq, adapter->rx_eq.cur_eqd);
	if (rc)
		goto rx_eq_free;

	/* Alloc RX eth compl queue */
	cq = &adapter->rx_obj.cq;
	rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
			sizeof(struct be_eth_rx_compl));
	if (rc)
		goto rx_eq_destroy;

	/* Ask BE to create Rx eth compl queue */
	rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
	if (rc)
		goto rx_cq_free;

	/* Alloc RX eth queue */
	q = &adapter->rx_obj.q;
	rc = be_queue_alloc(adapter, q, RX_Q_LEN, sizeof(struct be_eth_rx_d));
	if (rc)
		goto rx_cq_destroy;

	/* Ask BE to create Rx eth queue */
	rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
		BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle, false);
	if (rc)
		goto rx_q_free;

	return 0;
rx_q_free:
	be_queue_free(adapter, q);
rx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
rx_cq_free:
	be_queue_free(adapter, cq);
rx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
rx_eq_free:
	be_queue_free(adapter, eq);
	return rc;
}

/* There are 8 evt ids per func. Returns the evt id's bit number */
static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
{
	return eq_id - 8 * be_pci_func(adapter);
}

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	int isr;

	isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
		(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
	if (!isr)
		return IRQ_NONE;

	event_handle(adapter, &adapter->tx_eq);
	event_handle(adapter, &adapter->rx_eq);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->rx_eq);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq);

	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_adapter *adapter,
			struct be_eth_rx_compl *rxcp)
{
	int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
	int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);

	if (err)
		drvr_stats(adapter)->be_rxcp_err++;

	return (tcp_frame && !err) ? true : false;
}

int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(rx_eq, struct be_adapter, rx_eq);
	struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
	struct be_eth_rx_compl *rxcp;
	u32 work_done;

	adapter->stats.drvr_stats.be_rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(adapter);
		if (!rxcp)
			break;

		if (do_gro(adapter, rxcp))
			be_rx_compl_process_gro(adapter, rxcp);
		else
			be_rx_compl_process(adapter, rxcp);

		be_rx_compl_reset(rxcp);
	}

	/* Refill the queue */
	if (atomic_read(&adapter->rx_obj.q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(adapter);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}

void be_process_tx(struct be_adapter *adapter)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_eth_tx_compl *txcp;
	u32 num_cmpl = 0;
	u16 end_idx;

	while ((txcp = be_tx_compl_get(tx_cq))) {
		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
				wrb_index, txcp);
		be_tx_compl_process(adapter, end_idx);
		num_cmpl++;
	}

	if (num_cmpl) {
		be_cq_notify(adapter, tx_cq->id, true, num_cmpl);

		/* As Tx wrbs have been freed up, wake up netdev queue if
		 * it was stopped due to lack of tx wrbs.
		 */
		if (netif_queue_stopped(adapter->netdev) &&
			atomic_read(&txq->used) < txq->len / 2) {
			netif_wake_queue(adapter->netdev);
		}

		drvr_stats(adapter)->be_tx_events++;
		drvr_stats(adapter)->be_tx_compl += num_cmpl;
	}
}

/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);

	napi_complete(napi);

	be_process_tx(adapter);

	be_process_mcc(adapter);

	return 1;
}

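/* Periodic (per-second) housekeeping: refresh stats, adapt the rx EQ delay
 * and replenish the rx queue if a previous post ran out of memory.
 */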
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);

	be_cmd_get_stats(adapter, &adapter->stats.cmd);

	/* Set EQ delay */
	be_rx_eqd_update(adapter);

	be_tx_rate_update(adapter);
	be_rx_rate_update(adapter);

	if (adapter->rx_post_starved) {
		adapter->rx_post_starved = false;
		be_post_rx_frags(adapter);
	}

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (adapter->msix_enabled) {
		pci_disable_msix(adapter->pdev);
		adapter->msix_enabled = false;
	}
}

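/* Try to switch to MSI-x vectors; on failure the driver stays on INTx */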
static void be_msix_enable(struct be_adapter *adapter)
{
	int i, status;

	for (i = 0; i < BE_NUM_MSIX_VECTORS; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
		BE_NUM_MSIX_VECTORS);
	if (status == 0)
		adapter->msix_enabled = true;
	return;
}

static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
{
	return adapter->msix_entries[
		be_evt_bit_get(adapter, eq_id)].vector;
}

static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj->q.id);
	return request_irq(vec, handler, 0, eq_obj->desc, adapter);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj)
{
	int vec = be_msix_vec_get(adapter, eq_obj->q.id);
	free_irq(vec, adapter);
}

static int be_msix_register(struct be_adapter *adapter)
{
	int status;

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx");
	if (status)
		goto err;

	status = be_request_irq(adapter, &adapter->rx_eq, be_msix_rx, "rx");
	if (status)
		goto free_tx_irq;

	return 0;

free_tx_irq:
	be_free_irq(adapter, &adapter->tx_eq);
err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	pci_disable_msix(adapter->pdev);
	adapter->msix_enabled = false;
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (adapter->msix_enabled) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!adapter->msix_enabled) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq);
	be_free_irq(adapter, &adapter->rx_eq);
done:
	adapter->isr_registered = false;
	return;
}

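/* Called on ifconfig up: post rx buffers, enable napi and interrupts, arm
 * the event/compl queues and sync link, vlan and flow-control state.
 */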
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	bool link_up;
	int status;
	u8 mac_speed;
	u16 link_speed;

	/* First time posting */
	be_post_rx_frags(adapter);

	napi_enable(&rx_eq->napi);
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Rx compl queue may be in unarmed state; rearm it */
	be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
			&link_speed);
	if (status)
		goto ret_sts;
	be_link_status_update(adapter, link_up);

	status = be_vid_config(adapter);
	if (status)
		goto ret_sts;

	status = be_cmd_set_flow_control(adapter,
		adapter->tx_fc, adapter->rx_fc);
	if (status)
		goto ret_sts;

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
ret_sts:
	return status;
}

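/* Program the firmware and PCI PM registers for wake-on-lan (magic packet)
 * support.
 */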
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
					cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
	return status;
}

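/* Create the interface handle and the tx/rx/mcc queues needed before the
 * device can be opened.
 */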
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags;
	int status;

	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MCAST_PROMISCUOUS |
			BE_IF_FLAGS_PROMISCUOUS |
			BE_IF_FLAGS_PASS_L3L4_ERRORS;
	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_PASS_L3L4_ERRORS;

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, false/* pmac_invalid */,
			&adapter->if_handle, &adapter->pmac_id);
	if (status != 0)
		goto do_none;

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	adapter->link_speed = -1;

	return 0;

rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	be_cmd_if_destroy(adapter, adapter->if_handle);
do_none:
	return status;
}

static int be_clear(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}

1777static int be_close(struct net_device *netdev)
1778{
1779 struct be_adapter *adapter = netdev_priv(netdev);
6b7c5b94
SP
1780 struct be_eq_obj *rx_eq = &adapter->rx_eq;
1781 struct be_eq_obj *tx_eq = &adapter->tx_eq;
1782 int vec;
1783
b305be78 1784 cancel_delayed_work_sync(&adapter->work);
6b7c5b94 1785
7a1e9b20
SP
1786 be_async_mcc_disable(adapter);
1787
6b7c5b94
SP
1788 netif_stop_queue(netdev);
1789 netif_carrier_off(netdev);
a8f447bd 1790 adapter->link_up = false;
6b7c5b94 1791
8788fdc2 1792 be_intr_set(adapter, false);
6b7c5b94
SP
1793
1794 if (adapter->msix_enabled) {
1795 vec = be_msix_vec_get(adapter, tx_eq->q.id);
1796 synchronize_irq(vec);
1797 vec = be_msix_vec_get(adapter, rx_eq->q.id);
1798 synchronize_irq(vec);
1799 } else {
1800 synchronize_irq(netdev->irq);
1801 }
1802 be_irq_unregister(adapter);
1803
1804 napi_disable(&rx_eq->napi);
1805 napi_disable(&tx_eq->napi);
1806
a8e9179a
SP
1807 /* Wait for all pending tx completions to arrive so that
1808 * all tx skbs are freed.
1809 */
1810 be_tx_compl_clean(adapter);
1811
6b7c5b94
SP
1812 return 0;
1813}
1814
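/*
 * Teardown order in be_close() mirrors the open path in reverse: the
 * delayed worker is cancelled first (so nothing can rearm the MCC CQ once
 * async MCC is disabled), async MCC processing is turned off, the netdev
 * is quiesced, interrupts are masked and synchronized before the IRQ is
 * unregistered, NAPI is disabled, and finally pending TX completions are
 * drained so every in-flight skb is freed.
 */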
84517482
AK
1815#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
1816char flash_cookie[2][16] = {"*** SE FLAS",
1817 "H DIRECTORY *** "};
fa9a6fed
SB
1818
1819static bool be_flash_redboot(struct be_adapter *adapter,
3f0d4560
AK
1820 const u8 *p, u32 img_start, int image_size,
1821 int hdr_size)
fa9a6fed
SB
1822{
1823 u32 crc_offset;
1824 u8 flashed_crc[4];
1825 int status;
3f0d4560
AK
1826
1827 crc_offset = hdr_size + img_start + image_size - 4;
1828
fa9a6fed 1829 p += crc_offset;
3f0d4560
AK
1830
1831 status = be_cmd_get_flash_crc(adapter, flashed_crc,
1832 (img_start + image_size - 4));
fa9a6fed
SB
1833 if (status) {
1834 dev_err(&adapter->pdev->dev,
1835 "could not get crc from flash, not flashing redboot\n");
1836 return false;
1837 }
1838
1839 /* update redboot only if the flashed crc does not match the new image's */
1840 return memcmp(flashed_crc, p, 4) != 0;
fa9a6fed
SB
1844}
1845
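/*
 * The last 4 bytes of each redboot image are its CRC. be_flash_redboot()
 * reads the CRC currently on flash (device-relative offset
 * img_start + image_size - 4) and compares it with the trailing 4 bytes
 * of the candidate image in the UFI file (the file-relative offset adds
 * hdr_size); redboot is reflashed only when the two differ, which skips a
 * needless boot-loader rewrite on repeat flashes.
 */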
3f0d4560 1846static int be_flash_data(struct be_adapter *adapter,
84517482 1847 const struct firmware *fw,
3f0d4560
AK
1848 struct be_dma_mem *flash_cmd, int num_of_images)
1849
84517482 1850{
3f0d4560
AK
1851 int status = 0, i, filehdr_size = 0;
1852 u32 total_bytes = 0, flash_op;
84517482
AK
1853 int num_bytes;
1854 const u8 *p = fw->data;
1855 struct be_cmd_write_flashrom *req = flash_cmd->va;
3f0d4560
AK
1856 struct flash_comp *pflashcomp;
1857
1858 struct flash_comp gen3_flash_types[8] = {
1859 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
1860 FLASH_IMAGE_MAX_SIZE_g3},
1861 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
1862 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
1863 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
1864 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
1865 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
1866 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
1867 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
1868 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
1869 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
1870 FLASH_IMAGE_MAX_SIZE_g3},
1871 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
1872 FLASH_IMAGE_MAX_SIZE_g3},
1873 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
1874 FLASH_IMAGE_MAX_SIZE_g3}
1875 };
1876 struct flash_comp gen2_flash_types[8] = {
1877 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
1878 FLASH_IMAGE_MAX_SIZE_g2},
1879 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
1880 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
1881 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
1882 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
1883 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
1884 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
1885 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
1886 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
1887 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
1888 FLASH_IMAGE_MAX_SIZE_g2},
1889 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
1890 FLASH_IMAGE_MAX_SIZE_g2},
1891 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
1892 FLASH_IMAGE_MAX_SIZE_g2}
1893 };
1894
1895 if (adapter->generation == BE_GEN3) {
1896 pflashcomp = gen3_flash_types;
1897 filehdr_size = sizeof(struct flash_file_hdr_g3);
1898 } else {
1899 pflashcomp = gen2_flash_types;
1900 filehdr_size = sizeof(struct flash_file_hdr_g2);
84517482 1901 }
3f0d4560
AK
1902 for (i = 0; i < 8; i++) {
1903 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
1904 (!be_flash_redboot(adapter, fw->data,
1905 pflashcomp[i].offset, pflashcomp[i].size,
1906 filehdr_size)))
1907 continue;
1908 p = fw->data;
1909 p += filehdr_size + pflashcomp[i].offset
1910 + (num_of_images * sizeof(struct image_hdr));
1911 if (p + pflashcomp[i].size > fw->data + fw->size)
84517482 1912 return -EINVAL;
3f0d4560
AK
1913 total_bytes = pflashcomp[i].size;
1914 while (total_bytes) {
1915 if (total_bytes > 32*1024)
1916 num_bytes = 32*1024;
1917 else
1918 num_bytes = total_bytes;
1919 total_bytes -= num_bytes;
1920
1921 if (!total_bytes)
1922 flash_op = FLASHROM_OPER_FLASH;
1923 else
1924 flash_op = FLASHROM_OPER_SAVE;
1925 memcpy(req->params.data_buf, p, num_bytes);
1926 p += num_bytes;
1927 status = be_cmd_write_flashrom(adapter, flash_cmd,
1928 pflashcomp[i].optype, flash_op, num_bytes);
1929 if (status) {
1930 dev_err(&adapter->pdev->dev,
1931 "cmd to write to flash rom failed.\n");
1932 return -EIO;
1933 }
1934 yield();
84517482 1935 }
84517482 1936 }
84517482
AK
1937 return 0;
1938}
1939
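/*
 * Each component is pushed through the 32KB DMA scratch buffer in chunks:
 * every chunk except the last is sent with FLASHROM_OPER_SAVE (staged by
 * firmware) and the final chunk uses FLASHROM_OPER_FLASH, which commits
 * the whole component. E.g. a 100KB image goes down as 32K + 32K + 32K +
 * 4K, with only the 4K op actually burning the flash. The yield() between
 * chunks keeps this long-running loop from hogging the CPU.
 */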
3f0d4560
AK
1940static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
1941{
1942 if (fhdr == NULL)
1943 return 0;
1944 if (fhdr->build[0] == '3')
1945 return BE_GEN3;
1946 else if (fhdr->build[0] == '2')
1947 return BE_GEN2;
1948 else
1949 return 0;
1950}
1951
84517482
AK
1952int be_load_fw(struct be_adapter *adapter, u8 *func)
1953{
1954 char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
1955 const struct firmware *fw;
3f0d4560
AK
1956 struct flash_file_hdr_g2 *fhdr;
1957 struct flash_file_hdr_g3 *fhdr3;
1958 struct image_hdr *img_hdr_ptr = NULL;
84517482 1959 struct be_dma_mem flash_cmd;
3f0d4560 1960 int status, i = 0;
84517482 1961 const u8 *p;
84517482
AK
1962 char fw_ver[FW_VER_LEN];
1964
1965 status = be_cmd_get_fw_ver(adapter, fw_ver);
1966 if (status)
1967 return status;
1968
1969 strlcpy(fw_file, func, sizeof(fw_file));
1973
1974 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
1975 if (status)
1976 goto fw_exit;
1977
1978 p = fw->data;
3f0d4560 1979 fhdr = (struct flash_file_hdr_g2 *) p;
84517482
AK
1980 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
1981
84517482
AK
1982 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
1983 flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
1984 &flash_cmd.dma);
1985 if (!flash_cmd.va) {
1986 status = -ENOMEM;
1987 dev_err(&adapter->pdev->dev,
1988 "Memory allocation failure while flashing\n");
1989 goto fw_exit;
1990 }
1991
3f0d4560
AK
1992 if ((adapter->generation == BE_GEN3) &&
1993 (get_ufigen_type(fhdr) == BE_GEN3)) {
1994 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
1995 for (i = 0; i < fhdr3->num_imgs; i++) {
1996 img_hdr_ptr = (struct image_hdr *) (fw->data +
1997 (sizeof(struct flash_file_hdr_g3) +
1998 i * sizeof(struct image_hdr)));
1999 if (img_hdr_ptr->imageid == 1) {
2000 status = be_flash_data(adapter, fw,
2001 &flash_cmd, fhdr3->num_imgs);
2002 }
2003
2004 }
2005 } else if ((adapter->generation == BE_GEN2) &&
2006 (get_ufigen_type(fhdr) == BE_GEN2)) {
2007 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2008 } else {
2009 dev_err(&adapter->pdev->dev,
2010 "UFI image is not compatible with this adapter generation\n");
2011 status = -EINVAL;
84517482
AK
2012 }
2013
2014 pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
2015 flash_cmd.dma);
2016 if (status) {
2017 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2018 goto fw_exit;
2019 }
2020
af901ca1 2021 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
84517482
AK
2022
2023fw_exit:
2024 release_firmware(fw);
2025 return status;
2026}
2027
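/*
 * UFI file layout assumed above: a gen2 file is flash_file_hdr_g2
 * followed directly by the flash components, while a gen3 file is
 * flash_file_hdr_g3, then num_imgs image_hdr entries, then the image
 * data; only image_hdr entries with imageid == 1 are flashed. The file's
 * generation (taken from the build string by get_ufigen_type()) must
 * match the adapter's generation, and the scratch buffer is sized for the
 * flashrom command header plus one 32KB data chunk.
 */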
6b7c5b94
SP
2028static const struct net_device_ops be_netdev_ops = {
2029 .ndo_open = be_open,
2030 .ndo_stop = be_close,
2031 .ndo_start_xmit = be_xmit,
2032 .ndo_get_stats = be_get_stats,
2033 .ndo_set_rx_mode = be_set_multicast_list,
2034 .ndo_set_mac_address = be_mac_addr_set,
2035 .ndo_change_mtu = be_change_mtu,
2036 .ndo_validate_addr = eth_validate_addr,
2037 .ndo_vlan_rx_register = be_vlan_register,
2038 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
2039 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
2040};
2041
2042static void be_netdev_init(struct net_device *netdev)
2043{
2044 struct be_adapter *adapter = netdev_priv(netdev);
2045
2046 netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
583e3f34
AK
2047 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
2048 NETIF_F_GRO;
6b7c5b94 2049
51c59870
AK
2050 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;
2051
6b7c5b94
SP
2052 netdev->flags |= IFF_MULTICAST;
2053
728a9972
AK
2054 adapter->rx_csum = true;
2055
9e90c961
AK
2056 /* Default settings for Rx and Tx flow control */
2057 adapter->rx_fc = true;
2058 adapter->tx_fc = true;
2059
c190e3c8
AK
2060 netif_set_gso_max_size(netdev, 65535);
2061
6b7c5b94
SP
2062 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2063
2064 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2065
6b7c5b94
SP
2066 netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx,
2067 BE_NAPI_WEIGHT);
5fb379ee 2068 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
6b7c5b94
SP
2069 BE_NAPI_WEIGHT);
2070
2071 netif_carrier_off(netdev);
2072 netif_stop_queue(netdev);
2073}
2074
2075static void be_unmap_pci_bars(struct be_adapter *adapter)
2076{
8788fdc2
SP
2077 if (adapter->csr)
2078 iounmap(adapter->csr);
2079 if (adapter->db)
2080 iounmap(adapter->db);
2081 if (adapter->pcicfg)
2082 iounmap(adapter->pcicfg);
6b7c5b94
SP
2083}
2084
2085static int be_map_pci_bars(struct be_adapter *adapter)
2086{
2087 u8 __iomem *addr;
7b139c83 2088 int pcicfg_reg;
6b7c5b94
SP
2089
2090 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2091 pci_resource_len(adapter->pdev, 2));
2092 if (addr == NULL)
2093 return -ENOMEM;
8788fdc2 2094 adapter->csr = addr;
6b7c5b94
SP
2095
2096 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 4),
2097 128 * 1024);
2098 if (addr == NULL)
2099 goto pci_map_err;
8788fdc2 2100 adapter->db = addr;
6b7c5b94 2101
7b139c83
AK
2102 if (adapter->generation == BE_GEN2)
2103 pcicfg_reg = 1;
2104 else
2105 pcicfg_reg = 0;
2106
2107 addr = ioremap_nocache(pci_resource_start(adapter->pdev, pcicfg_reg),
2108 pci_resource_len(adapter->pdev, pcicfg_reg));
6b7c5b94
SP
2109 if (addr == NULL)
2110 goto pci_map_err;
8788fdc2 2111 adapter->pcicfg = addr;
6b7c5b94
SP
2112
2113 return 0;
2114pci_map_err:
2115 be_unmap_pci_bars(adapter);
2116 return -ENOMEM;
2117}
2118
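/*
 * BAR layout as mapped above: CSR space lives in BAR 2, the doorbells in
 * the first 128KB of BAR 4, and the PCI config / interrupt-control region
 * in BAR 1 on gen2 chips but BAR 0 on gen3 chips; hence the generation
 * check before mapping pcicfg.
 */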
2119
2120static void be_ctrl_cleanup(struct be_adapter *adapter)
2121{
8788fdc2 2122 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
6b7c5b94
SP
2123
2124 be_unmap_pci_bars(adapter);
2125
2126 if (mem->va)
2127 pci_free_consistent(adapter->pdev, mem->size,
2128 mem->va, mem->dma);
e7b909a6
SP
2129
2130 mem = &adapter->mc_cmd_mem;
2131 if (mem->va)
2132 pci_free_consistent(adapter->pdev, mem->size,
2133 mem->va, mem->dma);
6b7c5b94
SP
2134}
2135
6b7c5b94
SP
2136static int be_ctrl_init(struct be_adapter *adapter)
2137{
8788fdc2
SP
2138 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
2139 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
e7b909a6 2140 struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
6b7c5b94 2141 int status;
6b7c5b94
SP
2142
2143 status = be_map_pci_bars(adapter);
2144 if (status)
e7b909a6 2145 goto done;
6b7c5b94
SP
2146
2147 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2148 mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
2149 mbox_mem_alloc->size, &mbox_mem_alloc->dma);
2150 if (!mbox_mem_alloc->va) {
e7b909a6
SP
2151 status = -ENOMEM;
2152 goto unmap_pci_bars;
6b7c5b94 2153 }
e7b909a6 2154
6b7c5b94
SP
2155 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
2156 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
2157 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
2158 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
e7b909a6
SP
2159
2160 mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
2161 mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size,
2162 &mc_cmd_mem->dma);
2163 if (mc_cmd_mem->va == NULL) {
2164 status = -ENOMEM;
2165 goto free_mbox;
2166 }
2167 memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
2168
8788fdc2
SP
2169 spin_lock_init(&adapter->mbox_lock);
2170 spin_lock_init(&adapter->mcc_lock);
2171 spin_lock_init(&adapter->mcc_cq_lock);
a8f447bd 2172
cf588477 2173 pci_save_state(adapter->pdev);
6b7c5b94 2174 return 0;
e7b909a6
SP
2175
2176free_mbox:
2177 pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
2178 mbox_mem_alloc->va, mbox_mem_alloc->dma);
2179
2180unmap_pci_bars:
2181 be_unmap_pci_bars(adapter);
2182
2183done:
2184 return status;
6b7c5b94
SP
2185}
2186
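/*
 * Mailbox alignment above: the mailbox apparently has to sit on a 16-byte
 * boundary, so 16 spare bytes are allocated and both the virtual and bus
 * addresses are rounded up with PTR_ALIGN by the same amount (e.g. a dma
 * of 0x1008 becomes 0x1010), so va and dma keep describing the same
 * memory. mbox_mem_alloced keeps the original pointers so the buffer can
 * be freed correctly in be_ctrl_cleanup().
 */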
2187static void be_stats_cleanup(struct be_adapter *adapter)
2188{
2189 struct be_stats_obj *stats = &adapter->stats;
2190 struct be_dma_mem *cmd = &stats->cmd;
2191
2192 if (cmd->va)
2193 pci_free_consistent(adapter->pdev, cmd->size,
2194 cmd->va, cmd->dma);
2195}
2196
2197static int be_stats_init(struct be_adapter *adapter)
2198{
2199 struct be_stats_obj *stats = &adapter->stats;
2200 struct be_dma_mem *cmd = &stats->cmd;
2201
2202 cmd->size = sizeof(struct be_cmd_req_get_stats);
2203 cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
2204 if (cmd->va == NULL)
2205 return -ENOMEM;
d291b9af 2206 memset(cmd->va, 0, cmd->size);
6b7c5b94
SP
2207 return 0;
2208}
2209
2210static void __devexit be_remove(struct pci_dev *pdev)
2211{
2212 struct be_adapter *adapter = pci_get_drvdata(pdev);
8d56ff11 2213
6b7c5b94
SP
2214 if (!adapter)
2215 return;
2216
2217 unregister_netdev(adapter->netdev);
2218
5fb379ee
SP
2219 be_clear(adapter);
2220
6b7c5b94
SP
2221 be_stats_cleanup(adapter);
2222
2223 be_ctrl_cleanup(adapter);
2224
8d56ff11 2225 be_msix_disable(adapter);
6b7c5b94
SP
2226
2227 pci_set_drvdata(pdev, NULL);
2228 pci_release_regions(pdev);
2229 pci_disable_device(pdev);
2230
2231 free_netdev(adapter->netdev);
2232}
2233
2243e2e9 2234static int be_get_config(struct be_adapter *adapter)
6b7c5b94 2235{
6b7c5b94 2236 int status;
2243e2e9 2237 u8 mac[ETH_ALEN];
6b7c5b94 2238
2243e2e9 2239 status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
6b7c5b94
SP
2240 if (status)
2241 return status;
2242
2243e2e9
SP
2243 status = be_cmd_query_fw_cfg(adapter,
2244 &adapter->port_num, &adapter->cap);
43a04fdc
SP
2245 if (status)
2246 return status;
2247
2243e2e9
SP
2248 memset(mac, 0, ETH_ALEN);
2249 status = be_cmd_mac_addr_query(adapter, mac,
2250 MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
6b7c5b94
SP
2251 if (status)
2252 return status;
ca9e4988
AK
2253
2254 if (!is_valid_ether_addr(mac))
2255 return -EADDRNOTAVAIL;
2256
2243e2e9 2257 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
35a65285 2258 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
6b7c5b94 2259
82903e4b
AK
2260 if (adapter->cap & 0x400)
2261 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
2262 else
2263 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2264
2243e2e9 2265 return 0;
6b7c5b94
SP
2266}
2267
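/*
 * The 0x400 test above is an unnamed firmware capability bit (returned by
 * be_cmd_query_fw_cfg() into adapter->cap); when set, only a quarter of
 * BE_NUM_VLANS_SUPPORTED is usable, presumably because the VLAN table is
 * shared with other functions on the device. The permanent MAC is also
 * validated here so a bogus address from firmware fails the probe early
 * with -EADDRNOTAVAIL.
 */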
2268static int __devinit be_probe(struct pci_dev *pdev,
2269 const struct pci_device_id *pdev_id)
2270{
2271 int status = 0;
2272 struct be_adapter *adapter;
2273 struct net_device *netdev;
6b7c5b94
SP
2274
2275 status = pci_enable_device(pdev);
2276 if (status)
2277 goto do_none;
2278
2279 status = pci_request_regions(pdev, DRV_NAME);
2280 if (status)
2281 goto disable_dev;
2282 pci_set_master(pdev);
2283
2284 netdev = alloc_etherdev(sizeof(struct be_adapter));
2285 if (netdev == NULL) {
2286 status = -ENOMEM;
2287 goto rel_reg;
2288 }
2289 adapter = netdev_priv(netdev);
7b139c83
AK
2290
2291 switch (pdev->device) {
2292 case BE_DEVICE_ID1:
2293 case OC_DEVICE_ID1:
2294 adapter->generation = BE_GEN2;
2295 break;
2296 case BE_DEVICE_ID2:
2297 case OC_DEVICE_ID2:
2298 adapter->generation = BE_GEN3;
2299 break;
2300 default:
2301 adapter->generation = 0;
2302 }
2303
6b7c5b94
SP
2304 adapter->pdev = pdev;
2305 pci_set_drvdata(pdev, adapter);
2306 adapter->netdev = netdev;
2243e2e9
SP
2307 be_netdev_init(netdev);
2308 SET_NETDEV_DEV(netdev, &pdev->dev);
6b7c5b94
SP
2309
2310 be_msix_enable(adapter);
2311
e930438c 2312 status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
6b7c5b94
SP
2313 if (!status) {
2314 netdev->features |= NETIF_F_HIGHDMA;
2315 } else {
e930438c 2316 status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6b7c5b94
SP
2317 if (status) {
2318 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
2319 goto free_netdev;
2320 }
2321 }
2322
6b7c5b94
SP
2323 status = be_ctrl_init(adapter);
2324 if (status)
2325 goto free_netdev;
2326
2243e2e9
SP
2327 /* sync up with fw's ready state */
2328 status = be_cmd_POST(adapter);
6b7c5b94
SP
2329 if (status)
2330 goto ctrl_clean;
2331
2243e2e9
SP
2332 /* tell fw we're ready to fire cmds */
2333 status = be_cmd_fw_init(adapter);
6b7c5b94 2334 if (status)
2243e2e9
SP
2335 goto ctrl_clean;
2336
2337 status = be_cmd_reset_function(adapter);
2338 if (status)
2339 goto ctrl_clean;
6b7c5b94 2340
2243e2e9
SP
2341 status = be_stats_init(adapter);
2342 if (status)
2343 goto ctrl_clean;
2344
2345 status = be_get_config(adapter);
6b7c5b94
SP
2346 if (status)
2347 goto stats_clean;
6b7c5b94
SP
2348
2349 INIT_DELAYED_WORK(&adapter->work, be_worker);
6b7c5b94 2350
5fb379ee
SP
2351 status = be_setup(adapter);
2352 if (status)
2353 goto stats_clean;
2243e2e9 2354
6b7c5b94
SP
2355 status = register_netdev(netdev);
2356 if (status != 0)
5fb379ee 2357 goto unsetup;
6b7c5b94 2358
c4ca2374 2359 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
6b7c5b94
SP
2360 return 0;
2361
5fb379ee
SP
2362unsetup:
2363 be_clear(adapter);
6b7c5b94
SP
2364stats_clean:
2365 be_stats_cleanup(adapter);
2366ctrl_clean:
2367 be_ctrl_cleanup(adapter);
2368free_netdev:
8d56ff11 2369 be_msix_disable(adapter);
6b7c5b94 2370 free_netdev(adapter->netdev);
8d56ff11 2371 pci_set_drvdata(pdev, NULL);
6b7c5b94
SP
2372rel_reg:
2373 pci_release_regions(pdev);
2374disable_dev:
2375 pci_disable_device(pdev);
2376do_none:
c4ca2374 2377 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
6b7c5b94
SP
2378 return status;
2379}
2380
2381static int be_suspend(struct pci_dev *pdev, pm_message_t state)
2382{
2383 struct be_adapter *adapter = pci_get_drvdata(pdev);
2384 struct net_device *netdev = adapter->netdev;
2385
71d8d1b5
AK
2386 if (adapter->wol)
2387 be_setup_wol(adapter, true);
2388
6b7c5b94
SP
2389 netif_device_detach(netdev);
2390 if (netif_running(netdev)) {
2391 rtnl_lock();
2392 be_close(netdev);
2393 rtnl_unlock();
2394 }
9e90c961 2395 be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
9b0365f1 2396 be_clear(adapter);
6b7c5b94
SP
2397
2398 pci_save_state(pdev);
2399 pci_disable_device(pdev);
2400 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2401 return 0;
2402}
2403
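/*
 * Suspend notes: the negotiated flow-control settings are read back into
 * adapter->tx_fc/rx_fc just before be_clear() so the open path after
 * be_resume() can re-apply them, and WoL (when adapter->wol is set,
 * presumably via ethtool) is armed before the device is detached.
 */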
2404static int be_resume(struct pci_dev *pdev)
2405{
2406 int status = 0;
2407 struct be_adapter *adapter = pci_get_drvdata(pdev);
2408 struct net_device *netdev = adapter->netdev;
2409
2410 netif_device_detach(netdev);
2411
2412 status = pci_enable_device(pdev);
2413 if (status)
2414 return status;
2415
2416 pci_set_power_state(pdev, PCI_D0);
2417 pci_restore_state(pdev);
2418
2243e2e9
SP
2419 /* tell fw we're ready to fire cmds */
2420 status = be_cmd_fw_init(adapter);
2421 if (status)
2422 return status;
2423
9b0365f1 2424 be_setup(adapter);
6b7c5b94
SP
2425 if (netif_running(netdev)) {
2426 rtnl_lock();
2427 be_open(netdev);
2428 rtnl_unlock();
2429 }
2430 netif_device_attach(netdev);
71d8d1b5
AK
2431
2432 if (adapter->wol)
2433 be_setup_wol(adapter, false);
6b7c5b94
SP
2434 return 0;
2435}
2436
cf588477
SP
2437static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
2438 pci_channel_state_t state)
2439{
2440 struct be_adapter *adapter = pci_get_drvdata(pdev);
2441 struct net_device *netdev = adapter->netdev;
2442
2443 dev_err(&adapter->pdev->dev, "EEH error detected\n");
2444
2445 adapter->eeh_err = true;
2446
2447 netif_device_detach(netdev);
2448
2449 if (netif_running(netdev)) {
2450 rtnl_lock();
2451 be_close(netdev);
2452 rtnl_unlock();
2453 }
2454 be_clear(adapter);
2455
2456 if (state == pci_channel_io_perm_failure)
2457 return PCI_ERS_RESULT_DISCONNECT;
2458
2459 pci_disable_device(pdev);
2460
2461 return PCI_ERS_RESULT_NEED_RESET;
2462}
2463
2464static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
2465{
2466 struct be_adapter *adapter = pci_get_drvdata(pdev);
2467 int status;
2468
2469 dev_info(&adapter->pdev->dev, "EEH reset\n");
2470 adapter->eeh_err = false;
2471
2472 status = pci_enable_device(pdev);
2473 if (status)
2474 return PCI_ERS_RESULT_DISCONNECT;
2475
2476 pci_set_master(pdev);
2477 pci_set_power_state(pdev, PCI_D0);
2478 pci_restore_state(pdev);
2479
2480 /* Check if card is ok and fw is ready */
2481 status = be_cmd_POST(adapter);
2482 if (status)
2483 return PCI_ERS_RESULT_DISCONNECT;
2484
2485 return PCI_ERS_RESULT_RECOVERED;
2486}
2487
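/*
 * EEH recovery is a three-step sequence driven by the PCI core:
 * be_eeh_err_detected() flags the fault in adapter->eeh_err, detaches and
 * closes the netdev and tears the queues down; be_eeh_reset() (the
 * slot_reset hook) re-enables the device, restores config space and waits
 * for firmware readiness via be_cmd_POST(); be_eeh_resume() then reopens
 * the firmware command channel, rebuilds the queues with be_setup() and
 * reattaches the netdev.
 */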
2488static void be_eeh_resume(struct pci_dev *pdev)
2489{
2490 int status = 0;
2491 struct be_adapter *adapter = pci_get_drvdata(pdev);
2492 struct net_device *netdev = adapter->netdev;
2493
2494 dev_info(&adapter->pdev->dev, "EEH resume\n");
2495
2496 pci_save_state(pdev);
2497
2498 /* tell fw we're ready to fire cmds */
2499 status = be_cmd_fw_init(adapter);
2500 if (status)
2501 goto err;
2502
2503 status = be_setup(adapter);
2504 if (status)
2505 goto err;
2506
2507 if (netif_running(netdev)) {
2508 status = be_open(netdev);
2509 if (status)
2510 goto err;
2511 }
2512 netif_device_attach(netdev);
2513 return;
2514err:
2515 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
2516 return;
2517}
2518
2519static struct pci_error_handlers be_eeh_handlers = {
2520 .error_detected = be_eeh_err_detected,
2521 .slot_reset = be_eeh_reset,
2522 .resume = be_eeh_resume,
2523};
2524
6b7c5b94
SP
2525static struct pci_driver be_driver = {
2526 .name = DRV_NAME,
2527 .id_table = be_dev_ids,
2528 .probe = be_probe,
2529 .remove = be_remove,
2530 .suspend = be_suspend,
cf588477
SP
2531 .resume = be_resume,
2532 .err_handler = &be_eeh_handlers
6b7c5b94
SP
2533};
2534
2535static int __init be_init_module(void)
2536{
8e95a202
JP
2537 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
2538 rx_frag_size != 2048) {
6b7c5b94
SP
2539 printk(KERN_WARNING DRV_NAME
2540 " : Module param rx_frag_size must be 2048/4096/8192."
2541 " Using 2048\n");
2542 rx_frag_size = 2048;
2543 }
6b7c5b94
SP
2544
2545 return pci_register_driver(&be_driver);
2546}
2547module_init(be_init_module);
2548
2549static void __exit be_exit_module(void)
2550{
2551 pci_unregister_driver(&be_driver);
2552}
2553module_exit(be_exit_module);