/* drivers/net/ethernet/brocade/bna/bnad.c */
/*
 * Linux network driver for QLogic BR-series Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 * All rights reserved
 * www.qlogic.com
 */
#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/prefetch.h>
#include <linux/module.h>

#include "bnad.h"
#include "bna.h"
#include "cna.h"

static DEFINE_MUTEX(bnad_fwimg_mutex);

/*
 * Module params
 */
static uint bnad_msix_disable;
module_param(bnad_msix_disable, uint, 0444);
MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");

static uint bnad_ioc_auto_recover = 1;
module_param(bnad_ioc_auto_recover, uint, 0444);
MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");

static uint bna_debugfs_enable = 1;
module_param(bna_debugfs_enable, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
		 " Range[false:0|true:1]");

/*
 * Global variables
 */
static u32 bnad_rxqs_per_cq = 2;
static atomic_t bna_id;
static const u8 bnad_bcast_addr[] __aligned(2) =
	{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

/*
 * Local MACROS
 */
#define BNAD_GET_MBOX_IRQ(_bnad)				\
	(((_bnad)->cfg_flags & BNAD_CF_MSIX) ?			\
	 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
	 ((_bnad)->pcidev->irq))

#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _size)	\
do {								\
	(_res_info)->res_type = BNA_RES_T_MEM;			\
	(_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;	\
	(_res_info)->res_u.mem_info.num = (_num);		\
	(_res_info)->res_u.mem_info.len = (_size);		\
} while (0)

/*
 * Reinitialize completions in CQ, once Rx is taken down
 */
static void
bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bna_cq_entry *cmpl;
	int i;

	for (i = 0; i < ccb->q_depth; i++) {
		cmpl = &((struct bna_cq_entry *)ccb->sw_q)[i];
		cmpl->valid = 0;
	}
}

/* Tx Datapath functions */


/* Caller should ensure that the entry at unmap_q[index] is valid */
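/* Returns the unmap index that follows the last vector of the freed frame. */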
static u32
bnad_tx_buff_unmap(struct bnad *bnad,
		   struct bnad_tx_unmap *unmap_q,
		   u32 q_depth, u32 index)
{
	struct bnad_tx_unmap *unmap;
	struct sk_buff *skb;
	int vector, nvecs;

	unmap = &unmap_q[index];
	nvecs = unmap->nvecs;

	skb = unmap->skb;
	unmap->skb = NULL;
	unmap->nvecs = 0;
	dma_unmap_single(&bnad->pcidev->dev,
		dma_unmap_addr(&unmap->vectors[0], dma_addr),
		skb_headlen(skb), DMA_TO_DEVICE);
	dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0);
	nvecs--;

	vector = 0;
	while (nvecs) {
		vector++;
		if (vector == BFI_TX_MAX_VECTORS_PER_WI) {
			vector = 0;
			BNA_QE_INDX_INC(index, q_depth);
			unmap = &unmap_q[index];
		}

		dma_unmap_page(&bnad->pcidev->dev,
			dma_unmap_addr(&unmap->vectors[vector], dma_addr),
			dma_unmap_len(&unmap->vectors[vector], dma_len),
			DMA_TO_DEVICE);
		dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0);
		nvecs--;
	}

	BNA_QE_INDX_INC(index, q_depth);

	return index;
}

/*
 * Frees all pending Tx Bufs
 * At this point no activity is expected on the Q,
 * so DMA unmap & freeing is fine.
 */
static void
bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
	struct sk_buff *skb;
	int i;

	for (i = 0; i < tcb->q_depth; i++) {
		skb = unmap_q[i].skb;
		if (!skb)
			continue;
		bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i);

		dev_kfree_skb_any(skb);
	}
}

/*
 * bnad_txcmpl_process : Frees the Tx bufs on Tx completion
 * Can be called in a) Interrupt context
 * b) Sending context
 */
static u32
bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
{
	u32 sent_packets = 0, sent_bytes = 0;
	u32 wis, unmap_wis, hw_cons, cons, q_depth;
	struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
	struct bnad_tx_unmap *unmap;
	struct sk_buff *skb;

	/* Just return if TX is stopped */
	if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
		return 0;

	hw_cons = *(tcb->hw_consumer_index);
	cons = tcb->consumer_index;
	q_depth = tcb->q_depth;

	wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth);
	BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));

	while (wis) {
		unmap = &unmap_q[cons];

		skb = unmap->skb;

		sent_packets++;
		sent_bytes += skb->len;

		unmap_wis = BNA_TXQ_WI_NEEDED(unmap->nvecs);
		wis -= unmap_wis;

		cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons);
		dev_kfree_skb_any(skb);
	}

	/* Update consumer pointers. */
	tcb->consumer_index = hw_cons;

	tcb->txq->tx_packets += sent_packets;
	tcb->txq->tx_bytes += sent_bytes;

	return sent_packets;
}

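/*
 * Reclaims completed Tx buffers for one TxQ and, if enough descriptors are
 * free again, wakes a stopped netdev queue; BNAD_TXQ_FREE_SENT serializes
 * this against the Tx cleanup path.
 */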
static u32
bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct net_device *netdev = bnad->netdev;
	u32 sent = 0;

	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
		return 0;

	sent = bnad_txcmpl_process(bnad, tcb);
	if (sent) {
		if (netif_queue_stopped(netdev) &&
		    netif_carrier_ok(netdev) &&
		    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
		    BNAD_NETIF_WAKE_THRESHOLD) {
			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
				netif_wake_queue(netdev);
				BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
			}
		}
	}

	if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
		bna_ib_ack(tcb->i_dbell, sent);

	smp_mb__before_atomic();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

	return sent;
}

/* MSIX Tx Completion Handler */
static irqreturn_t
bnad_msix_tx(int irq, void *data)
{
	struct bna_tcb *tcb = (struct bna_tcb *)data;
	struct bnad *bnad = tcb->bnad;

	bnad_tx_complete(bnad, tcb);

	return IRQ_HANDLED;
}

static inline void
bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;

	unmap_q->reuse_pi = -1;
	unmap_q->alloc_order = -1;
	unmap_q->map_size = 0;
	unmap_q->type = BNAD_RXBUF_NONE;
}

/* Default is page-based allocation. Multi-buffer support - TBD */
static int
bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
	int order;

	bnad_rxq_alloc_uninit(bnad, rcb);

	order = get_order(rcb->rxq->buffer_size);

	unmap_q->type = BNAD_RXBUF_PAGE;

	if (bna_is_small_rxq(rcb->id)) {
		unmap_q->alloc_order = 0;
		unmap_q->map_size = rcb->rxq->buffer_size;
	} else {
		if (rcb->rxq->multi_buffer) {
			unmap_q->alloc_order = 0;
			unmap_q->map_size = rcb->rxq->buffer_size;
			unmap_q->type = BNAD_RXBUF_MULTI_BUFF;
		} else {
			unmap_q->alloc_order = order;
			unmap_q->map_size =
				(rcb->rxq->buffer_size > 2048) ?
				PAGE_SIZE << order : 2048;
		}
	}

	BUG_ON((PAGE_SIZE << order) % unmap_q->map_size);

	return 0;
}

static inline void
bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap)
{
	if (!unmap->page)
		return;

	dma_unmap_page(&bnad->pcidev->dev,
		       dma_unmap_addr(&unmap->vector, dma_addr),
		       unmap->vector.len, DMA_FROM_DEVICE);
	put_page(unmap->page);
	unmap->page = NULL;
	dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
	unmap->vector.len = 0;
}

static inline void
bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap)
{
	if (!unmap->skb)
		return;

	dma_unmap_single(&bnad->pcidev->dev,
			 dma_unmap_addr(&unmap->vector, dma_addr),
			 unmap->vector.len, DMA_FROM_DEVICE);
	dev_kfree_skb_any(unmap->skb);
	unmap->skb = NULL;
	dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
	unmap->vector.len = 0;
}

static void
bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
	int i;

	for (i = 0; i < rcb->q_depth; i++) {
		struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];

		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
			bnad_rxq_cleanup_skb(bnad, unmap);
		else
			bnad_rxq_cleanup_page(bnad, unmap);
	}
	bnad_rxq_alloc_uninit(bnad, rcb);
}

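/*
 * Posts up to 'nalloc' page-based Rx buffers, carving map_size chunks out of
 * higher-order pages and reusing the previous page (reuse_pi) until it is
 * exhausted; rings the RxQ doorbell for whatever was actually posted.
 */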
static u32
bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
{
	u32 alloced, prod, q_depth;
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
	struct bnad_rx_unmap *unmap, *prev;
	struct bna_rxq_entry *rxent;
	struct page *page;
	u32 page_offset, alloc_size;
	dma_addr_t dma_addr;

	prod = rcb->producer_index;
	q_depth = rcb->q_depth;

	alloc_size = PAGE_SIZE << unmap_q->alloc_order;
	alloced = 0;

	while (nalloc--) {
		unmap = &unmap_q->unmap[prod];

		if (unmap_q->reuse_pi < 0) {
			page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
					   unmap_q->alloc_order);
			page_offset = 0;
		} else {
			prev = &unmap_q->unmap[unmap_q->reuse_pi];
			page = prev->page;
			page_offset = prev->page_offset + unmap_q->map_size;
			get_page(page);
		}

		if (unlikely(!page)) {
			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
			rcb->rxq->rxbuf_alloc_failed++;
			goto finishing;
		}

		dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset,
					unmap_q->map_size, DMA_FROM_DEVICE);
		if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
			put_page(page);
			BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
			rcb->rxq->rxbuf_map_failed++;
			goto finishing;
		}

		unmap->page = page;
		unmap->page_offset = page_offset;
		dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
		unmap->vector.len = unmap_q->map_size;
		page_offset += unmap_q->map_size;

		if (page_offset < alloc_size)
			unmap_q->reuse_pi = prod;
		else
			unmap_q->reuse_pi = -1;

		rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
		BNA_QE_INDX_INC(prod, q_depth);
		alloced++;
	}

finishing:
	if (likely(alloced)) {
		rcb->producer_index = prod;
		smp_mb();
		if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
			bna_rxq_prod_indx_doorbell(rcb);
	}

	return alloced;
}

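/*
 * Posts up to 'nalloc' skb-based Rx buffers (one skb per RxQ entry) and rings
 * the RxQ doorbell for whatever was actually allocated and DMA-mapped.
 */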
static u32
bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
{
	u32 alloced, prod, q_depth, buff_sz;
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
	struct bnad_rx_unmap *unmap;
	struct bna_rxq_entry *rxent;
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	buff_sz = rcb->rxq->buffer_size;
	prod = rcb->producer_index;
	q_depth = rcb->q_depth;

	alloced = 0;
	while (nalloc--) {
		unmap = &unmap_q->unmap[prod];

		skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz);

		if (unlikely(!skb)) {
			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
			rcb->rxq->rxbuf_alloc_failed++;
			goto finishing;
		}

		dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
					  buff_sz, DMA_FROM_DEVICE);
		if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
			dev_kfree_skb_any(skb);
			BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
			rcb->rxq->rxbuf_map_failed++;
			goto finishing;
		}

		unmap->skb = skb;
		dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
		unmap->vector.len = buff_sz;

		rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
		BNA_QE_INDX_INC(prod, q_depth);
		alloced++;
	}

finishing:
	if (likely(alloced)) {
		rcb->producer_index = prod;
		smp_mb();
		if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
			bna_rxq_prod_indx_doorbell(rcb);
	}

	return alloced;
}

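/*
 * Refills the RxQ only once enough entries are free to cross the refill
 * threshold, using the skb or page allocator that matches the unmap_q type.
 */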
static inline void
bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
	u32 to_alloc;

	to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth);
	if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT))
		return;

	if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
		bnad_rxq_refill_skb(bnad, rcb, to_alloc);
	else
		bnad_rxq_refill_page(bnad, rcb, to_alloc);
}

#define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
				BNA_CQ_EF_IPV6 | \
				BNA_CQ_EF_TCP | BNA_CQ_EF_UDP | \
				BNA_CQ_EF_L4_CKSUM_OK)

#define flags_tcp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
				BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
#define flags_tcp6 (BNA_CQ_EF_IPV6 | \
				BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
#define flags_udp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
				BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
#define flags_udp6 (BNA_CQ_EF_IPV6 | \
				BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)

static void
bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb,
		    u32 sop_ci, u32 nvecs)
{
	struct bnad_rx_unmap_q *unmap_q;
	struct bnad_rx_unmap *unmap;
	u32 ci, vec;

	unmap_q = rcb->unmap_q;
	for (vec = 0, ci = sop_ci; vec < nvecs; vec++) {
		unmap = &unmap_q->unmap[ci];
		BNA_QE_INDX_INC(ci, rcb->q_depth);

		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
			bnad_rxq_cleanup_skb(bnad, unmap);
		else
			bnad_rxq_cleanup_page(bnad, unmap);
	}
}

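/*
 * Attaches the page fragments of a (possibly multi-buffer) received frame to
 * the GRO skb, walking the completion entries to pick up per-fragment lengths.
 */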
static void
bnad_cq_setup_skb_frags(struct bna_ccb *ccb, struct sk_buff *skb, u32 nvecs)
{
	struct bna_rcb *rcb;
	struct bnad *bnad;
	struct bnad_rx_unmap_q *unmap_q;
	struct bna_cq_entry *cq, *cmpl;
	u32 ci, pi, totlen = 0;

	cq = ccb->sw_q;
	pi = ccb->producer_index;
	cmpl = &cq[pi];

	rcb = bna_is_small_rxq(cmpl->rxq_id) ? ccb->rcb[1] : ccb->rcb[0];
	unmap_q = rcb->unmap_q;
	bnad = rcb->bnad;
	ci = rcb->consumer_index;

	/* prefetch header */
	prefetch(page_address(unmap_q->unmap[ci].page) +
		 unmap_q->unmap[ci].page_offset);

	while (nvecs--) {
		struct bnad_rx_unmap *unmap;
		u32 len;

		unmap = &unmap_q->unmap[ci];
		BNA_QE_INDX_INC(ci, rcb->q_depth);

		dma_unmap_page(&bnad->pcidev->dev,
			       dma_unmap_addr(&unmap->vector, dma_addr),
			       unmap->vector.len, DMA_FROM_DEVICE);

		len = ntohs(cmpl->length);
		skb->truesize += unmap->vector.len;
		totlen += len;

		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   unmap->page, unmap->page_offset, len);

		unmap->page = NULL;
		unmap->vector.len = 0;

		BNA_QE_INDX_INC(pi, ccb->q_depth);
		cmpl = &cq[pi];
	}

	skb->len += totlen;
	skb->data_len += totlen;
}

static inline void
bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb,
		  struct bnad_rx_unmap *unmap, u32 len)
{
	prefetch(skb->data);

	dma_unmap_single(&bnad->pcidev->dev,
			 dma_unmap_addr(&unmap->vector, dma_addr),
			 unmap->vector.len, DMA_FROM_DEVICE);

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, bnad->netdev);

	unmap->skb = NULL;
	unmap->vector.len = 0;
}

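/*
 * NAPI poll worker for one CQ: walks the completion ring for up to 'budget'
 * frames, hands good frames to the stack (netif_receive_skb or GRO frags),
 * drops errored ones, and replenishes both RxQs before returning.
 */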
static u32
bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
{
	struct bna_cq_entry *cq, *cmpl, *next_cmpl;
	struct bna_rcb *rcb = NULL;
	struct bnad_rx_unmap_q *unmap_q;
	struct bnad_rx_unmap *unmap = NULL;
	struct sk_buff *skb = NULL;
	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
	struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl;
	u32 packets = 0, len = 0, totlen = 0;
	u32 pi, vec, sop_ci = 0, nvecs = 0;
	u32 flags, masked_flags;

	prefetch(bnad->netdev);

	cq = ccb->sw_q;

	while (packets < budget) {
		cmpl = &cq[ccb->producer_index];
		if (!cmpl->valid)
			break;
		/* The 'valid' field is set by the adapter, only after writing
		 * the other fields of completion entry. Hence, do not load
		 * other fields of completion entry *before* the 'valid' is
		 * loaded. Adding the rmb() here prevents the compiler and/or
		 * CPU from reordering the reads which would potentially result
		 * in reading stale values in completion entry.
		 */
		rmb();

		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));

		if (bna_is_small_rxq(cmpl->rxq_id))
			rcb = ccb->rcb[1];
		else
			rcb = ccb->rcb[0];

		unmap_q = rcb->unmap_q;

		/* start of packet ci */
		sop_ci = rcb->consumer_index;

		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) {
			unmap = &unmap_q->unmap[sop_ci];
			skb = unmap->skb;
		} else {
			skb = napi_get_frags(&rx_ctrl->napi);
			if (unlikely(!skb))
				break;
		}
		prefetch(skb);

		flags = ntohl(cmpl->flags);
		len = ntohs(cmpl->length);
		totlen = len;
		nvecs = 1;

		/* Check all the completions for this frame.
		 * busy-wait doesn't help much, break here.
		 */
		if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) &&
		    (flags & BNA_CQ_EF_EOP) == 0) {
			pi = ccb->producer_index;
			do {
				BNA_QE_INDX_INC(pi, ccb->q_depth);
				next_cmpl = &cq[pi];

				if (!next_cmpl->valid)
					break;
				/* The 'valid' field is set by the adapter, only
				 * after writing the other fields of completion
				 * entry. Hence, do not load other fields of
				 * completion entry *before* the 'valid' is
				 * loaded. Adding the rmb() here prevents the
				 * compiler and/or CPU from reordering the reads
				 * which would potentially result in reading
				 * stale values in completion entry.
				 */
				rmb();

				len = ntohs(next_cmpl->length);
				flags = ntohl(next_cmpl->flags);

				nvecs++;
				totlen += len;
			} while ((flags & BNA_CQ_EF_EOP) == 0);

			if (!next_cmpl->valid)
				break;
		}
		packets++;

		/* TODO: BNA_CQ_EF_LOCAL ? */
		if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
				      BNA_CQ_EF_FCS_ERROR |
				      BNA_CQ_EF_TOO_LONG))) {
			bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs);
			rcb->rxq->rx_packets_with_error++;

			goto next;
		}

		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
			bnad_cq_setup_skb(bnad, skb, unmap, len);
		else
			bnad_cq_setup_skb_frags(ccb, skb, nvecs);

		rcb->rxq->rx_packets++;
		rcb->rxq->rx_bytes += totlen;
		ccb->bytes_per_intr += totlen;

		masked_flags = flags & flags_cksum_prot_mask;

		if (likely
		    ((bnad->netdev->features & NETIF_F_RXCSUM) &&
		     ((masked_flags == flags_tcp4) ||
		      (masked_flags == flags_udp4) ||
		      (masked_flags == flags_tcp6) ||
		      (masked_flags == flags_udp6))))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		if ((flags & BNA_CQ_EF_VLAN) &&
		    (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));

		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
			netif_receive_skb(skb);
		else
			napi_gro_frags(&rx_ctrl->napi);

next:
		BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth);
		for (vec = 0; vec < nvecs; vec++) {
			cmpl = &cq[ccb->producer_index];
			cmpl->valid = 0;
			BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
		}
	}

	napi_gro_flush(&rx_ctrl->napi, false);
	if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
		bna_ib_ack_disable_irq(ccb->i_dbell, packets);

	bnad_rxq_post(bnad, ccb->rcb[0]);
	if (ccb->rcb[1])
		bnad_rxq_post(bnad, ccb->rcb[1]);

	return packets;
}

static void
bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
	struct napi_struct *napi = &rx_ctrl->napi;

	if (likely(napi_schedule_prep(napi))) {
		__napi_schedule(napi);
		rx_ctrl->rx_schedule++;
	}
}

/* MSIX Rx Path Handler */
static irqreturn_t
bnad_msix_rx(int irq, void *data)
{
	struct bna_ccb *ccb = (struct bna_ccb *)data;

	if (ccb) {
		((struct bnad_rx_ctrl *)ccb->ctrl)->rx_intr_ctr++;
		bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
	}

	return IRQ_HANDLED;
}

/* Interrupt handlers */

/* Mbox Interrupt Handlers */
static irqreturn_t
bnad_msix_mbox_handler(int irq, void *data)
{
	u32 intr_status;
	unsigned long flags;
	struct bnad *bnad = (struct bnad *)data;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return IRQ_HANDLED;
	}

	bna_intr_status_get(&bnad->bna, intr_status);

	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
		bna_mbox_handler(&bnad->bna, intr_status);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t
bnad_isr(int irq, void *data)
{
	int i, j;
	u32 intr_status;
	unsigned long flags;
	struct bnad *bnad = (struct bnad *)data;
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	struct bna_tcb *tcb = NULL;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return IRQ_NONE;
	}

	bna_intr_status_get(&bnad->bna, intr_status);

	if (unlikely(!intr_status)) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return IRQ_NONE;
	}

	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
		bna_mbox_handler(&bnad->bna, intr_status);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (!BNA_IS_INTX_DATA_INTR(intr_status))
		return IRQ_HANDLED;

	/* Process data interrupts */
	/* Tx processing */
	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++) {
			tcb = bnad->tx_info[i].tcb[j];
			if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
				bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]);
		}
	}
	/* Rx processing */
	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			rx_ctrl = &rx_info->rx_ctrl[j];
			if (rx_ctrl->ccb)
				bnad_netif_rx_schedule_poll(bnad,
							    rx_ctrl->ccb);
		}
	}
	return IRQ_HANDLED;
}

/*
 * Called in interrupt / callback context
 * with bna_lock held, so cfg_flags access is OK
 */
static void
bnad_enable_mbox_irq(struct bnad *bnad)
{
	clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
}

/*
 * Called with bnad->bna_lock held because of
 * bnad->cfg_flags access.
 */
static void
bnad_disable_mbox_irq(struct bnad *bnad)
{
	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
}

static void
bnad_set_netdev_perm_addr(struct bnad *bnad)
{
	struct net_device *netdev = bnad->netdev;

	ether_addr_copy(netdev->perm_addr, bnad->perm_addr);
	if (is_zero_ether_addr(netdev->dev_addr))
		ether_addr_copy(netdev->dev_addr, bnad->perm_addr);
}

/* Control Path Handlers */

/* Callbacks */
void
bnad_cb_mbox_intr_enable(struct bnad *bnad)
{
	bnad_enable_mbox_irq(bnad);
}

void
bnad_cb_mbox_intr_disable(struct bnad *bnad)
{
	bnad_disable_mbox_irq(bnad);
}

void
bnad_cb_ioceth_ready(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.ioc_comp);
}

void
bnad_cb_ioceth_failed(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
	complete(&bnad->bnad_completions.ioc_comp);
}

void
bnad_cb_ioceth_disabled(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.ioc_comp);
}

static void
bnad_cb_enet_disabled(void *arg)
{
	struct bnad *bnad = (struct bnad *)arg;

	netif_carrier_off(bnad->netdev);
	complete(&bnad->bnad_completions.enet_comp);
}

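/*
 * Link-state callback from bna: updates the CEE flag, toggles the netdev
 * carrier, and wakes or stops the Tx subqueues to match the new link state.
 */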
void
bnad_cb_ethport_link_status(struct bnad *bnad,
			    enum bna_link_status link_status)
{
	bool link_up = false;

	link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);

	if (link_status == BNA_CEE_UP) {
		if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
			BNAD_UPDATE_CTR(bnad, cee_toggle);
		set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
	} else {
		if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
			BNAD_UPDATE_CTR(bnad, cee_toggle);
		clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
	}

	if (link_up) {
		if (!netif_carrier_ok(bnad->netdev)) {
			uint tx_id, tcb_id;
			netdev_info(bnad->netdev, "link up\n");
			netif_carrier_on(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, link_toggle);
			for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
				for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
				     tcb_id++) {
					struct bna_tcb *tcb =
					bnad->tx_info[tx_id].tcb[tcb_id];
					u32 txq_id;
					if (!tcb)
						continue;

					txq_id = tcb->id;

					if (test_bit(BNAD_TXQ_TX_STARTED,
						     &tcb->flags)) {
						/*
						 * Force an immediate
						 * Transmit Schedule
						 */
						netif_wake_subqueue(
								bnad->netdev,
								txq_id);
						BNAD_UPDATE_CTR(bnad,
							netif_queue_wakeup);
					} else {
						netif_stop_subqueue(
								bnad->netdev,
								txq_id);
						BNAD_UPDATE_CTR(bnad,
							netif_queue_stop);
					}
				}
			}
		}
	} else {
		if (netif_carrier_ok(bnad->netdev)) {
			netdev_info(bnad->netdev, "link down\n");
			netif_carrier_off(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, link_toggle);
		}
	}
}

static void
bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.tx_comp);
}

static void
bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;

	tcb->priv = tcb;
	tx_info->tcb[tcb->id] = tcb;
}

static void
bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;

	tx_info->tcb[tcb->id] = NULL;
	tcb->priv = NULL;
}

static void
bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_info *rx_info =
			(struct bnad_rx_info *)ccb->cq->rx->priv;

	rx_info->rx_ctrl[ccb->id].ccb = ccb;
	ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
}

static void
bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_info *rx_info =
			(struct bnad_rx_info *)ccb->cq->rx->priv;

	rx_info->rx_ctrl[ccb->id].ccb = NULL;
}

static void
bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tx->priv;
	struct bna_tcb *tcb;
	u32 txq_id;
	int i;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;
		txq_id = tcb->id;
		clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
		netif_stop_subqueue(bnad->netdev, txq_id);
	}
}

static void
bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
{
	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
	struct bna_tcb *tcb;
	u32 txq_id;
	int i;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;
		txq_id = tcb->id;

		BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags));
		set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
		BUG_ON(*(tcb->hw_consumer_index) != 0);

		if (netif_carrier_ok(bnad->netdev)) {
			netif_wake_subqueue(bnad->netdev, txq_id);
			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
		}
	}

	/*
	 * Workaround for the first ioceth enable failure, after which we
	 * read back a zero MAC address. Try to fetch the MAC address again.
	 */
	if (is_zero_ether_addr(bnad->perm_addr)) {
		bna_enet_perm_mac_get(&bnad->bna.enet, bnad->perm_addr);
		bnad_set_netdev_perm_addr(bnad);
	}
}

/*
 * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm.
 */
static void
bnad_tx_cleanup(struct delayed_work *work)
{
	struct bnad_tx_info *tx_info =
		container_of(work, struct bnad_tx_info, tx_cleanup_work);
	struct bnad *bnad = NULL;
	struct bna_tcb *tcb;
	unsigned long flags;
	u32 i, pending = 0;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;

		bnad = tcb->bnad;

		if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
			pending++;
			continue;
		}

		bnad_txq_cleanup(bnad, tcb);

		smp_mb__before_atomic();
		clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
	}

	if (pending) {
		queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
			msecs_to_jiffies(1));
		return;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_cleanup_complete(tx_info->tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
{
	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
	struct bna_tcb *tcb;
	int i;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;
	}

	queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
}

static void
bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
{
	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
	struct bna_ccb *ccb;
	struct bnad_rx_ctrl *rx_ctrl;
	int i;

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];
		ccb = rx_ctrl->ccb;
		if (!ccb)
			continue;

		clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);

		if (ccb->rcb[1])
			clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
	}
}

/*
 * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm.
 */
static void
bnad_rx_cleanup(void *work)
{
	struct bnad_rx_info *rx_info =
		container_of(work, struct bnad_rx_info, rx_cleanup_work);
	struct bnad_rx_ctrl *rx_ctrl;
	struct bnad *bnad = NULL;
	unsigned long flags;
	u32 i;

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];

		if (!rx_ctrl->ccb)
			continue;

		bnad = rx_ctrl->ccb->bnad;

		/*
		 * Wait till the poll handler has exited
		 * and nothing can be scheduled anymore
		 */
		napi_disable(&rx_ctrl->napi);

		bnad_cq_cleanup(bnad, rx_ctrl->ccb);
		bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]);
		if (rx_ctrl->ccb->rcb[1])
			bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]);
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_cleanup_complete(rx_info->rx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
{
	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
	struct bna_ccb *ccb;
	struct bnad_rx_ctrl *rx_ctrl;
	int i;

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];
		ccb = rx_ctrl->ccb;
		if (!ccb)
			continue;

		clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);

		if (ccb->rcb[1])
			clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
	}

	queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
}

static void
bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
{
	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
	struct bna_ccb *ccb;
	struct bna_rcb *rcb;
	struct bnad_rx_ctrl *rx_ctrl;
	int i, j;

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];
		ccb = rx_ctrl->ccb;
		if (!ccb)
			continue;

		napi_enable(&rx_ctrl->napi);

		for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
			rcb = ccb->rcb[j];
			if (!rcb)
				continue;

			bnad_rxq_alloc_init(bnad, rcb);
			set_bit(BNAD_RXQ_STARTED, &rcb->flags);
			set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
			bnad_rxq_post(bnad, rcb);
		}
	}
}

static void
bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.rx_comp);
}

static void
bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
{
	bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.mcast_comp);
}

void
bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
		  struct bna_stats *stats)
{
	if (status == BNA_CB_SUCCESS)
		BNAD_UPDATE_CTR(bnad, hw_stats_updates);

	if (!netif_running(bnad->netdev) ||
	    !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		return;

	mod_timer(&bnad->stats_timer,
		  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
}

static void
bnad_cb_enet_mtu_set(struct bnad *bnad)
{
	bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.mtu_comp);
}

void
bnad_cb_completion(void *arg, enum bfa_status status)
{
	struct bnad_iocmd_comp *iocmd_comp =
			(struct bnad_iocmd_comp *)arg;

	iocmd_comp->comp_status = (u32) status;
	complete(&iocmd_comp->comp);
}

/* Resource allocation, free functions */

static void
bnad_mem_free(struct bnad *bnad,
	      struct bna_mem_info *mem_info)
{
	int i;
	dma_addr_t dma_pa;

	if (mem_info->mdl == NULL)
		return;

	for (i = 0; i < mem_info->num; i++) {
		if (mem_info->mdl[i].kva != NULL) {
			if (mem_info->mem_type == BNA_MEM_T_DMA) {
				BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
						dma_pa);
				dma_free_coherent(&bnad->pcidev->dev,
						  mem_info->mdl[i].len,
						  mem_info->mdl[i].kva, dma_pa);
			} else
				kfree(mem_info->mdl[i].kva);
		}
	}
	kfree(mem_info->mdl);
	mem_info->mdl = NULL;
}

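/*
 * Allocates 'num' memory descriptors of 'len' bytes each, either as
 * DMA-coherent blocks or plain kernel memory depending on mem_type; on any
 * failure the partially built list is released via bnad_mem_free().
 */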
static int
bnad_mem_alloc(struct bnad *bnad,
	       struct bna_mem_info *mem_info)
{
	int i;
	dma_addr_t dma_pa;

	if ((mem_info->num == 0) || (mem_info->len == 0)) {
		mem_info->mdl = NULL;
		return 0;
	}

	mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
				GFP_KERNEL);
	if (mem_info->mdl == NULL)
		return -ENOMEM;

	if (mem_info->mem_type == BNA_MEM_T_DMA) {
		for (i = 0; i < mem_info->num; i++) {
			mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva =
				dma_alloc_coherent(&bnad->pcidev->dev,
						   mem_info->len, &dma_pa,
						   GFP_KERNEL);
			if (mem_info->mdl[i].kva == NULL)
				goto err_return;

			BNA_SET_DMA_ADDR(dma_pa,
					 &(mem_info->mdl[i].dma));
		}
	} else {
		for (i = 0; i < mem_info->num; i++) {
			mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva = kzalloc(mem_info->len,
						       GFP_KERNEL);
			if (mem_info->mdl[i].kva == NULL)
				goto err_return;
		}
	}

	return 0;

err_return:
	bnad_mem_free(bnad, mem_info);
	return -ENOMEM;
}

/* Free IRQ for Mailbox */
static void
bnad_mbox_irq_free(struct bnad *bnad)
{
	int irq;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad_disable_mbox_irq(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	irq = BNAD_GET_MBOX_IRQ(bnad);
	free_irq(irq, bnad);
}

/*
 * Allocates the IRQ for the Mailbox, but keeps it disabled.
 * It will be enabled once we get the mbox enable callback
 * from bna.
 */
static int
bnad_mbox_irq_alloc(struct bnad *bnad)
{
	int err = 0;
	unsigned long irq_flags, flags;
	u32 irq;
	irq_handler_t irq_handler;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (bnad->cfg_flags & BNAD_CF_MSIX) {
		irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
		irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
		irq_flags = 0;
	} else {
		irq_handler = (irq_handler_t)bnad_isr;
		irq = bnad->pcidev->irq;
		irq_flags = IRQF_SHARED;
	}

	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);

	/*
	 * Set the Mbox IRQ disable flag, so that the IRQ handler
	 * called from request_irq() for SHARED IRQs does not execute.
	 */
	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);

	err = request_irq(irq, irq_handler, irq_flags,
			  bnad->mbox_irq_name, bnad);

	return err;
}

static void
bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
{
	kfree(intr_info->idl);
	intr_info->idl = NULL;
}

/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
static int
bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
		    u32 txrx_id, struct bna_intr_info *intr_info)
{
	int i, vector_start = 0;
	u32 cfg_flags;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	cfg_flags = bnad->cfg_flags;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (cfg_flags & BNAD_CF_MSIX) {
		intr_info->intr_type = BNA_INTR_T_MSIX;
		intr_info->idl = kcalloc(intr_info->num,
					 sizeof(struct bna_intr_descr),
					 GFP_KERNEL);
		if (!intr_info->idl)
			return -ENOMEM;

		switch (src) {
		case BNAD_INTR_TX:
			vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
			break;

		case BNAD_INTR_RX:
			vector_start = BNAD_MAILBOX_MSIX_VECTORS +
				       (bnad->num_tx * bnad->num_txq_per_tx) +
				       txrx_id;
			break;

		default:
			BUG();
		}

		for (i = 0; i < intr_info->num; i++)
			intr_info->idl[i].vector = vector_start + i;
	} else {
		intr_info->intr_type = BNA_INTR_T_INTX;
		intr_info->num = 1;
		intr_info->idl = kcalloc(intr_info->num,
					 sizeof(struct bna_intr_descr),
					 GFP_KERNEL);
		if (!intr_info->idl)
			return -ENOMEM;

		switch (src) {
		case BNAD_INTR_TX:
			intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
			break;

		case BNAD_INTR_RX:
			intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
			break;
		}
	}
	return 0;
}

/* NOTE: Should be called for MSIX only
 * Unregisters Tx MSIX vector(s) from the kernel
 */
static void
bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
			int num_txqs)
{
	int i;
	int vector_num;

	for (i = 0; i < num_txqs; i++) {
		if (tx_info->tcb[i] == NULL)
			continue;

		vector_num = tx_info->tcb[i]->intr_vector;
		free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
	}
}

/* NOTE: Should be called for MSIX only
 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
 */
static int
bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
		      u32 tx_id, int num_txqs)
{
	int i;
	int err;
	int vector_num;

	for (i = 0; i < num_txqs; i++) {
		vector_num = tx_info->tcb[i]->intr_vector;
		sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
			tx_id + tx_info->tcb[i]->id);
		err = request_irq(bnad->msix_table[vector_num].vector,
				  (irq_handler_t)bnad_msix_tx, 0,
				  tx_info->tcb[i]->name,
				  tx_info->tcb[i]);
		if (err)
			goto err_return;
	}

	return 0;

err_return:
	if (i > 0)
		bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
	return -1;
}

/* NOTE: Should be called for MSIX only
 * Unregisters Rx MSIX vector(s) from the kernel
 */
static void
bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
			int num_rxps)
{
	int i;
	int vector_num;

	for (i = 0; i < num_rxps; i++) {
		if (rx_info->rx_ctrl[i].ccb == NULL)
			continue;

		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
		free_irq(bnad->msix_table[vector_num].vector,
			 rx_info->rx_ctrl[i].ccb);
	}
}

/* NOTE: Should be called for MSIX only
 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
 */
static int
bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
		      u32 rx_id, int num_rxps)
{
	int i;
	int err;
	int vector_num;

	for (i = 0; i < num_rxps; i++) {
		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
		sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
			bnad->netdev->name,
			rx_id + rx_info->rx_ctrl[i].ccb->id);
		err = request_irq(bnad->msix_table[vector_num].vector,
				  (irq_handler_t)bnad_msix_rx, 0,
				  rx_info->rx_ctrl[i].ccb->name,
				  rx_info->rx_ctrl[i].ccb);
		if (err)
			goto err_return;
	}

	return 0;

err_return:
	if (i > 0)
		bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
	return -1;
}

/* Free Tx object Resources */
static void
bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
{
	int i;

	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
	}
}

/* Allocates memory and interrupt resources for Tx object */
static int
bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
		  u32 tx_id)
{
	int i, err = 0;

	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			err = bnad_mem_alloc(bnad,
					&res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
					&res_info[i].res_u.intr_info);
		if (err)
			goto err_return;
	}
	return 0;

err_return:
	bnad_tx_res_free(bnad, res_info);
	return err;
}

/* Free Rx object Resources */
static void
bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
{
	int i;

	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
	}
}

/* Allocates memory and interrupt resources for Rx object */
static int
bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
		  uint rx_id)
{
	int i, err = 0;

	/* All memory needs to be allocated before setup_ccbs */
	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			err = bnad_mem_alloc(bnad,
					&res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
					&res_info[i].res_u.intr_info);
		if (err)
			goto err_return;
	}
	return 0;

err_return:
	bnad_rx_res_free(bnad, res_info);
	return err;
}

/* Timer callbacks */
/* a) IOC timer */
static void
bnad_ioc_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_ioc_timeout(&bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_ioc_hb_check(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_ioc_hb_check(&bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_iocpf_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_iocpf_timeout(&bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_iocpf_sem_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_iocpf_sem_timeout(&bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * All timer routines use bnad->bna_lock to protect against
 * the following race, which may occur in case of no locking:
 *	Time	CPU m		CPU n
 *	0	1 = test_bit
 *	1			clear_bit
 *	2			del_timer_sync
 *	3	mod_timer
 */

/* b) Dynamic Interrupt Moderation Timer */
static void
bnad_dim_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	int i, j;
	unsigned long flags;

	if (!netif_carrier_ok(bnad->netdev))
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			rx_ctrl = &rx_info->rx_ctrl[j];
			if (!rx_ctrl->ccb)
				continue;
			bna_rx_dim_update(rx_ctrl->ccb);
		}
	}

	/* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
	if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
		mod_timer(&bnad->dim_timer,
			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/* c) Statistics Timer */
static void
bnad_stats_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	if (!netif_running(bnad->netdev) ||
	    !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_hw_stats_get(&bnad->bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * Set up timer for DIM
 * Called with bnad->bna_lock held
 */
void
bnad_dim_timer_start(struct bnad *bnad)
{
	if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
	    !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
		setup_timer(&bnad->dim_timer, bnad_dim_timeout,
			    (unsigned long)bnad);
		set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
		mod_timer(&bnad->dim_timer,
			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
	}
}

/*
 * Set up timer for statistics
 * Called with mutex_lock(&bnad->conf_mutex) held
 */
static void
bnad_stats_timer_start(struct bnad *bnad)
{
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
		setup_timer(&bnad->stats_timer, bnad_stats_timeout,
			    (unsigned long)bnad);
		mod_timer(&bnad->stats_timer,
			  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * Stops the stats timer
 * Called with mutex_lock(&bnad->conf_mutex) held
 */
static void
bnad_stats_timer_stop(struct bnad *bnad)
{
	int to_del = 0;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		to_del = 1;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (to_del)
		del_timer_sync(&bnad->stats_timer);
}

/* Utilities */

static void
bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
{
	int i = 1; /* Index 0 has broadcast address */
	struct netdev_hw_addr *mc_addr;

	netdev_for_each_mc_addr(mc_addr, netdev) {
		ether_addr_copy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0]);
		i++;
	}
}

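/*
 * NAPI poll callback: processes up to 'budget' completions on this CQ and
 * re-enables the CQ interrupt once the poll is complete.
 */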
static int
bnad_napi_poll_rx(struct napi_struct *napi, int budget)
{
	struct bnad_rx_ctrl *rx_ctrl =
		container_of(napi, struct bnad_rx_ctrl, napi);
	struct bnad *bnad = rx_ctrl->bnad;
	int rcvd = 0;

	rx_ctrl->rx_poll_ctr++;

	if (!netif_carrier_ok(bnad->netdev))
		goto poll_exit;

	rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);
	if (rcvd >= budget)
		return rcvd;

poll_exit:
	napi_complete(napi);

	rx_ctrl->rx_complete++;

	if (rx_ctrl->ccb)
		bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);

	return rcvd;
}

#define BNAD_NAPI_POLL_QUOTA		64
static void
bnad_napi_add(struct bnad *bnad, u32 rx_id)
{
	struct bnad_rx_ctrl *rx_ctrl;
	int i;

	/* Initialize & enable NAPI */
	for (i = 0; i < bnad->num_rxp_per_rx; i++) {
		rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
		netif_napi_add(bnad->netdev, &rx_ctrl->napi,
			       bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
	}
}

static void
bnad_napi_delete(struct bnad *bnad, u32 rx_id)
{
	int i;

	/* First disable and then clean up */
	for (i = 0; i < bnad->num_rxp_per_rx; i++)
		netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
}

/* Should be called with conf_lock held */
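/*
 * Tears down one Tx object: disables it, waits for the disable to complete,
 * frees its MSI-X vectors and resources, and destroys the bna Tx object.
 */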
1919void
b3cc6e88 1920bnad_destroy_tx(struct bnad *bnad, u32 tx_id)
8b230ed8
RM
1921{
1922 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1923 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1924 unsigned long flags;
1925
1926 if (!tx_info->tx)
1927 return;
1928
1929 init_completion(&bnad->bnad_completions.tx_comp);
1930 spin_lock_irqsave(&bnad->bna_lock, flags);
1931 bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1932 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1933 wait_for_completion(&bnad->bnad_completions.tx_comp);
1934
1935 if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1936 bnad_tx_msix_unregister(bnad, tx_info,
1937 bnad->num_txq_per_tx);
1938
1939 spin_lock_irqsave(&bnad->bna_lock, flags);
1940 bna_tx_destroy(tx_info->tx);
1941 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1942
1943 tx_info->tx = NULL;
078086f3 1944 tx_info->tx_id = 0;
8b230ed8 1945
8b230ed8
RM
1946 bnad_tx_res_free(bnad, res_info);
1947}
1948
1949/* Should be called with conf_lock held */
1950int
078086f3 1951bnad_setup_tx(struct bnad *bnad, u32 tx_id)
8b230ed8
RM
1952{
1953 int err;
1954 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1955 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1956 struct bna_intr_info *intr_info =
1957 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1958 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
d91d25d5 1959 static const struct bna_tx_event_cbfn tx_cbfn = {
1960 .tcb_setup_cbfn = bnad_cb_tcb_setup,
1961 .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
1962 .tx_stall_cbfn = bnad_cb_tx_stall,
1963 .tx_resume_cbfn = bnad_cb_tx_resume,
1964 .tx_cleanup_cbfn = bnad_cb_tx_cleanup,
1965 };
1966
8b230ed8
RM
1967 struct bna_tx *tx;
1968 unsigned long flags;
1969
078086f3
RM
1970 tx_info->tx_id = tx_id;
1971
8b230ed8
RM
1972 /* Initialize the Tx object configuration */
1973 tx_config->num_txq = bnad->num_txq_per_tx;
1974 tx_config->txq_depth = bnad->txq_depth;
1975 tx_config->tx_type = BNA_TX_T_REGULAR;
078086f3 1976 tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
8b230ed8 1977
8b230ed8
RM
1978 /* Get BNA's resource requirement for one tx object */
1979 spin_lock_irqsave(&bnad->bna_lock, flags);
1980 bna_tx_res_req(bnad->num_txq_per_tx,
1981 bnad->txq_depth, res_info);
1982 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1983
1984 /* Fill Unmap Q memory requirements */
5216562a
RM
1985 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1986 bnad->num_txq_per_tx, (sizeof(struct bnad_tx_unmap) *
1987 bnad->txq_depth));
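	/* One bnad_tx_unmap entry per TxQ slot, allocated for each of the
	 * num_txq_per_tx queues.
	 */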
8b230ed8
RM
1988
1989 /* Allocate resources */
1990 err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1991 if (err)
1992 return err;
1993
1994 /* Ask BNA to create one Tx object, supplying required resources */
1995 spin_lock_irqsave(&bnad->bna_lock, flags);
1996 tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
1997 tx_info);
1998 spin_unlock_irqrestore(&bnad->bna_lock, flags);
f29eeb79
RM
1999 if (!tx) {
2000 err = -ENOMEM;
8b230ed8 2001 goto err_return;
f29eeb79 2002 }
8b230ed8
RM
2003 tx_info->tx = tx;
2004
01b54b14
JH
2005 INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
2006 (work_func_t)bnad_tx_cleanup);
2007
8b230ed8
RM
2008 /* Register ISR for the Tx object */
2009 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2010 err = bnad_tx_msix_register(bnad, tx_info,
2011 tx_id, bnad->num_txq_per_tx);
2012 if (err)
f29eeb79 2013 goto cleanup_tx;
8b230ed8
RM
2014 }
2015
2016 spin_lock_irqsave(&bnad->bna_lock, flags);
2017 bna_tx_enable(tx);
2018 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2019
2020 return 0;
2021
f29eeb79
RM
2022cleanup_tx:
2023 spin_lock_irqsave(&bnad->bna_lock, flags);
2024 bna_tx_destroy(tx_info->tx);
2025 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2026 tx_info->tx = NULL;
2027 tx_info->tx_id = 0;
8b230ed8
RM
2028err_return:
2029 bnad_tx_res_free(bnad, res_info);
2030 return err;
2031}
2032
2033/* Setup the rx config for bna_rx_create */
2034/* bnad decides the configuration */
2035static void
2036bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
2037{
e29aa339 2038 memset(rx_config, 0, sizeof(*rx_config));
8b230ed8
RM
2039 rx_config->rx_type = BNA_RX_T_REGULAR;
2040 rx_config->num_paths = bnad->num_rxp_per_rx;
078086f3 2041 rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
8b230ed8
RM
2042
2043 if (bnad->num_rxp_per_rx > 1) {
2044 rx_config->rss_status = BNA_STATUS_T_ENABLED;
2045 rx_config->rss_config.hash_type =
078086f3
RM
2046 (BFI_ENET_RSS_IPV6 |
2047 BFI_ENET_RSS_IPV6_TCP |
2048 BFI_ENET_RSS_IPV4 |
2049 BFI_ENET_RSS_IPV4_TCP);
8b230ed8
RM
2050 rx_config->rss_config.hash_mask =
2051 bnad->num_rxp_per_rx - 1;
0fa6aa4a 2052 netdev_rss_key_fill(rx_config->rss_config.toeplitz_hash_key,
8b230ed8
RM
2053 sizeof(rx_config->rss_config.toeplitz_hash_key));
2054 } else {
2055 rx_config->rss_status = BNA_STATUS_T_DISABLED;
2056 memset(&rx_config->rss_config, 0,
2057 sizeof(rx_config->rss_config));
2058 }
e29aa339
RM
2059
2060 rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu);
2061 rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED;
2062
2063 /* BNA_RXP_SINGLE - one data-buffer queue
 2064	 * BNA_RXP_SLR - one small-buffer and one large-buffer queue
 2065	 * BNA_RXP_HDS - one header-buffer and one data-buffer queue
2066 */
2067 /* TODO: configurable param for queue type */
8b230ed8 2068 rx_config->rxp_type = BNA_RXP_SLR;
8b230ed8 2069
e29aa339
RM
2070 if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
2071 rx_config->frame_size > 4096) {
 2072		/* Even though size routing is enabled in SLR mode,
 2073		 * small packets may still land on the same (large-buffer) rxq,
 2074		 * so set buf_size to 2048 instead of PAGE_SIZE.
2075 */
2076 rx_config->q0_buf_size = 2048;
2077 /* this should be in multiples of 2 */
2078 rx_config->q0_num_vecs = 4;
2079 rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs;
2080 rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED;
2081 } else {
2082 rx_config->q0_buf_size = rx_config->frame_size;
2083 rx_config->q0_num_vecs = 1;
2084 rx_config->q0_depth = bnad->rxq_depth;
2085 }
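	/* Descriptive note (assumption from the config above): in multi-buffer
	 * mode a large frame is presumably scattered across up to q0_num_vecs
	 * buffers of q0_buf_size bytes each, so q0_depth is scaled by the same
	 * factor to keep the per-queue frame capacity roughly constant.
	 */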
2086
2087 /* initialize for q1 for BNA_RXP_SLR/BNA_RXP_HDS */
2088 if (rx_config->rxp_type == BNA_RXP_SLR) {
2089 rx_config->q1_depth = bnad->rxq_depth;
2090 rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE;
2091 }
8b230ed8 2092
877767dc
IV
2093 rx_config->vlan_strip_status =
2094 (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ?
2095 BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
8b230ed8
RM
2096}
2097
2be67144
RM
2098static void
2099bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
2100{
2101 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2102 int i;
2103
2104 for (i = 0; i < bnad->num_rxp_per_rx; i++)
2105 rx_info->rx_ctrl[i].bnad = bnad;
2106}
2107
8b230ed8 2108/* Called with mutex_lock(&bnad->conf_mutex) held */
2fd888a5 2109static u32
e29aa339
RM
2110bnad_reinit_rx(struct bnad *bnad)
2111{
2112 struct net_device *netdev = bnad->netdev;
2113 u32 err = 0, current_err = 0;
2114 u32 rx_id = 0, count = 0;
2115 unsigned long flags;
2116
2117 /* destroy and create new rx objects */
2118 for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2119 if (!bnad->rx_info[rx_id].rx)
2120 continue;
2121 bnad_destroy_rx(bnad, rx_id);
2122 }
2123
2124 spin_lock_irqsave(&bnad->bna_lock, flags);
2125 bna_enet_mtu_set(&bnad->bna.enet,
2126 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2127 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2128
2129 for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2130 count++;
2131 current_err = bnad_setup_rx(bnad, rx_id);
2132 if (current_err && !err) {
2133 err = current_err;
ecc46789 2134 netdev_err(netdev, "RXQ:%u setup failed\n", rx_id);
e29aa339
RM
2135 }
2136 }
2137
2138 /* restore rx configuration */
2139 if (bnad->rx_info[0].rx && !err) {
2140 bnad_restore_vlans(bnad, 0);
2141 bnad_enable_default_bcast(bnad);
2142 spin_lock_irqsave(&bnad->bna_lock, flags);
2143 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2144 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2145 bnad_set_rx_mode(netdev);
2146 }
2147
2148 return count;
2149}
2150
2151/* Called with bnad_conf_lock() held */
8b230ed8 2152void
b3cc6e88 2153bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
8b230ed8
RM
2154{
2155 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2156 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2157 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2158 unsigned long flags;
271e8b79 2159 int to_del = 0;
8b230ed8
RM
2160
2161 if (!rx_info->rx)
2162 return;
2163
2164 if (0 == rx_id) {
2165 spin_lock_irqsave(&bnad->bna_lock, flags);
271e8b79
RM
2166 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
2167 test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
8b230ed8 2168 clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
271e8b79
RM
2169 to_del = 1;
2170 }
8b230ed8 2171 spin_unlock_irqrestore(&bnad->bna_lock, flags);
271e8b79 2172 if (to_del)
8b230ed8
RM
2173 del_timer_sync(&bnad->dim_timer);
2174 }
2175
8b230ed8
RM
2176 init_completion(&bnad->bnad_completions.rx_comp);
2177 spin_lock_irqsave(&bnad->bna_lock, flags);
2178 bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
2179 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2180 wait_for_completion(&bnad->bnad_completions.rx_comp);
2181
2182 if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
2183 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
2184
01b54b14 2185 bnad_napi_delete(bnad, rx_id);
2be67144 2186
8b230ed8
RM
2187 spin_lock_irqsave(&bnad->bna_lock, flags);
2188 bna_rx_destroy(rx_info->rx);
8b230ed8
RM
2189
2190 rx_info->rx = NULL;
3caa1e95 2191 rx_info->rx_id = 0;
b9fa1fbf 2192 spin_unlock_irqrestore(&bnad->bna_lock, flags);
8b230ed8
RM
2193
2194 bnad_rx_res_free(bnad, res_info);
2195}
2196
2197/* Called with mutex_lock(&bnad->conf_mutex) held */
2198int
078086f3 2199bnad_setup_rx(struct bnad *bnad, u32 rx_id)
8b230ed8
RM
2200{
2201 int err;
2202 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2203 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2204 struct bna_intr_info *intr_info =
2205 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2206 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
d91d25d5 2207 static const struct bna_rx_event_cbfn rx_cbfn = {
5216562a 2208 .rcb_setup_cbfn = NULL,
01b54b14 2209 .rcb_destroy_cbfn = NULL,
d91d25d5 2210 .ccb_setup_cbfn = bnad_cb_ccb_setup,
2211 .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
5bcf6ac0 2212 .rx_stall_cbfn = bnad_cb_rx_stall,
d91d25d5 2213 .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
2214 .rx_post_cbfn = bnad_cb_rx_post,
2215 };
8b230ed8
RM
2216 struct bna_rx *rx;
2217 unsigned long flags;
2218
078086f3
RM
2219 rx_info->rx_id = rx_id;
2220
8b230ed8
RM
2221 /* Initialize the Rx object configuration */
2222 bnad_init_rx_config(bnad, rx_config);
2223
8b230ed8
RM
2224 /* Get BNA's resource requirement for one Rx object */
2225 spin_lock_irqsave(&bnad->bna_lock, flags);
2226 bna_rx_res_req(rx_config, res_info);
2227 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2228
2229 /* Fill Unmap Q memory requirements */
e29aa339
RM
2230 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPDQ],
2231 rx_config->num_paths,
2232 (rx_config->q0_depth *
2233 sizeof(struct bnad_rx_unmap)) +
2234 sizeof(struct bnad_rx_unmap_q));
2235
2236 if (rx_config->rxp_type != BNA_RXP_SINGLE) {
2237 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPHQ],
2238 rx_config->num_paths,
2239 (rx_config->q1_depth *
2240 sizeof(struct bnad_rx_unmap) +
2241 sizeof(struct bnad_rx_unmap_q)));
2242 }
8b230ed8
RM
2243 /* Allocate resource */
2244 err = bnad_rx_res_alloc(bnad, res_info, rx_id);
2245 if (err)
2246 return err;
2247
2be67144
RM
2248 bnad_rx_ctrl_init(bnad, rx_id);
2249
8b230ed8
RM
2250 /* Ask BNA to create one Rx object, supplying required resources */
2251 spin_lock_irqsave(&bnad->bna_lock, flags);
2252 rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
2253 rx_info);
3caa1e95
RM
2254 if (!rx) {
2255 err = -ENOMEM;
b9fa1fbf 2256 spin_unlock_irqrestore(&bnad->bna_lock, flags);
8b230ed8 2257 goto err_return;
3caa1e95 2258 }
8b230ed8 2259 rx_info->rx = rx;
b9fa1fbf 2260 spin_unlock_irqrestore(&bnad->bna_lock, flags);
8b230ed8 2261
01b54b14
JH
2262 INIT_WORK(&rx_info->rx_cleanup_work,
2263 (work_func_t)(bnad_rx_cleanup));
2264
2be67144
RM
2265 /*
 2266	 * Initialize NAPI first, so its state is NAPI_STATE_SCHED and
 2267	 * the IRQ handler cannot schedule NAPI at this point.
2268 */
01b54b14 2269 bnad_napi_add(bnad, rx_id);
2be67144 2270
8b230ed8
RM
2271 /* Register ISR for the Rx object */
2272 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2273 err = bnad_rx_msix_register(bnad, rx_info, rx_id,
2274 rx_config->num_paths);
2275 if (err)
2276 goto err_return;
2277 }
2278
8b230ed8
RM
2279 spin_lock_irqsave(&bnad->bna_lock, flags);
2280 if (0 == rx_id) {
2281 /* Set up Dynamic Interrupt Moderation Vector */
2282 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
2283 bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
2284
2285 /* Enable VLAN filtering only on the default Rx */
2286 bna_rx_vlanfilter_enable(rx);
2287
2288 /* Start the DIM timer */
2289 bnad_dim_timer_start(bnad);
2290 }
2291
2292 bna_rx_enable(rx);
2293 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2294
2295 return 0;
2296
2297err_return:
b3cc6e88 2298 bnad_destroy_rx(bnad, rx_id);
8b230ed8
RM
2299 return err;
2300}
2301
2302/* Called with conf_lock & bnad->bna_lock held */
2303void
2304bnad_tx_coalescing_timeo_set(struct bnad *bnad)
2305{
2306 struct bnad_tx_info *tx_info;
2307
2308 tx_info = &bnad->tx_info[0];
2309 if (!tx_info->tx)
2310 return;
2311
2312 bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
2313}
2314
2315/* Called with conf_lock & bnad->bna_lock held */
2316void
2317bnad_rx_coalescing_timeo_set(struct bnad *bnad)
2318{
2319 struct bnad_rx_info *rx_info;
0120b99c 2320 int i;
8b230ed8
RM
2321
2322 for (i = 0; i < bnad->num_rx; i++) {
2323 rx_info = &bnad->rx_info[i];
2324 if (!rx_info->rx)
2325 continue;
2326 bna_rx_coalescing_timeo_set(rx_info->rx,
2327 bnad->rx_coalescing_timeo);
2328 }
2329}
2330
2331/*
2332 * Called with bnad->bna_lock held
2333 */
a2122d95 2334int
558caad7 2335bnad_mac_addr_set_locked(struct bnad *bnad, const u8 *mac_addr)
8b230ed8
RM
2336{
2337 int ret;
2338
2339 if (!is_valid_ether_addr(mac_addr))
2340 return -EADDRNOTAVAIL;
2341
2342 /* If datapath is down, pretend everything went through */
2343 if (!bnad->rx_info[0].rx)
2344 return 0;
2345
1f9883e0 2346 ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr);
8b230ed8
RM
2347 if (ret != BNA_CB_SUCCESS)
2348 return -EADDRNOTAVAIL;
2349
2350 return 0;
2351}
2352
2353/* Should be called with conf_lock held */
a2122d95 2354int
8b230ed8
RM
2355bnad_enable_default_bcast(struct bnad *bnad)
2356{
2357 struct bnad_rx_info *rx_info = &bnad->rx_info[0];
2358 int ret;
2359 unsigned long flags;
2360
2361 init_completion(&bnad->bnad_completions.mcast_comp);
2362
2363 spin_lock_irqsave(&bnad->bna_lock, flags);
558caad7
IV
2364 ret = bna_rx_mcast_add(rx_info->rx, bnad_bcast_addr,
2365 bnad_cb_rx_mcast_add);
8b230ed8
RM
2366 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2367
2368 if (ret == BNA_CB_SUCCESS)
2369 wait_for_completion(&bnad->bnad_completions.mcast_comp);
2370 else
2371 return -ENODEV;
2372
2373 if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
2374 return -ENODEV;
2375
2376 return 0;
2377}
2378
19dbff9f 2379/* Called with mutex_lock(&bnad->conf_mutex) held */
a2122d95 2380void
aad75b66
RM
2381bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
2382{
f859d7cb 2383 u16 vid;
aad75b66
RM
2384 unsigned long flags;
2385
f859d7cb 2386 for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
aad75b66 2387 spin_lock_irqsave(&bnad->bna_lock, flags);
f859d7cb 2388 bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
aad75b66
RM
2389 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2390 }
2391}
2392
8b230ed8
RM
2393/* Statistics utilities */
2394void
250e061e 2395bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
8b230ed8 2396{
8b230ed8
RM
2397 int i, j;
2398
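	/* Descriptive note: each CCB completes for up to two RxQs
	 * (bnad_rxqs_per_cq is 2): rcb[0] and, for SLR/HDS paths, rcb[1];
	 * hence the second, conditional accumulation below.
	 */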
2399 for (i = 0; i < bnad->num_rx; i++) {
2400 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2401 if (bnad->rx_info[i].rx_ctrl[j].ccb) {
250e061e 2402 stats->rx_packets += bnad->rx_info[i].
8b230ed8 2403 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
250e061e 2404 stats->rx_bytes += bnad->rx_info[i].
8b230ed8
RM
2405 rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2406 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2407 bnad->rx_info[i].rx_ctrl[j].ccb->
2408 rcb[1]->rxq) {
250e061e 2409 stats->rx_packets +=
8b230ed8
RM
2410 bnad->rx_info[i].rx_ctrl[j].
2411 ccb->rcb[1]->rxq->rx_packets;
250e061e 2412 stats->rx_bytes +=
8b230ed8
RM
2413 bnad->rx_info[i].rx_ctrl[j].
2414 ccb->rcb[1]->rxq->rx_bytes;
2415 }
2416 }
2417 }
2418 }
2419 for (i = 0; i < bnad->num_tx; i++) {
2420 for (j = 0; j < bnad->num_txq_per_tx; j++) {
2421 if (bnad->tx_info[i].tcb[j]) {
250e061e 2422 stats->tx_packets +=
8b230ed8 2423 bnad->tx_info[i].tcb[j]->txq->tx_packets;
250e061e 2424 stats->tx_bytes +=
8b230ed8
RM
2425 bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2426 }
2427 }
2428 }
2429}
2430
2431/*
2432 * Must be called with the bna_lock held.
2433 */
2434void
250e061e 2435bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
8b230ed8 2436{
078086f3
RM
2437 struct bfi_enet_stats_mac *mac_stats;
2438 u32 bmap;
8b230ed8
RM
2439 int i;
2440
078086f3 2441 mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
250e061e 2442 stats->rx_errors =
8b230ed8
RM
2443 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2444 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2445 mac_stats->rx_undersize;
250e061e 2446 stats->tx_errors = mac_stats->tx_fcs_error +
8b230ed8 2447 mac_stats->tx_undersize;
250e061e
ED
2448 stats->rx_dropped = mac_stats->rx_drop;
2449 stats->tx_dropped = mac_stats->tx_drop;
2450 stats->multicast = mac_stats->rx_multicast;
2451 stats->collisions = mac_stats->tx_total_collision;
8b230ed8 2452
250e061e 2453 stats->rx_length_errors = mac_stats->rx_frame_length_error;
8b230ed8
RM
2454
 2455	/* receive ring buffer overflow: not accounted for here */
2456
250e061e
ED
2457 stats->rx_crc_errors = mac_stats->rx_fcs_error;
2458 stats->rx_frame_errors = mac_stats->rx_alignment_error;
8b230ed8 2459 /* recv'r fifo overrun */
078086f3
RM
2460 bmap = bna_rx_rid_mask(&bnad->bna);
2461 for (i = 0; bmap; i++) {
8b230ed8 2462 if (bmap & 1) {
250e061e 2463 stats->rx_fifo_errors +=
8b230ed8 2464 bnad->stats.bna_stats->
078086f3 2465 hw_stats.rxf_stats[i].frame_drops;
8b230ed8
RM
2466 break;
2467 }
2468 bmap >>= 1;
2469 }
2470}
2471
2472static void
2473bnad_mbox_irq_sync(struct bnad *bnad)
2474{
2475 u32 irq;
2476 unsigned long flags;
2477
2478 spin_lock_irqsave(&bnad->bna_lock, flags);
2479 if (bnad->cfg_flags & BNAD_CF_MSIX)
8811e267 2480 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
8b230ed8
RM
2481 else
2482 irq = bnad->pcidev->irq;
2483 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2484
2485 synchronize_irq(irq);
2486}
2487
2488/* Utility used by bnad_start_xmit to prepare a TSO skb */
2489static int
2490bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2491{
2492 int err;
2493
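	/* skb_cow_head() makes the headers private and writable before they
	 * are modified below.
	 */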
b13a8a99 2494 err = skb_cow_head(skb, 0);
2495 if (err < 0) {
2496 BNAD_UPDATE_CTR(bnad, tso_err);
2497 return err;
8b230ed8
RM
2498 }
2499
2500 /*
2501 * For TSO, the TCP checksum field is seeded with pseudo-header sum
2502 * excluding the length field.
2503 */
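	/* Descriptive note (assumption): the length fields are cleared here
	 * because the device presumably fills in the per-segment lengths when
	 * it segments the frame, so only the pseudo-header sum is seeded.
	 */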
1c53730a 2504 if (vlan_get_protocol(skb) == htons(ETH_P_IP)) {
8b230ed8
RM
2505 struct iphdr *iph = ip_hdr(skb);
2506
2507 /* Do we really need these? */
2508 iph->tot_len = 0;
2509 iph->check = 0;
2510
2511 tcp_hdr(skb)->check =
2512 ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2513 IPPROTO_TCP, 0);
2514 BNAD_UPDATE_CTR(bnad, tso4);
2515 } else {
2516 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2517
8b230ed8
RM
2518 ipv6h->payload_len = 0;
2519 tcp_hdr(skb)->check =
2520 ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2521 IPPROTO_TCP, 0);
2522 BNAD_UPDATE_CTR(bnad, tso6);
2523 }
2524
2525 return 0;
2526}
2527
2528/*
2529 * Initialize Q numbers depending on Rx Paths
2530 * Called with bnad->bna_lock held, because of cfg_flags
2531 * access.
2532 */
2533static void
2534bnad_q_num_init(struct bnad *bnad)
2535{
2536 int rxps;
2537
2538 rxps = min((uint)num_online_cpus(),
772b5235 2539 (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
8b230ed8
RM
2540
2541 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2542 rxps = 1; /* INTx */
2543
2544 bnad->num_rx = 1;
2545 bnad->num_tx = 1;
2546 bnad->num_rxp_per_rx = rxps;
2547 bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2548}
2549
2550/*
 2551 * Adjust the queue numbers for a given number of MSI-X vectors.
 2552 * Preference is given to RSS over Tx priority queues, so in that
 2553 * case just use 1 Tx queue.
 2554 * Called with bnad->bna_lock held because of cfg_flags access.
2555 */
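/*
 * Worked example (assuming BNAD_MAILBOX_MSIX_VECTORS is 1): with 8 MSI-X
 * vectors and a single TxQ, num_rxp_per_rx becomes 8 - 1 - 1 = 6.
 */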
2556static void
078086f3 2557bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
8b230ed8
RM
2558{
2559 bnad->num_txq_per_tx = 1;
2560 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
2561 bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2562 (bnad->cfg_flags & BNAD_CF_MSIX)) {
2563 bnad->num_rxp_per_rx = msix_vectors -
2564 (bnad->num_tx * bnad->num_txq_per_tx) -
2565 BNAD_MAILBOX_MSIX_VECTORS;
2566 } else
2567 bnad->num_rxp_per_rx = 1;
2568}
2569
078086f3
RM
2570/* Enable / disable ioceth */
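/*
 * Descriptive note: both helpers hand a completion to the IOC state machine
 * and wait, bounded by BNAD_IOCETH_TIMEOUT, for the firmware callback to
 * post the status that is then returned to the caller.
 */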
2571static int
2572bnad_ioceth_disable(struct bnad *bnad)
8b230ed8
RM
2573{
2574 unsigned long flags;
078086f3 2575 int err = 0;
8b230ed8
RM
2576
2577 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3
RM
2578 init_completion(&bnad->bnad_completions.ioc_comp);
2579 bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
8b230ed8
RM
2580 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2581
078086f3
RM
2582 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2583 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2584
2585 err = bnad->bnad_completions.ioc_comp_status;
2586 return err;
8b230ed8
RM
2587}
2588
2589static int
078086f3 2590bnad_ioceth_enable(struct bnad *bnad)
8b230ed8
RM
2591{
2592 int err = 0;
2593 unsigned long flags;
2594
8b230ed8 2595 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3
RM
2596 init_completion(&bnad->bnad_completions.ioc_comp);
2597 bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
2598 bna_ioceth_enable(&bnad->bna.ioceth);
8b230ed8
RM
2599 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2600
078086f3
RM
2601 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2602 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
8b230ed8 2603
078086f3 2604 err = bnad->bnad_completions.ioc_comp_status;
8b230ed8
RM
2605
2606 return err;
2607}
2608
2609/* Free BNA resources */
2610static void
078086f3
RM
2611bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
2612 u32 res_val_max)
8b230ed8
RM
2613{
2614 int i;
8b230ed8 2615
078086f3
RM
2616 for (i = 0; i < res_val_max; i++)
2617 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
8b230ed8
RM
2618}
2619
2620/* Allocates memory and interrupt resources for BNA */
2621static int
078086f3
RM
2622bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
2623 u32 res_val_max)
8b230ed8
RM
2624{
2625 int i, err;
8b230ed8 2626
078086f3
RM
2627 for (i = 0; i < res_val_max; i++) {
2628 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
8b230ed8
RM
2629 if (err)
2630 goto err_return;
2631 }
2632 return 0;
2633
2634err_return:
078086f3 2635 bnad_res_free(bnad, res_info, res_val_max);
8b230ed8
RM
2636 return err;
2637}
2638
2639/* Interrupt enable / disable */
2640static void
2641bnad_enable_msix(struct bnad *bnad)
2642{
2643 int i, ret;
8b230ed8
RM
2644 unsigned long flags;
2645
2646 spin_lock_irqsave(&bnad->bna_lock, flags);
2647 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2648 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2649 return;
2650 }
2651 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2652
2653 if (bnad->msix_table)
2654 return;
2655
8b230ed8 2656 bnad->msix_table =
b7ee31c5 2657 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
8b230ed8
RM
2658
2659 if (!bnad->msix_table)
2660 goto intx_mode;
2661
b7ee31c5 2662 for (i = 0; i < bnad->msix_num; i++)
8b230ed8
RM
2663 bnad->msix_table[i].entry = i;
2664
43c20200
AG
2665 ret = pci_enable_msix_range(bnad->pcidev, bnad->msix_table,
2666 1, bnad->msix_num);
2667 if (ret < 0) {
2668 goto intx_mode;
2669 } else if (ret < bnad->msix_num) {
ecc46789
IV
2670 dev_warn(&bnad->pcidev->dev,
2671 "%d MSI-X vectors allocated < %d requested\n",
2672 ret, bnad->msix_num);
8b230ed8
RM
2673
2674 spin_lock_irqsave(&bnad->bna_lock, flags);
2675 /* ret = #of vectors that we got */
271e8b79
RM
2676 bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
2677 (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
8b230ed8
RM
2678 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2679
271e8b79 2680 bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
8b230ed8 2681 BNAD_MAILBOX_MSIX_VECTORS;
8b230ed8 2682
43c20200
AG
2683 if (bnad->msix_num > ret) {
2684 pci_disable_msix(bnad->pcidev);
8b230ed8 2685 goto intx_mode;
43c20200
AG
2686 }
2687 }
078086f3
RM
2688
2689 pci_intx(bnad->pcidev, 0);
2690
8b230ed8
RM
2691 return;
2692
2693intx_mode:
ecc46789
IV
2694 dev_warn(&bnad->pcidev->dev,
2695 "MSI-X enable failed - operating in INTx mode\n");
8b230ed8
RM
2696
2697 kfree(bnad->msix_table);
2698 bnad->msix_table = NULL;
2699 bnad->msix_num = 0;
8b230ed8
RM
2700 spin_lock_irqsave(&bnad->bna_lock, flags);
2701 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2702 bnad_q_num_init(bnad);
2703 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2704}
2705
2706static void
2707bnad_disable_msix(struct bnad *bnad)
2708{
2709 u32 cfg_flags;
2710 unsigned long flags;
2711
2712 spin_lock_irqsave(&bnad->bna_lock, flags);
2713 cfg_flags = bnad->cfg_flags;
2714 if (bnad->cfg_flags & BNAD_CF_MSIX)
2715 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2716 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2717
2718 if (cfg_flags & BNAD_CF_MSIX) {
2719 pci_disable_msix(bnad->pcidev);
2720 kfree(bnad->msix_table);
2721 bnad->msix_table = NULL;
2722 }
2723}
2724
2725/* Netdev entry points */
2726static int
2727bnad_open(struct net_device *netdev)
2728{
2729 int err;
2730 struct bnad *bnad = netdev_priv(netdev);
2731 struct bna_pause_config pause_config;
8b230ed8
RM
2732 unsigned long flags;
2733
2734 mutex_lock(&bnad->conf_mutex);
2735
2736 /* Tx */
2737 err = bnad_setup_tx(bnad, 0);
2738 if (err)
2739 goto err_return;
2740
2741 /* Rx */
2742 err = bnad_setup_rx(bnad, 0);
2743 if (err)
2744 goto cleanup_tx;
2745
2746 /* Port */
2747 pause_config.tx_pause = 0;
2748 pause_config.rx_pause = 0;
2749
8b230ed8 2750 spin_lock_irqsave(&bnad->bna_lock, flags);
e29aa339
RM
2751 bna_enet_mtu_set(&bnad->bna.enet,
2752 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
1f9883e0 2753 bna_enet_pause_config(&bnad->bna.enet, &pause_config);
078086f3 2754 bna_enet_enable(&bnad->bna.enet);
8b230ed8
RM
2755 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2756
2757 /* Enable broadcast */
2758 bnad_enable_default_bcast(bnad);
2759
aad75b66
RM
2760 /* Restore VLANs, if any */
2761 bnad_restore_vlans(bnad, 0);
2762
8b230ed8
RM
2763 /* Set the UCAST address */
2764 spin_lock_irqsave(&bnad->bna_lock, flags);
2765 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2766 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2767
2768 /* Start the stats timer */
2769 bnad_stats_timer_start(bnad);
2770
2771 mutex_unlock(&bnad->conf_mutex);
2772
2773 return 0;
2774
2775cleanup_tx:
b3cc6e88 2776 bnad_destroy_tx(bnad, 0);
8b230ed8
RM
2777
2778err_return:
2779 mutex_unlock(&bnad->conf_mutex);
2780 return err;
2781}
2782
2783static int
2784bnad_stop(struct net_device *netdev)
2785{
2786 struct bnad *bnad = netdev_priv(netdev);
2787 unsigned long flags;
2788
2789 mutex_lock(&bnad->conf_mutex);
2790
2791 /* Stop the stats timer */
2792 bnad_stats_timer_stop(bnad);
2793
078086f3 2794 init_completion(&bnad->bnad_completions.enet_comp);
8b230ed8
RM
2795
2796 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3
RM
2797 bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
2798 bnad_cb_enet_disabled);
8b230ed8
RM
2799 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2800
078086f3 2801 wait_for_completion(&bnad->bnad_completions.enet_comp);
8b230ed8 2802
b3cc6e88
JH
2803 bnad_destroy_tx(bnad, 0);
2804 bnad_destroy_rx(bnad, 0);
8b230ed8
RM
2805
2806 /* Synchronize mailbox IRQ */
2807 bnad_mbox_irq_sync(bnad);
2808
2809 mutex_unlock(&bnad->conf_mutex);
2810
2811 return 0;
2812}
2813
2814/* TX */
5216562a
RM
2815/* Returns 0 for success */
2816static int
2817bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
2818 struct sk_buff *skb, struct bna_txq_entry *txqent)
8b230ed8 2819{
5216562a
RM
2820 u16 flags = 0;
2821 u32 gso_size;
2822 u16 vlan_tag = 0;
8b230ed8 2823
df8a39de
JP
2824 if (skb_vlan_tag_present(skb)) {
2825 vlan_tag = (u16)skb_vlan_tag_get(skb);
8b230ed8
RM
2826 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2827 }
2828 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
5216562a
RM
2829 vlan_tag = ((tcb->priority & 0x7) << VLAN_PRIO_SHIFT)
2830 | (vlan_tag & 0x1fff);
8b230ed8
RM
2831 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2832 }
8b230ed8
RM
2833 txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2834
2835 if (skb_is_gso(skb)) {
271e8b79 2836 gso_size = skb_shinfo(skb)->gso_size;
5216562a 2837 if (unlikely(gso_size > bnad->netdev->mtu)) {
271e8b79 2838 BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
5216562a 2839 return -EINVAL;
271e8b79
RM
2840 }
2841 if (unlikely((gso_size + skb_transport_offset(skb) +
5216562a 2842 tcp_hdrlen(skb)) >= skb->len)) {
b779d0af 2843 txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
271e8b79
RM
2844 txqent->hdr.wi.lso_mss = 0;
2845 BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
2846 } else {
b779d0af 2847 txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND_LSO);
271e8b79
RM
2848 txqent->hdr.wi.lso_mss = htons(gso_size);
2849 }
2850
5216562a 2851 if (bnad_tso_prepare(bnad, skb)) {
271e8b79 2852 BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
5216562a 2853 return -EINVAL;
8b230ed8 2854 }
5216562a 2855
8b230ed8
RM
2856 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2857 txqent->hdr.wi.l4_hdr_size_n_offset =
5216562a
RM
2858 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET(
2859 tcp_hdrlen(skb) >> 2, skb_transport_offset(skb)));
2860 } else {
b779d0af 2861 txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
8b230ed8
RM
2862 txqent->hdr.wi.lso_mss = 0;
2863
6654cf60 2864 if (unlikely(skb->len > (bnad->netdev->mtu + VLAN_ETH_HLEN))) {
271e8b79 2865 BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
5216562a 2866 return -EINVAL;
8b230ed8 2867 }
8b230ed8 2868
271e8b79 2869 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1c53730a 2870 __be16 net_proto = vlan_get_protocol(skb);
271e8b79 2871 u8 proto = 0;
8b230ed8 2872
1c53730a 2873 if (net_proto == htons(ETH_P_IP))
271e8b79 2874 proto = ip_hdr(skb)->protocol;
5216562a 2875#ifdef NETIF_F_IPV6_CSUM
1c53730a 2876 else if (net_proto == htons(ETH_P_IPV6)) {
271e8b79
RM
2877 /* nexthdr may not be TCP immediately. */
2878 proto = ipv6_hdr(skb)->nexthdr;
2879 }
5216562a 2880#endif
271e8b79
RM
2881 if (proto == IPPROTO_TCP) {
2882 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2883 txqent->hdr.wi.l4_hdr_size_n_offset =
2884 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2885 (0, skb_transport_offset(skb)));
2886
2887 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2888
2889 if (unlikely(skb_headlen(skb) <
5216562a
RM
2890 skb_transport_offset(skb) +
2891 tcp_hdrlen(skb))) {
271e8b79 2892 BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
5216562a 2893 return -EINVAL;
271e8b79 2894 }
271e8b79
RM
2895 } else if (proto == IPPROTO_UDP) {
2896 flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2897 txqent->hdr.wi.l4_hdr_size_n_offset =
2898 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2899 (0, skb_transport_offset(skb)));
2900
2901 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2902 if (unlikely(skb_headlen(skb) <
5216562a 2903 skb_transport_offset(skb) +
271e8b79 2904 sizeof(struct udphdr))) {
271e8b79 2905 BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
5216562a 2906 return -EINVAL;
271e8b79
RM
2907 }
2908 } else {
5216562a 2909
271e8b79 2910 BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
5216562a 2911 return -EINVAL;
8b230ed8 2912 }
5216562a 2913 } else
271e8b79 2914 txqent->hdr.wi.l4_hdr_size_n_offset = 0;
8b230ed8
RM
2915 }
2916
2917 txqent->hdr.wi.flags = htons(flags);
8b230ed8
RM
2918 txqent->hdr.wi.frame_length = htonl(skb->len);
2919
5216562a
RM
2920 return 0;
2921}
2922
2923/*
2924 * bnad_start_xmit : Netdev entry point for Transmit
2925 * Called under lock held by net_device
2926 */
2927static netdev_tx_t
2928bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2929{
2930 struct bnad *bnad = netdev_priv(netdev);
2931 u32 txq_id = 0;
2932 struct bna_tcb *tcb = NULL;
2933 struct bnad_tx_unmap *unmap_q, *unmap, *head_unmap;
2934 u32 prod, q_depth, vect_id;
2935 u32 wis, vectors, len;
2936 int i;
2937 dma_addr_t dma_addr;
2938 struct bna_txq_entry *txqent;
2939
271e8b79 2940 len = skb_headlen(skb);
8b230ed8 2941
5216562a
RM
2942 /* Sanity checks for the skb */
2943
2944 if (unlikely(skb->len <= ETH_HLEN)) {
27400df8 2945 dev_kfree_skb_any(skb);
5216562a
RM
2946 BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
2947 return NETDEV_TX_OK;
2948 }
2949 if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) {
27400df8 2950 dev_kfree_skb_any(skb);
5216562a
RM
2951 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2952 return NETDEV_TX_OK;
2953 }
2954 if (unlikely(len == 0)) {
27400df8 2955 dev_kfree_skb_any(skb);
5216562a
RM
2956 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2957 return NETDEV_TX_OK;
2958 }
2959
2960 tcb = bnad->tx_info[0].tcb[txq_id];
271e8b79 2961
5216562a
RM
2962 /*
2963 * Takes care of the Tx that is scheduled between clearing the flag
2964 * and the netif_tx_stop_all_queues() call.
2965 */
96e31adf 2966 if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
27400df8 2967 dev_kfree_skb_any(skb);
5216562a
RM
2968 BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
2969 return NETDEV_TX_OK;
2970 }
2971
96e31adf
RM
2972 q_depth = tcb->q_depth;
2973 prod = tcb->producer_index;
2974 unmap_q = tcb->unmap_q;
2975
5216562a
RM
2976 vectors = 1 + skb_shinfo(skb)->nr_frags;
2977 wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
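	/* e.g. an skb with 6 frags uses 7 vectors and therefore 2 work items */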
2978
2979 if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
27400df8 2980 dev_kfree_skb_any(skb);
5216562a
RM
2981 BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
2982 return NETDEV_TX_OK;
2983 }
2984
2985 /* Check for available TxQ resources */
2986 if (unlikely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
2987 if ((*tcb->hw_consumer_index != tcb->consumer_index) &&
2988 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2989 u32 sent;
2990 sent = bnad_txcmpl_process(bnad, tcb);
2991 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2992 bna_ib_ack(tcb->i_dbell, sent);
4e857c58 2993 smp_mb__before_atomic();
5216562a
RM
2994 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2995 } else {
2996 netif_stop_queue(netdev);
2997 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2998 }
2999
3000 smp_mb();
3001 /*
 3002		 * Check again to deal with the race between netif_stop_queue()
 3003		 * here and netif_wake_queue() in the interrupt handler, which
 3004		 * does not run under the netif tx lock.
3005 */
3006 if (likely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
3007 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
3008 return NETDEV_TX_BUSY;
3009 } else {
3010 netif_wake_queue(netdev);
3011 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
3012 }
3013 }
3014
3015 txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
3016 head_unmap = &unmap_q[prod];
3017
3018 /* Program the opcode, flags, frame_len, num_vectors in WI */
3019 if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
27400df8 3020 dev_kfree_skb_any(skb);
5216562a
RM
3021 return NETDEV_TX_OK;
3022 }
3023 txqent->hdr.wi.reserved = 0;
3024 txqent->hdr.wi.num_vectors = vectors;
3025
3026 head_unmap->skb = skb;
3027 head_unmap->nvecs = 0;
3028
3029 /* Program the vectors */
3030 unmap = head_unmap;
3031 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
3032 len, DMA_TO_DEVICE);
ba5ca784
IV
3033 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
3034 dev_kfree_skb_any(skb);
3035 BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
3036 return NETDEV_TX_OK;
3037 }
5216562a
RM
3038 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
3039 txqent->vector[0].length = htons(len);
3040 dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr);
3041 head_unmap->nvecs++;
3042
3043 for (i = 0, vect_id = 0; i < vectors - 1; i++) {
9e903e08 3044 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
24f5d33d 3045 u32 size = skb_frag_size(frag);
8b230ed8 3046
271e8b79 3047 if (unlikely(size == 0)) {
5216562a
RM
3048 /* Undo the changes starting at tcb->producer_index */
3049 bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3050 tcb->producer_index);
27400df8 3051 dev_kfree_skb_any(skb);
271e8b79
RM
3052 BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
3053 return NETDEV_TX_OK;
3054 }
3055
3056 len += size;
3057
5216562a
RM
3058 vect_id++;
3059 if (vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
8b230ed8 3060 vect_id = 0;
5216562a
RM
3061 BNA_QE_INDX_INC(prod, q_depth);
3062 txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
b779d0af 3063 txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
5216562a 3064 unmap = &unmap_q[prod];
8b230ed8
RM
3065 }
3066
4d5b1a67
IC
3067 dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
3068 0, size, DMA_TO_DEVICE);
ba5ca784
IV
3069 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
3070 /* Undo the changes starting at tcb->producer_index */
3071 bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3072 tcb->producer_index);
3073 dev_kfree_skb_any(skb);
3074 BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
3075 return NETDEV_TX_OK;
3076 }
3077
ecca6a96 3078 dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size);
8b230ed8 3079 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
5216562a
RM
3080 txqent->vector[vect_id].length = htons(size);
3081 dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr,
ecca6a96 3082 dma_addr);
5216562a 3083 head_unmap->nvecs++;
8b230ed8
RM
3084 }
3085
271e8b79 3086 if (unlikely(len != skb->len)) {
5216562a
RM
3087 /* Undo the changes starting at tcb->producer_index */
3088 bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
27400df8 3089 dev_kfree_skb_any(skb);
271e8b79
RM
3090 BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
3091 return NETDEV_TX_OK;
3092 }
3093
5216562a
RM
3094 BNA_QE_INDX_INC(prod, q_depth);
3095 tcb->producer_index = prod;
8b230ed8
RM
3096
3097 smp_mb();
be7fa326
RM
3098
3099 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
3100 return NETDEV_TX_OK;
3101
fee1253e
RM
3102 skb_tx_timestamp(skb);
3103
8b230ed8 3104 bna_txq_prod_indx_doorbell(tcb);
271e8b79 3105 smp_mb();
8b230ed8 3106
8b230ed8
RM
3107 return NETDEV_TX_OK;
3108}
3109
3110/*
 3111 * The spin lock is used to synchronize reads of the stats structures,
 3112 * which are written by BNA under the same lock.
3113 */
250e061e
ED
3114static struct rtnl_link_stats64 *
3115bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
8b230ed8
RM
3116{
3117 struct bnad *bnad = netdev_priv(netdev);
3118 unsigned long flags;
3119
3120 spin_lock_irqsave(&bnad->bna_lock, flags);
3121
250e061e
ED
3122 bnad_netdev_qstats_fill(bnad, stats);
3123 bnad_netdev_hwstats_fill(bnad, stats);
8b230ed8
RM
3124
3125 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3126
250e061e 3127 return stats;
8b230ed8
RM
3128}
3129
fe1624cf
RM
3130static void
3131bnad_set_rx_ucast_fltr(struct bnad *bnad)
3132{
3133 struct net_device *netdev = bnad->netdev;
3134 int uc_count = netdev_uc_count(netdev);
3135 enum bna_cb_status ret;
3136 u8 *mac_list;
3137 struct netdev_hw_addr *ha;
3138 int entry;
3139
3140 if (netdev_uc_empty(bnad->netdev)) {
1f9883e0 3141 bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
fe1624cf
RM
3142 return;
3143 }
3144
3145 if (uc_count > bna_attr(&bnad->bna)->num_ucmac)
3146 goto mode_default;
3147
3148 mac_list = kzalloc(uc_count * ETH_ALEN, GFP_ATOMIC);
3149 if (mac_list == NULL)
3150 goto mode_default;
3151
3152 entry = 0;
3153 netdev_for_each_uc_addr(ha, netdev) {
e2f9ecfc 3154 ether_addr_copy(&mac_list[entry * ETH_ALEN], &ha->addr[0]);
fe1624cf
RM
3155 entry++;
3156 }
3157
1f9883e0 3158 ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry, mac_list);
fe1624cf
RM
3159 kfree(mac_list);
3160
3161 if (ret != BNA_CB_SUCCESS)
3162 goto mode_default;
3163
3164 return;
3165
3166 /* ucast packets not in UCAM are routed to default function */
3167mode_default:
3168 bnad->cfg_flags |= BNAD_CF_DEFAULT;
1f9883e0 3169 bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
fe1624cf
RM
3170}
3171
3172static void
3173bnad_set_rx_mcast_fltr(struct bnad *bnad)
3174{
3175 struct net_device *netdev = bnad->netdev;
3176 int mc_count = netdev_mc_count(netdev);
3177 enum bna_cb_status ret;
3178 u8 *mac_list;
3179
3180 if (netdev->flags & IFF_ALLMULTI)
3181 goto mode_allmulti;
3182
3183 if (netdev_mc_empty(netdev))
3184 return;
3185
3186 if (mc_count > bna_attr(&bnad->bna)->num_mcmac)
3187 goto mode_allmulti;
3188
3189 mac_list = kzalloc((mc_count + 1) * ETH_ALEN, GFP_ATOMIC);
3190
3191 if (mac_list == NULL)
3192 goto mode_allmulti;
3193
e2f9ecfc 3194 ether_addr_copy(&mac_list[0], &bnad_bcast_addr[0]);
fe1624cf
RM
3195
3196 /* copy rest of the MCAST addresses */
3197 bnad_netdev_mc_list_get(netdev, mac_list);
1f9883e0 3198 ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, mac_list);
fe1624cf
RM
3199 kfree(mac_list);
3200
3201 if (ret != BNA_CB_SUCCESS)
3202 goto mode_allmulti;
3203
3204 return;
3205
3206mode_allmulti:
3207 bnad->cfg_flags |= BNAD_CF_ALLMULTI;
1f9883e0 3208 bna_rx_mcast_delall(bnad->rx_info[0].rx);
fe1624cf
RM
3209}
3210
a2122d95 3211void
8b230ed8
RM
3212bnad_set_rx_mode(struct net_device *netdev)
3213{
3214 struct bnad *bnad = netdev_priv(netdev);
fe1624cf 3215 enum bna_rxmode new_mode, mode_mask;
8b230ed8
RM
3216 unsigned long flags;
3217
3218 spin_lock_irqsave(&bnad->bna_lock, flags);
3219
fe1624cf
RM
3220 if (bnad->rx_info[0].rx == NULL) {
3221 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3222 return;
8b230ed8
RM
3223 }
3224
fe1624cf
RM
3225 /* clear bnad flags to update it with new settings */
3226 bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT |
3227 BNAD_CF_ALLMULTI);
271e8b79 3228
fe1624cf
RM
3229 new_mode = 0;
3230 if (netdev->flags & IFF_PROMISC) {
3231 new_mode |= BNAD_RXMODE_PROMISC_DEFAULT;
3232 bnad->cfg_flags |= BNAD_CF_PROMISC;
3233 } else {
3234 bnad_set_rx_mcast_fltr(bnad);
8b230ed8 3235
fe1624cf
RM
3236 if (bnad->cfg_flags & BNAD_CF_ALLMULTI)
3237 new_mode |= BNA_RXMODE_ALLMULTI;
8b230ed8 3238
fe1624cf 3239 bnad_set_rx_ucast_fltr(bnad);
8b230ed8 3240
fe1624cf
RM
3241 if (bnad->cfg_flags & BNAD_CF_DEFAULT)
3242 new_mode |= BNA_RXMODE_DEFAULT;
3243 }
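	/* IFF_PROMISC overrides the other filters; ALLMULTI and DEFAULT are
	 * only set when the mcast/ucast filter helpers above had to fall back.
	 */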
8b230ed8 3244
fe1624cf
RM
3245 mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT |
3246 BNA_RXMODE_ALLMULTI;
1f9883e0 3247 bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask);
8b230ed8 3248
8b230ed8
RM
3249 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3250}
3251
3252/*
3253 * bna_lock is used to sync writes to netdev->addr
3254 * conf_lock cannot be used since this call may be made
3255 * in a non-blocking context.
3256 */
3257static int
e2f9ecfc 3258bnad_set_mac_address(struct net_device *netdev, void *addr)
8b230ed8
RM
3259{
3260 int err;
3261 struct bnad *bnad = netdev_priv(netdev);
e2f9ecfc 3262 struct sockaddr *sa = (struct sockaddr *)addr;
8b230ed8
RM
3263 unsigned long flags;
3264
3265 spin_lock_irqsave(&bnad->bna_lock, flags);
3266
3267 err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
8b230ed8 3268 if (!err)
e2f9ecfc 3269 ether_addr_copy(netdev->dev_addr, sa->sa_data);
8b230ed8
RM
3270
3271 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3272
3273 return err;
3274}
3275
3276static int
e29aa339 3277bnad_mtu_set(struct bnad *bnad, int frame_size)
8b230ed8 3278{
8b230ed8
RM
3279 unsigned long flags;
3280
078086f3
RM
3281 init_completion(&bnad->bnad_completions.mtu_comp);
3282
3283 spin_lock_irqsave(&bnad->bna_lock, flags);
e29aa339 3284 bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set);
078086f3
RM
3285 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3286
3287 wait_for_completion(&bnad->bnad_completions.mtu_comp);
3288
3289 return bnad->bnad_completions.mtu_comp_status;
3290}
3291
3292static int
3293bnad_change_mtu(struct net_device *netdev, int new_mtu)
3294{
e29aa339 3295 int err, mtu;
8b230ed8 3296 struct bnad *bnad = netdev_priv(netdev);
e29aa339 3297 u32 rx_count = 0, frame, new_frame;
8b230ed8
RM
3298
3299 if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
3300 return -EINVAL;
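	/* The lower bound comes from ETH_ZLEN (60) minus ETH_HLEN (14),
	 * i.e. a 46-byte MTU; the upper bound is BNAD_JUMBO_MTU.
	 */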
3301
3302 mutex_lock(&bnad->conf_mutex);
3303
e29aa339 3304 mtu = netdev->mtu;
8b230ed8
RM
3305 netdev->mtu = new_mtu;
3306
e29aa339
RM
3307 frame = BNAD_FRAME_SIZE(mtu);
3308 new_frame = BNAD_FRAME_SIZE(new_mtu);
3309
3310 /* check if multi-buffer needs to be enabled */
3311 if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
3312 netif_running(bnad->netdev)) {
3313 /* only when transition is over 4K */
3314 if ((frame <= 4096 && new_frame > 4096) ||
3315 (frame > 4096 && new_frame <= 4096))
3316 rx_count = bnad_reinit_rx(bnad);
3317 }
3318
 3319	/* rx_count > 0 - new Rx objects were created;
 3320	 * report err = 0 to the stack and return in that case
3321 */
3322 err = bnad_mtu_set(bnad, new_frame);
078086f3
RM
3323 if (err)
3324 err = -EBUSY;
8b230ed8
RM
3325
3326 mutex_unlock(&bnad->conf_mutex);
3327 return err;
3328}
3329
8e586137 3330static int
80d5c368 3331bnad_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
8b230ed8
RM
3332{
3333 struct bnad *bnad = netdev_priv(netdev);
3334 unsigned long flags;
3335
3336 if (!bnad->rx_info[0].rx)
8e586137 3337 return 0;
8b230ed8
RM
3338
3339 mutex_lock(&bnad->conf_mutex);
3340
3341 spin_lock_irqsave(&bnad->bna_lock, flags);
3342 bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
f859d7cb 3343 set_bit(vid, bnad->active_vlans);
8b230ed8
RM
3344 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3345
3346 mutex_unlock(&bnad->conf_mutex);
8e586137
JP
3347
3348 return 0;
8b230ed8
RM
3349}
3350
8e586137 3351static int
80d5c368 3352bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
8b230ed8
RM
3353{
3354 struct bnad *bnad = netdev_priv(netdev);
3355 unsigned long flags;
3356
3357 if (!bnad->rx_info[0].rx)
8e586137 3358 return 0;
8b230ed8
RM
3359
3360 mutex_lock(&bnad->conf_mutex);
3361
3362 spin_lock_irqsave(&bnad->bna_lock, flags);
f859d7cb 3363 clear_bit(vid, bnad->active_vlans);
8b230ed8
RM
3364 bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
3365 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3366
3367 mutex_unlock(&bnad->conf_mutex);
8e586137
JP
3368
3369 return 0;
8b230ed8
RM
3370}
3371
877767dc
IV
3372static int bnad_set_features(struct net_device *dev, netdev_features_t features)
3373{
3374 struct bnad *bnad = netdev_priv(dev);
3375 netdev_features_t changed = features ^ dev->features;
3376
3377 if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(dev)) {
3378 unsigned long flags;
3379
3380 spin_lock_irqsave(&bnad->bna_lock, flags);
3381
3382 if (features & NETIF_F_HW_VLAN_CTAG_RX)
3383 bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
3384 else
3385 bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);
3386
3387 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3388 }
3389
3390 return 0;
3391}
3392
8b230ed8
RM
3393#ifdef CONFIG_NET_POLL_CONTROLLER
3394static void
3395bnad_netpoll(struct net_device *netdev)
3396{
3397 struct bnad *bnad = netdev_priv(netdev);
3398 struct bnad_rx_info *rx_info;
3399 struct bnad_rx_ctrl *rx_ctrl;
3400 u32 curr_mask;
3401 int i, j;
3402
3403 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
3404 bna_intx_disable(&bnad->bna, curr_mask);
3405 bnad_isr(bnad->pcidev->irq, netdev);
3406 bna_intx_enable(&bnad->bna, curr_mask);
3407 } else {
19dbff9f
RM
3408 /*
3409 * Tx processing may happen in sending context, so no need
3410 * to explicitly process completions here
3411 */
3412
3413 /* Rx processing */
8b230ed8
RM
3414 for (i = 0; i < bnad->num_rx; i++) {
3415 rx_info = &bnad->rx_info[i];
3416 if (!rx_info->rx)
3417 continue;
3418 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
3419 rx_ctrl = &rx_info->rx_ctrl[j];
271e8b79 3420 if (rx_ctrl->ccb)
8b230ed8
RM
3421 bnad_netif_rx_schedule_poll(bnad,
3422 rx_ctrl->ccb);
8b230ed8
RM
3423 }
3424 }
3425 }
3426}
3427#endif
3428
3429static const struct net_device_ops bnad_netdev_ops = {
3430 .ndo_open = bnad_open,
3431 .ndo_stop = bnad_stop,
3432 .ndo_start_xmit = bnad_start_xmit,
250e061e 3433 .ndo_get_stats64 = bnad_get_stats64,
8b230ed8 3434 .ndo_set_rx_mode = bnad_set_rx_mode,
8b230ed8
RM
3435 .ndo_validate_addr = eth_validate_addr,
3436 .ndo_set_mac_address = bnad_set_mac_address,
3437 .ndo_change_mtu = bnad_change_mtu,
8b230ed8
RM
3438 .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
3439 .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
877767dc 3440 .ndo_set_features = bnad_set_features,
8b230ed8
RM
3441#ifdef CONFIG_NET_POLL_CONTROLLER
3442 .ndo_poll_controller = bnad_netpoll
3443#endif
3444};
3445
3446static void
3447bnad_netdev_init(struct bnad *bnad, bool using_dac)
3448{
3449 struct net_device *netdev = bnad->netdev;
3450
e5ee20e7
MM
3451 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3452 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
877767dc
IV
3453 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX |
3454 NETIF_F_HW_VLAN_CTAG_RX;
8b230ed8 3455
e5ee20e7
MM
3456 netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
3457 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3458 NETIF_F_TSO | NETIF_F_TSO6;
8b230ed8 3459
877767dc 3460 netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
8b230ed8
RM
3461
3462 if (using_dac)
3463 netdev->features |= NETIF_F_HIGHDMA;
3464
8b230ed8
RM
3465 netdev->mem_start = bnad->mmio_start;
3466 netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
3467
3468 netdev->netdev_ops = &bnad_netdev_ops;
3469 bnad_set_ethtool_ops(netdev);
3470}
3471
3472/*
3473 * 1. Initialize the bnad structure
 3474 * 2. Set up the netdev pointer in pci_dev
d95d1081
JH
 3475 * 3. Initialize the number of TxQs, CQs and MSI-X vectors
3476 * 4. Initialize work queue.
8b230ed8
RM
3477 */
3478static int
3479bnad_init(struct bnad *bnad,
3480 struct pci_dev *pdev, struct net_device *netdev)
3481{
3482 unsigned long flags;
3483
3484 SET_NETDEV_DEV(netdev, &pdev->dev);
3485 pci_set_drvdata(pdev, netdev);
3486
3487 bnad->netdev = netdev;
3488 bnad->pcidev = pdev;
3489 bnad->mmio_start = pci_resource_start(pdev, 0);
3490 bnad->mmio_len = pci_resource_len(pdev, 0);
3491 bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
3492 if (!bnad->bar0) {
3493 dev_err(&pdev->dev, "ioremap for bar0 failed\n");
8b230ed8
RM
3494 return -ENOMEM;
3495 }
ecc46789
IV
3496 dev_info(&pdev->dev, "bar0 mapped to %p, len %llu\n", bnad->bar0,
3497 (unsigned long long) bnad->mmio_len);
8b230ed8
RM
3498
3499 spin_lock_irqsave(&bnad->bna_lock, flags);
3500 if (!bnad_msix_disable)
3501 bnad->cfg_flags = BNAD_CF_MSIX;
3502
3503 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
3504
3505 bnad_q_num_init(bnad);
3506 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3507
3508 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
3509 (bnad->num_rx * bnad->num_rxp_per_rx) +
3510 BNAD_MAILBOX_MSIX_VECTORS;
8b230ed8
RM
3511
3512 bnad->txq_depth = BNAD_TXQ_DEPTH;
3513 bnad->rxq_depth = BNAD_RXQ_DEPTH;
8b230ed8
RM
3514
3515 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3516 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
3517
01b54b14
JH
3518 sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
3519 bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
ba21fc69
WY
3520 if (!bnad->work_q) {
3521 iounmap(bnad->bar0);
01b54b14 3522 return -ENOMEM;
ba21fc69 3523 }
01b54b14 3524
8b230ed8
RM
3525 return 0;
3526}
3527
3528/*
3529 * Must be called after bnad_pci_uninit()
3530 * so that iounmap() and pci_set_drvdata(NULL)
3531 * happens only after PCI uninitialization.
3532 */
3533static void
3534bnad_uninit(struct bnad *bnad)
3535{
01b54b14
JH
3536 if (bnad->work_q) {
3537 flush_workqueue(bnad->work_q);
3538 destroy_workqueue(bnad->work_q);
3539 bnad->work_q = NULL;
3540 }
3541
8b230ed8
RM
3542 if (bnad->bar0)
3543 iounmap(bnad->bar0);
8b230ed8
RM
3544}
3545
3546/*
3547 * Initialize locks
078086f3 3548 a) Per ioceth mutes used for serializing configuration
8b230ed8
RM
3549 changes from OS interface
3550 b) spin lock used to protect bna state machine
3551 */
3552static void
3553bnad_lock_init(struct bnad *bnad)
3554{
3555 spin_lock_init(&bnad->bna_lock);
3556 mutex_init(&bnad->conf_mutex);
3557}
3558
3559static void
3560bnad_lock_uninit(struct bnad *bnad)
3561{
3562 mutex_destroy(&bnad->conf_mutex);
3563}
3564
3565/* PCI Initialization */
3566static int
3567bnad_pci_init(struct bnad *bnad,
3568 struct pci_dev *pdev, bool *using_dac)
3569{
3570 int err;
3571
3572 err = pci_enable_device(pdev);
3573 if (err)
3574 return err;
3575 err = pci_request_regions(pdev, BNAD_NAME);
3576 if (err)
3577 goto disable_device;
3e548079 3578 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
3db1cd5c 3579 *using_dac = true;
8b230ed8 3580 } else {
3e548079
RK
3581 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3582 if (err)
3583 goto release_regions;
3db1cd5c 3584 *using_dac = false;
8b230ed8
RM
3585 }
3586 pci_set_master(pdev);
3587 return 0;
3588
3589release_regions:
3590 pci_release_regions(pdev);
3591disable_device:
3592 pci_disable_device(pdev);
3593
3594 return err;
3595}
3596
3597static void
3598bnad_pci_uninit(struct pci_dev *pdev)
3599{
3600 pci_release_regions(pdev);
3601 pci_disable_device(pdev);
3602}
3603
c4eef189 3604static int
8b230ed8
RM
3605bnad_pci_probe(struct pci_dev *pdev,
3606 const struct pci_device_id *pcidev_id)
3607{
3caa1e95 3608 bool using_dac;
0120b99c 3609 int err;
8b230ed8
RM
3610 struct bnad *bnad;
3611 struct bna *bna;
3612 struct net_device *netdev;
3613 struct bfa_pcidev pcidev_info;
3614 unsigned long flags;
3615
8b230ed8
RM
3616 mutex_lock(&bnad_fwimg_mutex);
3617 if (!cna_get_firmware_buf(pdev)) {
3618 mutex_unlock(&bnad_fwimg_mutex);
ecc46789 3619 dev_err(&pdev->dev, "failed to load firmware image!\n");
8b230ed8
RM
3620 return -ENODEV;
3621 }
3622 mutex_unlock(&bnad_fwimg_mutex);
3623
3624 /*
3625 * Allocates sizeof(struct net_device + struct bnad)
3626 * bnad = netdev->priv
3627 */
3628 netdev = alloc_etherdev(sizeof(struct bnad));
3629 if (!netdev) {
8b230ed8
RM
3630 err = -ENOMEM;
3631 return err;
3632 }
3633 bnad = netdev_priv(netdev);
078086f3 3634 bnad_lock_init(bnad);
285eb9c3 3635 bnad->id = atomic_inc_return(&bna_id) - 1;
078086f3
RM
3636
3637 mutex_lock(&bnad->conf_mutex);
8b230ed8
RM
3638 /*
3639 * PCI initialization
0120b99c 3640 * Output : using_dac = 1 for 64 bit DMA
be7fa326 3641	 *                        = 0 for 32 bit DMA
8b230ed8 3642 */
e905ed57 3643 using_dac = false;
8b230ed8
RM
3644 err = bnad_pci_init(bnad, pdev, &using_dac);
3645 if (err)
44861f44 3646 goto unlock_mutex;
8b230ed8 3647
8b230ed8
RM
3648 /*
3649 * Initialize bnad structure
3650 * Setup relation between pci_dev & netdev
8b230ed8
RM
3651 */
3652 err = bnad_init(bnad, pdev, netdev);
3653 if (err)
3654 goto pci_uninit;
078086f3 3655
8b230ed8
RM
3656 /* Initialize netdev structure, set up ethtool ops */
3657 bnad_netdev_init(bnad, using_dac);
3658
815f41e7
RM
3659 /* Set link to down state */
3660 netif_carrier_off(netdev);
3661
7afc5dbd
KG
3662 /* Setup the debugfs node for this bfad */
3663 if (bna_debugfs_enable)
3664 bnad_debugfs_init(bnad);
3665
8b230ed8 3666 /* Get resource requirement form bna */
078086f3 3667 spin_lock_irqsave(&bnad->bna_lock, flags);
8b230ed8 3668 bna_res_req(&bnad->res_info[0]);
078086f3 3669 spin_unlock_irqrestore(&bnad->bna_lock, flags);
8b230ed8
RM
3670
3671 /* Allocate resources from bna */
078086f3 3672 err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
8b230ed8 3673 if (err)
078086f3 3674 goto drv_uninit;
8b230ed8
RM
3675
3676 bna = &bnad->bna;
3677
3678 /* Setup pcidev_info for bna_init() */
3679 pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3680 pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3681 pcidev_info.device_id = bnad->pcidev->device;
3682 pcidev_info.pci_bar_kva = bnad->bar0;
3683
8b230ed8
RM
3684 spin_lock_irqsave(&bnad->bna_lock, flags);
3685 bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
8b230ed8
RM
3686 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3687
3688 bnad->stats.bna_stats = &bna->stats;
3689
078086f3
RM
3690 bnad_enable_msix(bnad);
3691 err = bnad_mbox_irq_alloc(bnad);
3692 if (err)
3693 goto res_free;
3694
8b230ed8 3695 /* Set up timers */
078086f3 3696 setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
ebb56d37 3697 (unsigned long)bnad);
078086f3 3698 setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
ebb56d37 3699 (unsigned long)bnad);
078086f3 3700 setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
ebb56d37 3701 (unsigned long)bnad);
078086f3 3702 setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
ebb56d37 3703 (unsigned long)bnad);
8b230ed8 3704
8b230ed8
RM
3705 /*
3706 * Start the chip
078086f3
RM
 3707	 * If the callback reports an error, we bail out.
3708 * This is a catastrophic error.
8b230ed8 3709 */
078086f3
RM
3710 err = bnad_ioceth_enable(bnad);
3711 if (err) {
ecc46789 3712 dev_err(&pdev->dev, "initialization failed err=%d\n", err);
078086f3
RM
3713 goto probe_success;
3714 }
3715
3716 spin_lock_irqsave(&bnad->bna_lock, flags);
3717 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3718 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
3719 bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
3720 bna_attr(bna)->num_rxp - 1);
3721 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3722 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
3723 err = -EIO;
3724 }
3caa1e95
RM
3725 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3726 if (err)
3727 goto disable_ioceth;
3728
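	/*
	 * With the final queue counts known, compute the per-module (Tx/Rx)
	 * resource requirements, allocate them, and initialize the BNA
	 * modules.
	 */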
3729 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3
RM
3730 bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
3731 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3732
3733 err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
0caa9aae
RM
3734 if (err) {
3735 err = -EIO;
078086f3 3736 goto disable_ioceth;
0caa9aae 3737 }
078086f3
RM
3738
3739 spin_lock_irqsave(&bnad->bna_lock, flags);
3740 bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
3741 spin_unlock_irqrestore(&bnad->bna_lock, flags);
8b230ed8
RM
3742
 3743	/* Get the burnt-in MAC address and assign it to the netdev */
3744 spin_lock_irqsave(&bnad->bna_lock, flags);
d6b30598 3745 bna_enet_perm_mac_get(&bna->enet, bnad->perm_addr);
8b230ed8
RM
3746 bnad_set_netdev_perm_addr(bnad);
3747 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3748
0caa9aae
RM
3749 mutex_unlock(&bnad->conf_mutex);
3750
8b230ed8
RM
 3751	/* Finally, register with the net_device layer */
3752 err = register_netdev(netdev);
3753 if (err) {
ecc46789 3754 dev_err(&pdev->dev, "registering net device failed\n");
078086f3 3755 goto probe_uninit;
8b230ed8 3756 }
078086f3 3757 set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
8b230ed8 3758
0caa9aae
RM
3759 return 0;
3760
078086f3
RM
3761probe_success:
3762 mutex_unlock(&bnad->conf_mutex);
8b230ed8
RM
3763 return 0;
3764
078086f3 3765probe_uninit:
3fc72370 3766 mutex_lock(&bnad->conf_mutex);
078086f3
RM
3767 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3768disable_ioceth:
3769 bnad_ioceth_disable(bnad);
3770 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3771 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3772 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
8b230ed8
RM
3773 spin_lock_irqsave(&bnad->bna_lock, flags);
3774 bna_uninit(bna);
3775 spin_unlock_irqrestore(&bnad->bna_lock, flags);
078086f3 3776 bnad_mbox_irq_free(bnad);
8b230ed8 3777 bnad_disable_msix(bnad);
078086f3
RM
3778res_free:
3779 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3780drv_uninit:
7afc5dbd
KG
3781 /* Remove the debugfs node for this bnad */
3782 kfree(bnad->regdata);
3783 bnad_debugfs_uninit(bnad);
078086f3 3784 bnad_uninit(bnad);
8b230ed8
RM
3785pci_uninit:
3786 bnad_pci_uninit(pdev);
44861f44 3787unlock_mutex:
078086f3 3788 mutex_unlock(&bnad->conf_mutex);
8b230ed8 3789 bnad_lock_uninit(bnad);
8b230ed8
RM
3790 free_netdev(netdev);
3791 return err;
3792}
3793
c4eef189 3794static void
8b230ed8
RM
3795bnad_pci_remove(struct pci_dev *pdev)
3796{
3797 struct net_device *netdev = pci_get_drvdata(pdev);
3798 struct bnad *bnad;
3799 struct bna *bna;
3800 unsigned long flags;
3801
3802 if (!netdev)
3803 return;
3804
8b230ed8
RM
3805 bnad = netdev_priv(netdev);
3806 bna = &bnad->bna;
3807
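	/*
	 * Tear down in roughly the reverse order of probe: unregister the
	 * netdev, disable the IOC and stop its timers, uninit the BNA,
	 * release module and base resources, free the mailbox IRQ, disable
	 * MSI-X, undo the PCI setup, then drop locks/debugfs state before
	 * freeing the netdev.
	 */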
078086f3
RM
3808 if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
3809 unregister_netdev(netdev);
8b230ed8
RM
3810
3811 mutex_lock(&bnad->conf_mutex);
078086f3
RM
3812 bnad_ioceth_disable(bnad);
3813 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3814 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3815 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
8b230ed8
RM
3816 spin_lock_irqsave(&bnad->bna_lock, flags);
3817 bna_uninit(bna);
3818 spin_unlock_irqrestore(&bnad->bna_lock, flags);
8b230ed8 3819
078086f3
RM
3820 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3821 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3822 bnad_mbox_irq_free(bnad);
8b230ed8
RM
3823 bnad_disable_msix(bnad);
3824 bnad_pci_uninit(pdev);
078086f3 3825 mutex_unlock(&bnad->conf_mutex);
8b230ed8 3826 bnad_lock_uninit(bnad);
7afc5dbd
KG
3827 /* Remove the debugfs node for this bnad */
3828 kfree(bnad->regdata);
3829 bnad_debugfs_uninit(bnad);
8b230ed8
RM
3830 bnad_uninit(bnad);
3831 free_netdev(netdev);
3832}
3833
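/*
 * Match the Brocade CT and CT2 ASICs, restricted via class/class_mask to
 * their Ethernet-class functions (the storage functions of these CNAs are
 * presumably claimed by the separate bfa driver, not by bnad).
 */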
9baa3c34 3834static const struct pci_device_id bnad_pci_id_table[] = {
8b230ed8
RM
3835 {
3836 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3837 PCI_DEVICE_ID_BROCADE_CT),
3838 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3839 .class_mask = 0xffff00
586b2816
RM
3840 },
3841 {
3842 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3843 BFA_PCI_DEVICE_ID_CT2),
3844 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3845 .class_mask = 0xffff00
3846 },
3847 {0, },
8b230ed8
RM
3848};
3849
3850MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3851
3852static struct pci_driver bnad_pci_driver = {
3853 .name = BNAD_NAME,
3854 .id_table = bnad_pci_id_table,
3855 .probe = bnad_pci_probe,
c4eef189 3856 .remove = bnad_pci_remove,
8b230ed8
RM
3857};
3858
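/*
 * Explicit init/exit rather than module_pci_driver(): init also propagates
 * the bnad_ioc_auto_recover module parameter to the IOC layer, and exit
 * must release the firmware image cached at probe time.
 */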
3859static int __init
3860bnad_module_init(void)
3861{
3862 int err;
3863
ecc46789
IV
3864 pr_info("bna: QLogic BR-series 10G Ethernet driver - version: %s\n",
3865 BNAD_VERSION);
8b230ed8 3866
8a891429 3867 bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
8b230ed8
RM
3868
3869 err = pci_register_driver(&bnad_pci_driver);
3870 if (err < 0) {
ecc46789 3871 pr_err("bna: PCI driver registration failed err=%d\n", err);
8b230ed8
RM
3872 return err;
3873 }
3874
3875 return 0;
3876}
3877
3878static void __exit
3879bnad_module_exit(void)
3880{
3881 pci_unregister_driver(&bnad_pci_driver);
294ca868 3882 release_firmware(bfi_fw);
8b230ed8
RM
3883}
3884
3885module_init(bnad_module_init);
3886module_exit(bnad_module_exit);
3887
3888MODULE_AUTHOR("Brocade");
3889MODULE_LICENSE("GPL");
2732ba56 3890MODULE_DESCRIPTION("QLogic BR-series 10G PCIe Ethernet driver");
8b230ed8
RM
3891MODULE_VERSION(BNAD_VERSION);
3892MODULE_FIRMWARE(CNA_FW_FILE_CT);
1bf9fd70 3893MODULE_FIRMWARE(CNA_FW_FILE_CT2);