/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_ether.h>
#include <linux/ip.h>

#include "bnad.h"
#include "bna.h"
#include "cna.h"
DEFINE_MUTEX(bnad_fwimg_mutex);

static uint bnad_msix_disable;
module_param(bnad_msix_disable, uint, 0444);
MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");

static uint bnad_ioc_auto_recover = 1;
module_param(bnad_ioc_auto_recover, uint, 0444);
MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");

u32 bnad_rxqs_per_cq = 2;

const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
#define BNAD_TX_UNMAPQ_DEPTH (bnad->txq_depth * 2)

#define BNAD_RX_UNMAPQ_DEPTH (bnad->rxq_depth)

#define BNAD_GET_MBOX_IRQ(_bnad)				\
	(((_bnad)->cfg_flags & BNAD_CF_MSIX) ?			\
	 ((_bnad)->msix_table[(_bnad)->msix_num - 1].vector) :	\
	 ((_bnad)->pcidev->irq))
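/*
 * The mailbox interrupt always rides the last entry of the MSI-X table
 * (msix_num - 1); in INTx mode it shares the PCI line with the data path.
 */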
#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth)	\
do {								\
	(_res_info)->res_type = BNA_RES_T_MEM;			\
	(_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;	\
	(_res_info)->res_u.mem_info.num = (_num);		\
	(_res_info)->res_u.mem_info.len =			\
	sizeof(struct bnad_unmap_q) +				\
	(sizeof(struct bnad_skb_unmap) * ((_depth) - 1));	\
} while (0)
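/*
 * Each unmap queue allocation is a struct bnad_unmap_q header plus one
 * struct bnad_skb_unmap slot per entry; the "(_depth) - 1" presumably
 * reflects a first slot embedded in struct bnad_unmap_q itself.  A Tx
 * skb consumes 1 + nr_frags slots, hence the 2x depth used by
 * BNAD_TX_UNMAPQ_DEPTH above.
 */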
/*
 * Reinitialize completions in CQ, once Rx is taken down
 */
static void
bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bna_cq_entry *cmpl, *next_cmpl;
	unsigned int wi_range, wis = 0, ccb_prod = 0;
	int i;

	BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl,
			    wi_range);

	for (i = 0; i < ccb->q_depth; i++) {
		wis++;
		if (likely(--wi_range))
			next_cmpl = cmpl + 1;
		else {
			BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
			wis = 0;
			BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
						next_cmpl, wi_range);
		}
		cmpl->valid = 0;
		cmpl = next_cmpl;
	}
}
/*
 * Frees all pending Tx Bufs
 * At this point no activity is expected on the Q,
 * so DMA unmap & freeing is fine.
 */
static void
bnad_free_all_txbufs(struct bnad *bnad,
		 struct bna_tcb *tcb)
{
	u16		unmap_cons;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff	*skb = NULL;
	int		i;

	unmap_array = unmap_q->unmap_array;

	unmap_cons = 0;
	while (unmap_cons < unmap_q->q_depth) {
		skb = unmap_array[unmap_cons].skb;
		if (!skb) {
			unmap_cons++;
			continue;
		}
		unmap_array[unmap_cons].skb = NULL;

		pci_unmap_single(bnad->pcidev,
				 pci_unmap_addr(&unmap_array[unmap_cons],
						dma_addr), skb_headlen(skb),
						PCI_DMA_TODEVICE);

		pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
		unmap_cons++;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			pci_unmap_page(bnad->pcidev,
				       pci_unmap_addr(&unmap_array[unmap_cons],
						      dma_addr),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);
			pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
					   0);
			unmap_cons++;
		}
		dev_kfree_skb_any(skb);
	}
}
/* Data Path Handlers */

/*
 * bnad_free_txbufs : Frees the Tx bufs on Tx completion
 * Can be called in a) Interrupt context
 *		    b) Sending context
 *		    c) Tasklet context
 */
static u32
bnad_free_txbufs(struct bnad *bnad,
		 struct bna_tcb *tcb)
{
	u32		sent_packets = 0, sent_bytes = 0;
	u16		wis, unmap_cons, updated_hw_cons;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff	*skb;
	int		i;

	/*
	 * Just return if TX is stopped. This check is useful
	 * when bnad_free_txbufs() runs out of a tasklet scheduled
	 * before bnad_cb_tx_cleanup() cleared BNAD_RF_TX_STARTED bit
	 * but this routine runs actually after the cleanup has been
	 * executed.
	 */
	if (!test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))
		return 0;

	updated_hw_cons = *(tcb->hw_consumer_index);

	wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index,
				  updated_hw_cons, tcb->q_depth);

	BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));

	unmap_array = unmap_q->unmap_array;
	unmap_cons = unmap_q->consumer_index;

	prefetch(&unmap_array[unmap_cons + 1]);
	while (wis) {
		skb = unmap_array[unmap_cons].skb;

		unmap_array[unmap_cons].skb = NULL;

		sent_packets++;
		sent_bytes += skb->len;
		wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);

		pci_unmap_single(bnad->pcidev,
				 pci_unmap_addr(&unmap_array[unmap_cons],
						dma_addr), skb_headlen(skb),
				 PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
		BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);

		prefetch(&unmap_array[unmap_cons + 1]);
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			prefetch(&unmap_array[unmap_cons + 1]);

			pci_unmap_page(bnad->pcidev,
				       pci_unmap_addr(&unmap_array[unmap_cons],
						      dma_addr),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);
			pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
					   0);
			BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
		}
		dev_kfree_skb_any(skb);
	}

	/* Update consumer pointers. */
	tcb->consumer_index = updated_hw_cons;
	unmap_q->consumer_index = unmap_cons;

	tcb->txq->tx_packets += sent_packets;
	tcb->txq->tx_bytes += sent_bytes;

	return sent_packets;
}
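/*
 * Note on the completion accounting above: the adapter exports the TxQ
 * consumer index via *tcb->hw_consumer_index; the delta from the
 * driver's consumer_index (in work items) is drained, with each skb
 * accounting for BNA_TXQ_WI_NEEDED(1 + nr_frags) work items and
 * 1 + nr_frags unmap-queue slots.
 */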
/* Tx Free Tasklet function */
/* Frees for all the tcb's in all the Tx's */
/*
 * Scheduled from sending context, so that
 * the fat Tx lock is not held for too long
 * in the sending context.
 */
static void
bnad_tx_free_tasklet(unsigned long bnad_ptr)
{
	struct bnad *bnad = (struct bnad *)bnad_ptr;
	struct bna_tcb *tcb;
	u32		acked;
	int		i, j;

	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++) {
			tcb = bnad->tx_info[i].tcb[j];
			if (!tcb)
				continue;
			if (((u16) (*tcb->hw_consumer_index) !=
				tcb->consumer_index) &&
				(!test_and_set_bit(BNAD_TXQ_FREE_SENT,
						  &tcb->flags))) {
				acked = bnad_free_txbufs(bnad, tcb);
				bna_ib_ack(tcb->i_dbell, acked);
				smp_mb__before_clear_bit();
				clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
			}
		}
	}
}

static u32
bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct net_device *netdev = bnad->netdev;
	u32 sent;

	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
		return 0;

	sent = bnad_free_txbufs(bnad, tcb);
	if (sent) {
		if (netif_queue_stopped(netdev) &&
		    netif_carrier_ok(netdev) &&
		    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
				    BNAD_NETIF_WAKE_THRESHOLD) {
			netif_wake_queue(netdev);
			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
		}
		bna_ib_ack(tcb->i_dbell, sent);
	} else
		bna_ib_ack(tcb->i_dbell, 0);

	smp_mb__before_clear_bit();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

	return sent;
}
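/*
 * BNAD_TXQ_FREE_SENT serializes bnad_free_txbufs() callers (xmit path,
 * tasklet, interrupt): whoever wins the test_and_set_bit drains the
 * queue, everyone else backs off.  The queue is woken only once at
 * least BNAD_NETIF_WAKE_THRESHOLD entries are free, which avoids
 * stop/wake thrashing near the full mark.
 */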
/* MSIX Tx Completion Handler */
static irqreturn_t
bnad_msix_tx(int irq, void *data)
{
	struct bna_tcb *tcb = (struct bna_tcb *)data;
	struct bnad *bnad = tcb->bnad;

	bnad_tx(bnad, tcb);

	return IRQ_HANDLED;
}

static void
bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	rcb->producer_index = 0;
	rcb->consumer_index = 0;

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
}
static void
bnad_free_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q;
	struct sk_buff *skb;

	unmap_q = rcb->unmap_q;
	while (BNA_QE_IN_USE_CNT(unmap_q, unmap_q->q_depth)) {
		skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
		BUG_ON(!(skb));
		unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
		pci_unmap_single(bnad->pcidev, pci_unmap_addr(&unmap_q->
					unmap_array[unmap_q->consumer_index],
					dma_addr), rcb->rxq->buffer_size +
					NET_IP_ALIGN, PCI_DMA_FROMDEVICE);
		dev_kfree_skb(skb);
		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
		BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
	}

	bnad_reset_rcb(bnad, rcb);
}
static void
bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
{
	u16 to_alloc, alloced, unmap_prod, wi_range;
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct bna_rxq_entry *rxent;
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	alloced = 0;
	to_alloc =
		BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);

	unmap_array = unmap_q->unmap_array;
	unmap_prod = unmap_q->producer_index;

	BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);

	while (to_alloc--) {
		if (!wi_range) {
			BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
					     wi_range);
		}
		skb = alloc_skb(rcb->rxq->buffer_size + NET_IP_ALIGN,
				     GFP_ATOMIC);
		if (unlikely(!skb)) {
			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
			goto finishing;
		}
		skb->dev = bnad->netdev;
		skb_reserve(skb, NET_IP_ALIGN);
		unmap_array[unmap_prod].skb = skb;
		dma_addr = pci_map_single(bnad->pcidev, skb->data,
			rcb->rxq->buffer_size, PCI_DMA_FROMDEVICE);
		pci_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
				   dma_addr);
		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);

		rxent++;
		wi_range--;
		alloced++;
	}

finishing:
	if (likely(alloced)) {
		unmap_q->producer_index = unmap_prod;
		rcb->producer_index = unmap_prod;
		smp_mb();
		bna_rxq_prod_indx_doorbell(rcb);
	}
}
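/*
 * The producer indices are published and a memory barrier issued before
 * the doorbell write, so the adapter never observes the new producer
 * index ahead of the buffer addresses written into the RxQ entries.
 */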
/*
 * Locking is required in the enable path
 * because it is called from a napi poll
 * context, where the bna_lock is not held
 * unlike the IRQ context.
 */
static void
bnad_enable_txrx_irqs(struct bnad *bnad)
{
	struct bna_tcb *tcb;
	struct bna_ccb *ccb;
	int i, j;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++) {
			tcb = bnad->tx_info[i].tcb[j];
			bna_ib_coalescing_timer_set(tcb->i_dbell,
				tcb->txq->ib->ib_config.coalescing_timeo);
			bna_ib_ack(tcb->i_dbell, 0);
		}
	}

	for (i = 0; i < bnad->num_rx; i++) {
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			ccb = bnad->rx_info[i].rx_ctrl[j].ccb;
			bnad_enable_rx_irq_unsafe(ccb);
		}
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
static void
bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
		if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
			 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
			bnad_alloc_n_post_rxbufs(bnad, rcb);
		smp_mb__before_clear_bit();
		clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
	}
}
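/*
 * Refill arithmetic: the shift turns the free count into a threshold
 * test -- the queue is replenished only when at least
 * (1 << BNAD_RXQ_REFILL_THRESHOLD_SHIFT) slots are empty, batching
 * allocations instead of refilling per packet.
 */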
static u32
bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
{
	struct bna_cq_entry *cmpl, *next_cmpl;
	struct bna_rcb *rcb = NULL;
	unsigned int wi_range, packets = 0, wis = 0;
	struct bnad_unmap_q *unmap_q;
	struct sk_buff *skb;
	u32 flags;
	u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;

	prefetch(bnad->netdev);
	BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
			    wi_range);
	BUG_ON(!(wi_range <= ccb->q_depth));
	while (cmpl->valid && packets < budget) {
		packets++;
		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));

		if (qid0 == cmpl->rxq_id)
			rcb = ccb->rcb[0];
		else
			rcb = ccb->rcb[1];

		unmap_q = rcb->unmap_q;

		skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
		BUG_ON(!(skb));
		unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
		pci_unmap_single(bnad->pcidev,
				 pci_unmap_addr(&unmap_q->
						unmap_array[unmap_q->
							    consumer_index],
						dma_addr),
				 rcb->rxq->buffer_size,
				 PCI_DMA_FROMDEVICE);
		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);

		/* Should be more efficient ? Performance ? */
		BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);

		wis++;
		if (likely(--wi_range))
			next_cmpl = cmpl + 1;
		else {
			BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
			wis = 0;
			BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt,
						next_cmpl, wi_range);
			BUG_ON(!(wi_range <= ccb->q_depth));
		}

		flags = ntohl(cmpl->flags);
		if (unlikely
		    (flags &
		     (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
		      BNA_CQ_EF_TOO_LONG))) {
			dev_kfree_skb_any(skb);
			rcb->rxq->rx_packets_with_error++;
			goto next;
		}

		skb_put(skb, ntohs(cmpl->length));
		if (likely
		    (bnad->rx_csum &&
		     (((flags & BNA_CQ_EF_IPV4) &&
		      (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
		      (flags & BNA_CQ_EF_IPV6)) &&
		      (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
		      (flags & BNA_CQ_EF_L4_CKSUM_OK)))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		rcb->rxq->rx_packets++;
		rcb->rxq->rx_bytes += skb->len;
		skb->protocol = eth_type_trans(skb, bnad->netdev);

		if (bnad->vlan_grp && (flags & BNA_CQ_EF_VLAN)) {
			struct bnad_rx_ctrl *rx_ctrl =
				(struct bnad_rx_ctrl *)ccb->ctrl;
			if (skb->ip_summed == CHECKSUM_UNNECESSARY)
				vlan_gro_receive(&rx_ctrl->napi, bnad->vlan_grp,
						ntohs(cmpl->vlan_tag), skb);
			else
				vlan_hwaccel_receive_skb(skb,
							 bnad->vlan_grp,
							 ntohs(cmpl->vlan_tag));

		} else { /* Not VLAN tagged/stripped */
			struct bnad_rx_ctrl *rx_ctrl =
				(struct bnad_rx_ctrl *)ccb->ctrl;
			if (skb->ip_summed == CHECKSUM_UNNECESSARY)
				napi_gro_receive(&rx_ctrl->napi, skb);
			else
				netif_receive_skb(skb);
		}

next:
		cmpl->valid = 0;
		cmpl = next_cmpl;
	}

	BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);

	if (likely(ccb)) {
		bna_ib_ack(ccb->i_dbell, packets);
		bnad_refill_rxq(bnad, ccb->rcb[0]);
		if (ccb->rcb[1])
			bnad_refill_rxq(bnad, ccb->rcb[1]);
	} else
		bna_ib_ack(ccb->i_dbell, 0);

	return packets;
}
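/*
 * A CQ aggregates completions for both RxQs of its path
 * (bnad_rxqs_per_cq == 2); each completion carries the originating
 * rxq_id, which the loop above matches against rcb[0]'s id to pick the
 * right RCB before unmapping the buffer.
 */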
static void
bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
{
	bna_ib_coalescing_timer_set(ccb->i_dbell, 0);
	bna_ib_ack(ccb->i_dbell, 0);
}

static void
bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
{
	spin_lock_irq(&bnad->bna_lock); /* Because of polling context */
	bnad_enable_rx_irq_unsafe(ccb);
	spin_unlock_irq(&bnad->bna_lock);
}

static void
bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
	if (likely(napi_schedule_prep((&rx_ctrl->napi)))) {
		bnad_disable_rx_irq(bnad, ccb);
		__napi_schedule((&rx_ctrl->napi));
	}
	BNAD_UPDATE_CTR(bnad, netif_rx_schedule);
}
/* MSIX Rx Path Handler */
static irqreturn_t
bnad_msix_rx(int irq, void *data)
{
	struct bna_ccb *ccb = (struct bna_ccb *)data;
	struct bnad *bnad = ccb->bnad;

	bnad_netif_rx_schedule_poll(bnad, ccb);

	return IRQ_HANDLED;
}
/* Interrupt handlers */

/* Mbox Interrupt Handlers */
static irqreturn_t
bnad_msix_mbox_handler(int irq, void *data)
{
	u32 intr_status;
	unsigned long flags;
	struct net_device *netdev = data;
	struct bnad *bnad;

	bnad = netdev_priv(netdev);

	/* BNA_ISR_GET(bnad); Inc Ref count */
	spin_lock_irqsave(&bnad->bna_lock, flags);

	bna_intr_status_get(&bnad->bna, intr_status);

	if (BNA_IS_MBOX_ERR_INTR(intr_status))
		bna_mbox_handler(&bnad->bna, intr_status);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* BNAD_ISR_PUT(bnad); Dec Ref count */
	return IRQ_HANDLED;
}
static irqreturn_t
bnad_isr(int irq, void *data)
{
	int i, j;
	u32 intr_status;
	unsigned long flags;
	struct net_device *netdev = data;
	struct bnad *bnad = netdev_priv(netdev);
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	bna_intr_status_get(&bnad->bna, intr_status);
	if (!intr_status) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return IRQ_NONE;
	}

	if (BNA_IS_MBOX_ERR_INTR(intr_status)) {
		bna_mbox_handler(&bnad->bna, intr_status);
		if (!BNA_IS_INTX_DATA_INTR(intr_status)) {
			spin_unlock_irqrestore(&bnad->bna_lock, flags);
			goto done;
		}
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Process data interrupts */
	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			rx_ctrl = &rx_info->rx_ctrl[j];
			if (rx_ctrl->ccb)
				bnad_netif_rx_schedule_poll(bnad,
							    rx_ctrl->ccb);
		}
	}
done:
	return IRQ_HANDLED;
}
/*
 * Called in interrupt / callback context
 * with bna_lock held, so cfg_flags access is OK
 */
static void
bnad_enable_mbox_irq(struct bnad *bnad)
{
	int irq = BNAD_GET_MBOX_IRQ(bnad);

	if (!(bnad->cfg_flags & BNAD_CF_MSIX))
		return;

	if (test_and_clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
		enable_irq(irq);
	BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
}

/*
 * Called with bnad->bna_lock held b'cos of
 * bnad->cfg_flags access.
 */
static void
bnad_disable_mbox_irq(struct bnad *bnad)
{
	int irq = BNAD_GET_MBOX_IRQ(bnad);

	if (!(bnad->cfg_flags & BNAD_CF_MSIX))
		return;

	if (!test_and_set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
		disable_irq_nosync(irq);
	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
}
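/*
 * The BNAD_RF_MBOX_IRQ_DISABLED bit keeps the enable/disable pair
 * idempotent, so disable_irq_nosync()/enable_irq() calls stay balanced
 * no matter how many times the callbacks fire.
 */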
/* Control Path Handlers */

/* Callbacks */
void
bnad_cb_device_enable_mbox_intr(struct bnad *bnad)
{
	bnad_enable_mbox_irq(bnad);
}

void
bnad_cb_device_disable_mbox_intr(struct bnad *bnad)
{
	bnad_disable_mbox_irq(bnad);
}

void
bnad_cb_device_enabled(struct bnad *bnad, enum bna_cb_status status)
{
	complete(&bnad->bnad_completions.ioc_comp);
	bnad->bnad_completions.ioc_comp_status = status;
}

void
bnad_cb_device_disabled(struct bnad *bnad, enum bna_cb_status status)
{
	complete(&bnad->bnad_completions.ioc_comp);
	bnad->bnad_completions.ioc_comp_status = status;
}

static void
bnad_cb_port_disabled(void *arg, enum bna_cb_status status)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.port_comp);

	netif_carrier_off(bnad->netdev);
}
void
bnad_cb_port_link_status(struct bnad *bnad,
			enum bna_link_status link_status)
{
	bool link_up = 0;

	link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);

	if (link_status == BNA_CEE_UP) {
		set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
		BNAD_UPDATE_CTR(bnad, cee_up);
	} else
		clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);

	if (link_up) {
		if (!netif_carrier_ok(bnad->netdev)) {
			pr_warn("bna: %s link up\n",
				bnad->netdev->name);
			netif_carrier_on(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, link_toggle);
			if (test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags)) {
				/* Force an immediate Transmit Schedule */
				pr_info("bna: %s TX_STARTED\n",
					bnad->netdev->name);
				netif_wake_queue(bnad->netdev);
				BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
			} else {
				netif_stop_queue(bnad->netdev);
				BNAD_UPDATE_CTR(bnad, netif_queue_stop);
			}
		}
	} else {
		if (netif_carrier_ok(bnad->netdev)) {
			pr_warn("bna: %s link down\n",
				bnad->netdev->name);
			netif_carrier_off(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, link_toggle);
		}
	}
}
static void
bnad_cb_tx_disabled(void *arg, struct bna_tx *tx,
			enum bna_cb_status status)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.tx_comp);
}

static void
bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;

	tx_info->tcb[tcb->id] = tcb;
	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
	unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH;
}

static void
bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;

	tx_info->tcb[tcb->id] = NULL;
}

static void
bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
	unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
}
static void
bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_info *rx_info =
			(struct bnad_rx_info *)ccb->cq->rx->priv;

	rx_info->rx_ctrl[ccb->id].ccb = ccb;
	ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
}

static void
bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_info *rx_info =
			(struct bnad_rx_info *)ccb->cq->rx->priv;

	rx_info->rx_ctrl[ccb->id].ccb = NULL;
}

static void
bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;

	if (tx_info != &bnad->tx_info[0])
		return;

	clear_bit(BNAD_RF_TX_STARTED, &bnad->run_flags);
	netif_stop_queue(bnad->netdev);
	pr_info("bna: %s TX_STOPPED\n", bnad->netdev->name);
}

static void
bnad_cb_tx_resume(struct bnad *bnad, struct bna_tcb *tcb)
{
	if (test_and_set_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))
		return;

	if (netif_carrier_ok(bnad->netdev)) {
		pr_info("bna: %s TX_STARTED\n", bnad->netdev->name);
		netif_wake_queue(bnad->netdev);
		BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
	}
}
static void
bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;

	if (!tcb || (!tcb->unmap_q))
		return;

	if (!unmap_q->unmap_array)
		return;

	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
		return;

	bnad_free_all_txbufs(bnad, tcb);

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;

	smp_mb__before_clear_bit();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
}

static void
bnad_cb_rx_cleanup(struct bnad *bnad,
			struct bna_ccb *ccb)
{
	bnad_cq_cmpl_init(bnad, ccb);

	bnad_free_rxbufs(bnad, ccb->rcb[0]);
	clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);

	if (ccb->rcb[1]) {
		bnad_free_rxbufs(bnad, ccb->rcb[1]);
		clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
	}
}
static void
bnad_cb_rx_post(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	set_bit(BNAD_RXQ_STARTED, &rcb->flags);

	/* Now allocate & post buffers for this RCB */
	/* !!Allocation in callback context */
	if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
		if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
			>> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
			bnad_alloc_n_post_rxbufs(bnad, rcb);
		smp_mb__before_clear_bit();
		clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
	}
}

static void
bnad_cb_rx_disabled(void *arg, struct bna_rx *rx,
			enum bna_cb_status status)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.rx_comp);
}

static void
bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx,
		       enum bna_cb_status status)
{
	bnad->bnad_completions.mcast_comp_status = status;
	complete(&bnad->bnad_completions.mcast_comp);
}

void
bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
		       struct bna_stats *stats)
{
	if (status == BNA_CB_SUCCESS)
		BNAD_UPDATE_CTR(bnad, hw_stats_updates);

	if (!netif_running(bnad->netdev) ||
		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		return;

	mod_timer(&bnad->stats_timer,
		  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
}

void
bnad_cb_stats_clr(struct bnad *bnad)
{
}
/* Resource allocation, free functions */

static void
bnad_mem_free(struct bnad *bnad,
	      struct bna_mem_info *mem_info)
{
	int i;
	dma_addr_t dma_pa;

	if (mem_info->mdl == NULL)
		return;

	for (i = 0; i < mem_info->num; i++) {
		if (mem_info->mdl[i].kva != NULL) {
			if (mem_info->mem_type == BNA_MEM_T_DMA) {
				BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
						dma_pa);
				pci_free_consistent(bnad->pcidev,
						mem_info->mdl[i].len,
						mem_info->mdl[i].kva, dma_pa);
			} else
				kfree(mem_info->mdl[i].kva);
		}
	}
	kfree(mem_info->mdl);
	mem_info->mdl = NULL;
}
static int
bnad_mem_alloc(struct bnad *bnad,
	       struct bna_mem_info *mem_info)
{
	int i;
	dma_addr_t dma_pa;

	if ((mem_info->num == 0) || (mem_info->len == 0)) {
		mem_info->mdl = NULL;
		return 0;
	}

	mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
				GFP_KERNEL);
	if (mem_info->mdl == NULL)
		return -ENOMEM;

	if (mem_info->mem_type == BNA_MEM_T_DMA) {
		for (i = 0; i < mem_info->num; i++) {
			mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva =
				pci_alloc_consistent(bnad->pcidev,
						mem_info->len, &dma_pa);

			if (mem_info->mdl[i].kva == NULL)
				goto err_return;

			BNA_SET_DMA_ADDR(dma_pa,
					 &(mem_info->mdl[i].dma));
		}
	} else {
		for (i = 0; i < mem_info->num; i++) {
			mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva = kzalloc(mem_info->len,
							GFP_KERNEL);
			if (mem_info->mdl[i].kva == NULL)
				goto err_return;
		}
	}

	return 0;

err_return:
	bnad_mem_free(bnad, mem_info);
	return -ENOMEM;
}
/* Free IRQ for Mailbox */
static void
bnad_mbox_irq_free(struct bnad *bnad,
		   struct bna_intr_info *intr_info)
{
	int irq;
	unsigned long flags;

	if (intr_info->idl == NULL)
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	bnad_disable_mbox_irq(bnad);

	irq = BNAD_GET_MBOX_IRQ(bnad);
	free_irq(irq, bnad->netdev);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	kfree(intr_info->idl);
}
/*
 * Allocates IRQ for Mailbox, but keep it disabled
 * This will be enabled once we get the mbox enable callback
 */
static int
bnad_mbox_irq_alloc(struct bnad *bnad,
		    struct bna_intr_info *intr_info)
{
	int		err;
	unsigned long	irq_flags, flags;
	u32		irq;
	irq_handler_t	irq_handler;

	/* Mbox should use only 1 vector */

	intr_info->idl = kzalloc(sizeof(*(intr_info->idl)), GFP_KERNEL);
	if (!intr_info->idl)
		return -ENOMEM;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (bnad->cfg_flags & BNAD_CF_MSIX) {
		irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
		irq = bnad->msix_table[bnad->msix_num - 1].vector;
		irq_flags = 0;
		intr_info->intr_type = BNA_INTR_T_MSIX;
		intr_info->idl[0].vector = bnad->msix_num - 1;
	} else {
		irq_handler = (irq_handler_t)bnad_isr;
		irq = bnad->pcidev->irq;
		/* Keep request_irq() flags in a separate variable, so the
		 * spinlock's saved interrupt flags are not clobbered. */
		irq_flags = IRQF_SHARED;
		intr_info->intr_type = BNA_INTR_T_INTX;
		/* intr_info->idl.vector = 0 ? */
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);

	err = request_irq(irq, irq_handler, irq_flags,
			  bnad->mbox_irq_name, bnad->netdev);
	if (err) {
		kfree(intr_info->idl);
		intr_info->idl = NULL;
		return err;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad_disable_mbox_irq(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return 0;
}
static void
bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
{
	kfree(intr_info->idl);
	intr_info->idl = NULL;
}

/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
static int
bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
		    uint txrx_id, struct bna_intr_info *intr_info)
{
	int i, vector_start = 0;
	u32 cfg_flags;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	cfg_flags = bnad->cfg_flags;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (cfg_flags & BNAD_CF_MSIX) {
		intr_info->intr_type = BNA_INTR_T_MSIX;
		intr_info->idl = kcalloc(intr_info->num,
					sizeof(struct bna_intr_descr),
					GFP_KERNEL);
		if (!intr_info->idl)
			return -ENOMEM;

		switch (src) {
		case BNAD_INTR_TX:
			vector_start = txrx_id;
			break;

		case BNAD_INTR_RX:
			vector_start = bnad->num_tx * bnad->num_txq_per_tx +
					txrx_id;
			break;

		default:
			BUG();
		}

		for (i = 0; i < intr_info->num; i++)
			intr_info->idl[i].vector = vector_start + i;
	} else {
		intr_info->intr_type = BNA_INTR_T_INTX;
		intr_info->num = 1;
		intr_info->idl = kcalloc(intr_info->num,
					sizeof(struct bna_intr_descr),
					GFP_KERNEL);
		if (!intr_info->idl)
			return -ENOMEM;

		switch (src) {
		case BNAD_INTR_TX:
			intr_info->idl[0].vector = 0x1; /* Bit mask : Tx IB */
			break;

		case BNAD_INTR_RX:
			intr_info->idl[0].vector = 0x2; /* Bit mask : Rx IB */
			break;
		}
	}
	return 0;
}
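/*
 * MSI-X vector layout produced above: TxQ vectors occupy entries
 * [0 .. num_tx * num_txq_per_tx - 1], CQ vectors follow, and the
 * mailbox takes the last entry.  E.g. with 1 TxQ and 4 Rx paths,
 * entry 0 is the TxQ, entries 1-4 the CQs, entry 5 the mailbox.
 */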
/**
 * NOTE: Should be called for MSIX only
 * Unregisters Tx MSIX vector(s) from the kernel
 */
static void
bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
			int num_txqs)
{
	int i;
	int vector_num;

	for (i = 0; i < num_txqs; i++) {
		if (tx_info->tcb[i] == NULL)
			continue;

		vector_num = tx_info->tcb[i]->intr_vector;
		free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
	}
}

/**
 * NOTE: Should be called for MSIX only
 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
 */
static int
bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
			uint tx_id, int num_txqs)
{
	int i;
	int err;
	int vector_num;

	for (i = 0; i < num_txqs; i++) {
		vector_num = tx_info->tcb[i]->intr_vector;
		sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
				tx_id + tx_info->tcb[i]->id);
		err = request_irq(bnad->msix_table[vector_num].vector,
				  (irq_handler_t)bnad_msix_tx, 0,
				  tx_info->tcb[i]->name,
				  tx_info->tcb[i]);
		if (err)
			goto err_return;
	}

	return 0;

err_return:
	if (i > 0)
		bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
	return -1;
}
/**
 * NOTE: Should be called for MSIX only
 * Unregisters Rx MSIX vector(s) from the kernel
 */
static void
bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
			int num_rxps)
{
	int i;
	int vector_num;

	for (i = 0; i < num_rxps; i++) {
		if (rx_info->rx_ctrl[i].ccb == NULL)
			continue;

		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
		free_irq(bnad->msix_table[vector_num].vector,
			 rx_info->rx_ctrl[i].ccb);
	}
}

/**
 * NOTE: Should be called for MSIX only
 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
 */
static int
bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
			uint rx_id, int num_rxps)
{
	int i;
	int err;
	int vector_num;

	for (i = 0; i < num_rxps; i++) {
		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
		sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
			bnad->netdev->name,
			rx_id + rx_info->rx_ctrl[i].ccb->id);
		err = request_irq(bnad->msix_table[vector_num].vector,
				  (irq_handler_t)bnad_msix_rx, 0,
				  rx_info->rx_ctrl[i].ccb->name,
				  rx_info->rx_ctrl[i].ccb);
		if (err)
			goto err_return;
	}

	return 0;

err_return:
	if (i > 0)
		bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
	return -1;
}
/* Free Tx object Resources */
static void
bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
{
	int i;

	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
	}
}

/* Allocates memory and interrupt resources for Tx object */
static int
bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
		  uint tx_id)
{
	int i, err = 0;

	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			err = bnad_mem_alloc(bnad,
					&res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
					&res_info[i].res_u.intr_info);
		if (err)
			goto err_return;
	}
	return 0;

err_return:
	bnad_tx_res_free(bnad, res_info);
	return err;
}

/* Free Rx object Resources */
static void
bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
{
	int i;

	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
	}
}

/* Allocates memory and interrupt resources for Rx object */
static int
bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
		  uint rx_id)
{
	int i, err = 0;

	/* All memory needs to be allocated before setup_ccbs */
	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			err = bnad_mem_alloc(bnad,
					&res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
					&res_info[i].res_u.intr_info);
		if (err)
			goto err_return;
	}
	return 0;

err_return:
	bnad_rx_res_free(bnad, res_info);
	return err;
}
/* Timer callbacks */
/* a) IOC timer */
static void
bnad_ioc_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_ioc_timeout((void *) &bnad->bna.device.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_ioc_hb_check(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_ioc_hb_check((void *) &bnad->bna.device.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_ioc_sem_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_ioc_sem_timeout((void *) &bnad->bna.device.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * All timer routines use bnad->bna_lock to protect against
 * the following race, which may occur in case of no locking:
 * one CPU tests a run flag while another clears it and tears the
 * timer down, leaving the first CPU to re-arm a dead timer.
 */
/* b) Dynamic Interrupt Moderation Timer */
static void
bnad_dim_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	int i, j;
	unsigned long flags;

	if (!netif_carrier_ok(bnad->netdev))
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			rx_ctrl = &rx_info->rx_ctrl[j];
			if (!rx_ctrl->ccb)
				continue;
			bna_rx_dim_update(rx_ctrl->ccb);
		}
	}

	/* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
	if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
		mod_timer(&bnad->dim_timer,
			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
/* c) Statistics Timer */
static void
bnad_stats_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	if (!netif_running(bnad->netdev) ||
		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_stats_get(&bnad->bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * Set up timer for DIM
 * Called with bnad->bna_lock held
 */
void
bnad_dim_timer_start(struct bnad *bnad)
{
	if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
	    !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
		setup_timer(&bnad->dim_timer, bnad_dim_timeout,
			    (unsigned long)bnad);
		set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
		mod_timer(&bnad->dim_timer,
			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
	}
}
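/*
 * DIM reschedules itself every BNAD_DIM_TIMER_FREQ ms and lets
 * bna_rx_dim_update() retune each CCB's coalescing timeout from the
 * packet rate sampled in bnad_poll_cq() via BNA_UPDATE_PKT_CNT.
 */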
/*
 * Set up timer for statistics
 * Called with mutex_lock(&bnad->conf_mutex) held
 */
static void
bnad_stats_timer_start(struct bnad *bnad)
{
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
		setup_timer(&bnad->stats_timer, bnad_stats_timeout,
			    (unsigned long)bnad);
		mod_timer(&bnad->stats_timer,
			  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * Stops the stats timer
 * Called with mutex_lock(&bnad->conf_mutex) held
 */
static void
bnad_stats_timer_stop(struct bnad *bnad)
{
	int to_del = 0;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		to_del = 1;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (to_del)
		del_timer_sync(&bnad->stats_timer);
}
/* Utilities */

static void
bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
{
	int i = 1; /* Index 0 has broadcast address */
	struct netdev_hw_addr *mc_addr;

	netdev_for_each_mc_addr(mc_addr, netdev) {
		memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
							ETH_ALEN);
		i++;
	}
}

static int
bnad_napi_poll_rx(struct napi_struct *napi, int budget)
{
	struct bnad_rx_ctrl *rx_ctrl =
		container_of(napi, struct bnad_rx_ctrl, napi);
	struct bna_ccb *ccb;
	struct bnad *bnad;
	int rcvd = 0;

	ccb = rx_ctrl->ccb;

	bnad = ccb->bnad;

	if (!netif_carrier_ok(bnad->netdev))
		goto poll_exit;

	rcvd = bnad_poll_cq(bnad, ccb, budget);
	if (rcvd == budget)
		return rcvd;

poll_exit:
	napi_complete((napi));

	BNAD_UPDATE_CTR(bnad, netif_rx_complete);

	bnad_enable_rx_irq(bnad, ccb);
	return rcvd;
}
static int
bnad_napi_poll_txrx(struct napi_struct *napi, int budget)
{
	struct bnad_rx_ctrl *rx_ctrl =
		container_of(napi, struct bnad_rx_ctrl, napi);
	struct bna_ccb *ccb;
	struct bnad *bnad;
	int rcvd = 0;
	int i, j;

	ccb = rx_ctrl->ccb;

	bnad = ccb->bnad;

	if (!netif_carrier_ok(bnad->netdev))
		goto poll_exit;

	/* Handle Tx Completions, if any */
	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++)
			bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
	}

	/* Handle Rx Completions */
	rcvd = bnad_poll_cq(bnad, ccb, budget);
	if (rcvd == budget)
		return rcvd;
poll_exit:
	napi_complete((napi));

	BNAD_UPDATE_CTR(bnad, netif_rx_complete);

	bnad_enable_txrx_irqs(bnad);
	return rcvd;
}
static void
bnad_napi_enable(struct bnad *bnad, u32 rx_id)
{
	int (*napi_poll) (struct napi_struct *, int);
	struct bnad_rx_ctrl *rx_ctrl;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (bnad->cfg_flags & BNAD_CF_MSIX)
		napi_poll = bnad_napi_poll_rx;
	else
		napi_poll = bnad_napi_poll_txrx;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Initialize & enable NAPI */
	for (i = 0; i < bnad->num_rxp_per_rx; i++) {
		rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
		netif_napi_add(bnad->netdev, &rx_ctrl->napi,
			       napi_poll, 64);
		napi_enable(&rx_ctrl->napi);
	}
}

static void
bnad_napi_disable(struct bnad *bnad, u32 rx_id)
{
	int i;

	/* First disable and then clean up */
	for (i = 0; i < bnad->num_rxp_per_rx; i++) {
		napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
		netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
	}
}
/* Should be held with conf_lock held */
static void
bnad_cleanup_tx(struct bnad *bnad, uint tx_id)
{
	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
	unsigned long flags;

	if (!tx_info->tx)
		return;

	init_completion(&bnad->bnad_completions.tx_comp);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	wait_for_completion(&bnad->bnad_completions.tx_comp);

	if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
		bnad_tx_msix_unregister(bnad, tx_info,
			bnad->num_txq_per_tx);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_destroy(tx_info->tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	tx_info->tx = NULL;

	if (0 == tx_id)
		tasklet_kill(&bnad->tx_free_tasklet);

	bnad_tx_res_free(bnad, res_info);
}
/* Should be held with conf_lock held */
static int
bnad_setup_tx(struct bnad *bnad, uint tx_id)
{
	int err;
	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
	struct bna_intr_info *intr_info =
			&res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
	struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
	struct bna_tx_event_cbfn tx_cbfn;
	struct bna_tx *tx;
	unsigned long flags;

	/* Initialize the Tx object configuration */
	tx_config->num_txq = bnad->num_txq_per_tx;
	tx_config->txq_depth = bnad->txq_depth;
	tx_config->tx_type = BNA_TX_T_REGULAR;

	/* Initialize the tx event handlers */
	tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
	tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
	tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
	tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
	tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;

	/* Get BNA's resource requirement for one tx object */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_res_req(bnad->num_txq_per_tx,
		bnad->txq_depth, res_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Fill Unmap Q memory requirements */
	BNAD_FILL_UNMAPQ_MEM_REQ(
			&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
			bnad->num_txq_per_tx,
			BNAD_TX_UNMAPQ_DEPTH);

	/* Allocate resources */
	err = bnad_tx_res_alloc(bnad, res_info, tx_id);
	if (err)
		return err;

	/* Ask BNA to create one Tx object, supplying required resources */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
			tx_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (!tx)
		goto err_return;
	tx_info->tx = tx;

	/* Register ISR for the Tx object */
	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
		err = bnad_tx_msix_register(bnad, tx_info,
			tx_id, bnad->num_txq_per_tx);
		if (err)
			goto err_return;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_enable(tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return 0;

err_return:
	bnad_tx_res_free(bnad, res_info);
	return err;
}
/* Setup the rx config for bna_rx_create */
/* bnad decides the configuration */
static void
bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
{
	rx_config->rx_type = BNA_RX_T_REGULAR;
	rx_config->num_paths = bnad->num_rxp_per_rx;

	if (bnad->num_rxp_per_rx > 1) {
		rx_config->rss_status = BNA_STATUS_T_ENABLED;
		rx_config->rss_config.hash_type =
				(BFI_RSS_T_V4_TCP |
				 BFI_RSS_T_V6_TCP |
				 BFI_RSS_T_V4_IP  |
				 BFI_RSS_T_V6_IP);
		rx_config->rss_config.hash_mask =
				bnad->num_rxp_per_rx - 1;
		get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
			sizeof(rx_config->rss_config.toeplitz_hash_key));
	} else {
		rx_config->rss_status = BNA_STATUS_T_DISABLED;
		memset(&rx_config->rss_config, 0,
		       sizeof(rx_config->rss_config));
	}
	rx_config->rxp_type = BNA_RXP_SLR;
	rx_config->q_depth = bnad->rxq_depth;

	rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;

	rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
}
/* Called with mutex_lock(&bnad->conf_mutex) held */
static void
bnad_cleanup_rx(struct bnad *bnad, uint rx_id)
{
	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
	struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
	unsigned long flags;
	int dim_timer_del = 0;

	if (!rx_info->rx)
		return;

	if (0 == rx_id) {
		spin_lock_irqsave(&bnad->bna_lock, flags);
		dim_timer_del = bnad_dim_timer_running(bnad);
		if (dim_timer_del)
			clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		if (dim_timer_del)
			del_timer_sync(&bnad->dim_timer);
	}

	bnad_napi_disable(bnad, rx_id);

	init_completion(&bnad->bnad_completions.rx_comp);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	wait_for_completion(&bnad->bnad_completions.rx_comp);

	if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
		bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_destroy(rx_info->rx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	rx_info->rx = NULL;

	bnad_rx_res_free(bnad, res_info);
}
/* Called with mutex_lock(&bnad->conf_mutex) held */
static int
bnad_setup_rx(struct bnad *bnad, uint rx_id)
{
	int err;
	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
	struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
	struct bna_intr_info *intr_info =
			&res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
	struct bna_rx_event_cbfn rx_cbfn;
	struct bna_rx *rx;
	unsigned long flags;

	/* Initialize the Rx object configuration */
	bnad_init_rx_config(bnad, rx_config);

	/* Initialize the Rx event handlers */
	rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
	rx_cbfn.rcb_destroy_cbfn = NULL;
	rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
	rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
	rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
	rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;

	/* Get BNA's resource requirement for one Rx object */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_res_req(rx_config, res_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Fill Unmap Q memory requirements */
	BNAD_FILL_UNMAPQ_MEM_REQ(
			&res_info[BNA_RX_RES_MEM_T_UNMAPQ],
			rx_config->num_paths +
			((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 :
				rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH);

	/* Allocate resource */
	err = bnad_rx_res_alloc(bnad, res_info, rx_id);
	if (err)
		return err;

	/* Ask BNA to create one Rx object, supplying required resources */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
			rx_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (!rx)
		goto err_return;
	rx_info->rx = rx;

	/* Register ISR for the Rx object */
	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
		err = bnad_rx_msix_register(bnad, rx_info, rx_id,
					    rx_config->num_paths);
		if (err)
			goto err_return;
	}

	/* Enable NAPI */
	bnad_napi_enable(bnad, rx_id);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (0 == rx_id) {
		/* Set up Dynamic Interrupt Moderation Vector */
		if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
			bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);

		/* Enable VLAN filtering only on the default Rx */
		bna_rx_vlanfilter_enable(rx);

		/* Start the DIM timer */
		bnad_dim_timer_start(bnad);
	}

	bna_rx_enable(rx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return 0;

err_return:
	bnad_cleanup_rx(bnad, rx_id);
	return err;
}
/* Called with conf_lock & bnad->bna_lock held */
void
bnad_tx_coalescing_timeo_set(struct bnad *bnad)
{
	struct bnad_tx_info *tx_info;

	tx_info = &bnad->tx_info[0];
	if (!tx_info->tx)
		return;

	bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
}

/* Called with conf_lock & bnad->bna_lock held */
void
bnad_rx_coalescing_timeo_set(struct bnad *bnad)
{
	struct bnad_rx_info *rx_info;
	int i;

	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		bna_rx_coalescing_timeo_set(rx_info->rx,
				bnad->rx_coalescing_timeo);
	}
}

/*
 * Called with bnad->bna_lock held
 */
static int
bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
{
	int ret;

	if (!is_valid_ether_addr(mac_addr))
		return -EADDRNOTAVAIL;

	/* If datapath is down, pretend everything went through */
	if (!bnad->rx_info[0].rx)
		return 0;

	ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
	if (ret != BNA_CB_SUCCESS)
		return -EADDRNOTAVAIL;

	return 0;
}
/* Should be called with conf_lock held */
static int
bnad_enable_default_bcast(struct bnad *bnad)
{
	struct bnad_rx_info *rx_info = &bnad->rx_info[0];
	int ret;
	unsigned long flags;

	init_completion(&bnad->bnad_completions.mcast_comp);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
				bnad_cb_rx_mcast_add);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (ret == BNA_CB_SUCCESS)
		wait_for_completion(&bnad->bnad_completions.mcast_comp);
	else
		return -ENODEV;

	if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
		return -ENODEV;

	return 0;
}
/* Statistics utilities */
void
bnad_netdev_qstats_fill(struct bnad *bnad)
{
	struct net_device_stats *net_stats = &bnad->net_stats;
	int i, j;

	for (i = 0; i < bnad->num_rx; i++) {
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			if (bnad->rx_info[i].rx_ctrl[j].ccb) {
				net_stats->rx_packets += bnad->rx_info[i].
				rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
				net_stats->rx_bytes += bnad->rx_info[i].
					rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
				if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
					bnad->rx_info[i].rx_ctrl[j].ccb->
					rcb[1]->rxq) {
					net_stats->rx_packets +=
						bnad->rx_info[i].rx_ctrl[j].
						ccb->rcb[1]->rxq->rx_packets;
					net_stats->rx_bytes +=
						bnad->rx_info[i].rx_ctrl[j].
						ccb->rcb[1]->rxq->rx_bytes;
				}
			}
		}
	}
	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++) {
			if (bnad->tx_info[i].tcb[j]) {
				net_stats->tx_packets +=
				bnad->tx_info[i].tcb[j]->txq->tx_packets;
				net_stats->tx_bytes +=
					bnad->tx_info[i].tcb[j]->txq->tx_bytes;
			}
		}
	}
}
/*
 * Must be called with the bna_lock held.
 */
void
bnad_netdev_hwstats_fill(struct bnad *bnad)
{
	struct bfi_ll_stats_mac *mac_stats;
	struct net_device_stats *net_stats = &bnad->net_stats;
	u64 bmap;
	int i;

	mac_stats = &bnad->stats.bna_stats->hw_stats->mac_stats;
	net_stats->rx_errors =
		mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
		mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
		mac_stats->rx_undersize;
	net_stats->tx_errors = mac_stats->tx_fcs_error +
					mac_stats->tx_undersize;
	net_stats->rx_dropped = mac_stats->rx_drop;
	net_stats->tx_dropped = mac_stats->tx_drop;
	net_stats->multicast = mac_stats->rx_multicast;
	net_stats->collisions = mac_stats->tx_total_collision;

	net_stats->rx_length_errors = mac_stats->rx_frame_length_error;

	/* receive ring buffer overflow  ?? */

	net_stats->rx_crc_errors = mac_stats->rx_fcs_error;
	net_stats->rx_frame_errors = mac_stats->rx_alignment_error;
	/* recv'r fifo overrun */
	bmap = (u64)bnad->stats.bna_stats->rxf_bmap[0] |
		((u64)bnad->stats.bna_stats->rxf_bmap[1] << 32);
	for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
		if (bmap & 1) {
			net_stats->rx_fifo_errors =
				bnad->stats.bna_stats->
					hw_stats->rxf_stats[i].frame_drops;
			break;
		}
		bmap >>= 1;
	}
}
static void
bnad_mbox_irq_sync(struct bnad *bnad)
{
	u32 irq;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (bnad->cfg_flags & BNAD_CF_MSIX)
		irq = bnad->msix_table[bnad->msix_num - 1].vector;
	else
		irq = bnad->pcidev->irq;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	synchronize_irq(irq);
}
/* Utility used by bnad_start_xmit, for doing TSO */
static int
bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
{
	int err;

	/* SKB_GSO_TCPV4 and SKB_GSO_TCPV6 is defined since 2.6.18. */
	BUG_ON(!(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ||
		   skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6));
	if (skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err) {
			BNAD_UPDATE_CTR(bnad, tso_err);
			return err;
		}
	}

	/*
	 * For TSO, the TCP checksum field is seeded with pseudo-header sum
	 * excluding the length field.
	 */
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);

		/* Do we really need these? */
		iph->tot_len = 0;
		iph->check = 0;

		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
					   IPPROTO_TCP, 0);
		BNAD_UPDATE_CTR(bnad, tso4);
	} else {
		struct ipv6hdr *ipv6h = ipv6_hdr(skb);

		BUG_ON(!(skb->protocol == htons(ETH_P_IPV6)));
		ipv6h->payload_len = 0;
		tcp_hdr(skb)->check =
			~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
					 IPPROTO_TCP, 0);
		BNAD_UPDATE_CTR(bnad, tso6);
	}

	return 0;
}
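/*
 * The seeding above follows the usual TSO convention: the device
 * inserts the per-segment length into the pseudo-header sum, so the
 * length portion is zeroed and the checksum field is pre-loaded with
 * the complement of the length-free pseudo-header sum.
 */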
/*
 * Initialize Q numbers depending on Rx Paths
 * Called with bnad->bna_lock held, because of cfg_flags
 * access.
 */
static void
bnad_q_num_init(struct bnad *bnad)
{
	int rxps;

	rxps = min((uint)num_online_cpus(),
			(uint)(BNAD_MAX_RXS * BNAD_MAX_RXPS_PER_RX));

	if (!(bnad->cfg_flags & BNAD_CF_MSIX))
		rxps = 1;	/* INTx */

	bnad->num_rx = 1;
	bnad->num_tx = 1;
	bnad->num_rxp_per_rx = rxps;
	bnad->num_txq_per_tx = BNAD_TXQ_NUM;
}

/*
 * Adjusts the Q numbers, given a number of msix vectors
 * Give preference to RSS as opposed to Tx priority Queues,
 * in such a case, just use 1 Tx Q
 * Called with bnad->bna_lock held b'cos of cfg_flags access
 */
static void
bnad_q_num_adjust(struct bnad *bnad, int msix_vectors)
{
	bnad->num_txq_per_tx = 1;
	if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx)  +
	     bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
	    (bnad->cfg_flags & BNAD_CF_MSIX)) {
		bnad->num_rxp_per_rx = msix_vectors -
			(bnad->num_tx * bnad->num_txq_per_tx) -
			BNAD_MAILBOX_MSIX_VECTORS;
	} else
		bnad->num_rxp_per_rx = 1;
}
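/*
 * Worked example for the adjustment above: with 8 granted vectors,
 * 1 Tx queue and a single mailbox vector, num_rxp_per_rx becomes
 * 8 - 1 - BNAD_MAILBOX_MSIX_VECTORS (6 with one mailbox vector);
 * anything below the minimum budget drops to one Rx path.
 */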
static void
bnad_set_netdev_perm_addr(struct bnad *bnad)
{
	struct net_device *netdev = bnad->netdev;

	memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
	if (is_zero_ether_addr(netdev->dev_addr))
		memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
}
/* Enable / disable device */
static void
bnad_device_disable(struct bnad *bnad)
{
	unsigned long flags;

	init_completion(&bnad->bnad_completions.ioc_comp);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_device_disable(&bnad->bna.device, BNA_HARD_CLEANUP);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	wait_for_completion(&bnad->bnad_completions.ioc_comp);
}

static int
bnad_device_enable(struct bnad *bnad)
{
	int err = 0;
	unsigned long flags;

	init_completion(&bnad->bnad_completions.ioc_comp);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_device_enable(&bnad->bna.device);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	wait_for_completion(&bnad->bnad_completions.ioc_comp);

	if (bnad->bnad_completions.ioc_comp_status)
		err = bnad->bnad_completions.ioc_comp_status;

	return err;
}
/* Free BNA resources */
static void
bnad_res_free(struct bnad *bnad)
{
	int i;
	struct bna_res_info *res_info = &bnad->res_info[0];

	for (i = 0; i < BNA_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
		else
			bnad_mbox_irq_free(bnad, &res_info[i].res_u.intr_info);
	}
}

/* Allocates memory and interrupt resources for BNA */
static int
bnad_res_alloc(struct bnad *bnad)
{
	int i, err;
	struct bna_res_info *res_info = &bnad->res_info[0];

	for (i = 0; i < BNA_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
		else
			err = bnad_mbox_irq_alloc(bnad,
						  &res_info[i].res_u.intr_info);
		if (err)
			goto err_return;
	}
	return 0;

err_return:
	bnad_res_free(bnad);
	return err;
}
/* Interrupt enable / disable */
static void
bnad_enable_msix(struct bnad *bnad)
{
	int i, ret;
	u32 tot_msix_num;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (bnad->msix_table)
		return;

	tot_msix_num = bnad->msix_num + bnad->msix_diag_num;

	bnad->msix_table =
		kcalloc(tot_msix_num, sizeof(struct msix_entry), GFP_KERNEL);

	if (!bnad->msix_table)
		goto intx_mode;

	for (i = 0; i < tot_msix_num; i++)
		bnad->msix_table[i].entry = i;

	ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, tot_msix_num);
	if (ret > 0) {
		/* Not enough MSI-X vectors. */

		spin_lock_irqsave(&bnad->bna_lock, flags);
		/* ret = #of vectors that we got */
		bnad_q_num_adjust(bnad, ret);
		spin_unlock_irqrestore(&bnad->bna_lock, flags);

		bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx)
			+ (bnad->num_rx
			* bnad->num_rxp_per_rx) +
			BNAD_MAILBOX_MSIX_VECTORS;
		tot_msix_num = bnad->msix_num + bnad->msix_diag_num;

		/* Try once more with adjusted numbers */
		/* If this fails, fall back to INTx */
		ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
				      tot_msix_num);
		if (ret)
			goto intx_mode;

	} else if (ret < 0)
		goto intx_mode;
	return;

intx_mode:

	kfree(bnad->msix_table);
	bnad->msix_table = NULL;
	bnad->msix_num = 0;
	bnad->msix_diag_num = 0;
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad->cfg_flags &= ~BNAD_CF_MSIX;
	bnad_q_num_init(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
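/*
 * Fallback summary: a short pci_enable_msix() allocation (ret > 0)
 * triggers one retry with queue counts shrunk via bnad_q_num_adjust();
 * a second failure, or ret < 0, lands in intx_mode, which clears
 * BNAD_CF_MSIX and re-derives queue counts for INTx operation.
 */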
static void
bnad_disable_msix(struct bnad *bnad)
{
	u32 cfg_flags;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	cfg_flags = bnad->cfg_flags;
	if (bnad->cfg_flags & BNAD_CF_MSIX)
		bnad->cfg_flags &= ~BNAD_CF_MSIX;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (cfg_flags & BNAD_CF_MSIX) {
		pci_disable_msix(bnad->pcidev);
		kfree(bnad->msix_table);
		bnad->msix_table = NULL;
	}
}
/* Netdev entry points */
static int
bnad_open(struct net_device *netdev)
{
	int err;
	struct bnad *bnad = netdev_priv(netdev);
	struct bna_pause_config pause_config;
	int mtu;
	unsigned long flags;

	mutex_lock(&bnad->conf_mutex);

	/* Tx */
	err = bnad_setup_tx(bnad, 0);
	if (err)
		goto err_return;

	/* Rx */
	err = bnad_setup_rx(bnad, 0);
	if (err)
		goto cleanup_tx;

	/* Port */
	pause_config.tx_pause = 0;
	pause_config.rx_pause = 0;

	mtu = ETH_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
	bna_port_pause_config(&bnad->bna.port, &pause_config, NULL);
	bna_port_enable(&bnad->bna.port);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Enable broadcast */
	bnad_enable_default_bcast(bnad);

	/* Set the UCAST address */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Start the stats timer */
	bnad_stats_timer_start(bnad);

	mutex_unlock(&bnad->conf_mutex);

	return 0;

cleanup_tx:
	bnad_cleanup_tx(bnad, 0);

err_return:
	mutex_unlock(&bnad->conf_mutex);
	return err;
}
static int
bnad_stop(struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	mutex_lock(&bnad->conf_mutex);

	/* Stop the stats timer */
	bnad_stats_timer_stop(bnad);

	init_completion(&bnad->bnad_completions.port_comp);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_port_disable(&bnad->bna.port, BNA_HARD_CLEANUP,
			bnad_cb_port_disabled);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	wait_for_completion(&bnad->bnad_completions.port_comp);

	bnad_cleanup_tx(bnad, 0);
	bnad_cleanup_rx(bnad, 0);

	/* Synchronize mailbox IRQ */
	bnad_mbox_irq_sync(bnad);

	mutex_unlock(&bnad->conf_mutex);

	return 0;
}
2411 * bnad_start_xmit : Netdev entry point for Transmit
2412 * Called under lock held by net_device
2415 bnad_start_xmit(struct sk_buff
*skb
, struct net_device
*netdev
)
2417 struct bnad
*bnad
= netdev_priv(netdev
);
2419 u16 txq_prod
, vlan_tag
= 0;
2420 u32 unmap_prod
, wis
, wis_used
, wi_range
;
2421 u32 vectors
, vect_id
, i
, acked
;
2425 struct bnad_tx_info
*tx_info
;
2426 struct bna_tcb
*tcb
;
2427 struct bnad_unmap_q
*unmap_q
;
2428 dma_addr_t dma_addr
;
2429 struct bna_txq_entry
*txqent
;
2430 bna_txq_wi_ctrl_flag_t flags
;
2433 (skb
->len
<= ETH_HLEN
|| skb
->len
> BFI_TX_MAX_DATA_PER_PKT
)) {
2435 return NETDEV_TX_OK
;
2439 * Takes care of the Tx that is scheduled between clearing the flag
2440 * and the netif_stop_queue() call.
2442 if (unlikely(!test_bit(BNAD_RF_TX_STARTED
, &bnad
->run_flags
))) {
2444 return NETDEV_TX_OK
;
2449 tx_info
= &bnad
->tx_info
[tx_id
];
2450 tcb
= tx_info
->tcb
[tx_id
];
2451 unmap_q
= tcb
->unmap_q
;
2453 vectors
= 1 + skb_shinfo(skb
)->nr_frags
;
2454 if (vectors
> BFI_TX_MAX_VECTORS_PER_PKT
) {
2456 return NETDEV_TX_OK
;
2458 wis
= BNA_TXQ_WI_NEEDED(vectors
); /* 4 vectors per work item */
	acked = 0;
	if (unlikely
	    (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
	     vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
		if ((u16) (*tcb->hw_consumer_index) !=
		    tcb->consumer_index &&
		    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
			acked = bnad_free_txbufs(bnad, tcb);
			bna_ib_ack(tcb->i_dbell, acked);
			smp_mb__before_clear_bit();
			clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
		} else {
			netif_stop_queue(netdev);
			BNAD_UPDATE_CTR(bnad, netif_queue_stop);
		}

		smp_mb();
		/*
		 * Check again to deal with race condition between
		 * netif_stop_queue here, and netif_wake_queue in
		 * interrupt handler which is not inside netif tx lock.
		 */
		if (likely
		    (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
		     vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
			BNAD_UPDATE_CTR(bnad, netif_queue_stop);
			return NETDEV_TX_BUSY;
		} else {
			netif_wake_queue(netdev);
			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
		}
	}
	unmap_prod = unmap_q->producer_index;
	wis_used = 1;
	vect_id = 0;
	flags = 0;

	txq_prod = tcb->producer_index;
	BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
	BUG_ON(!(wi_range <= tcb->q_depth));
	txqent->hdr.wi.reserved = 0;
	txqent->hdr.wi.num_vectors = vectors;
	txqent->hdr.wi.opcode =
		htons((skb_is_gso(skb) ? BNA_TXQ_WI_SEND_LSO :
		       BNA_TXQ_WI_SEND));

	if (bnad->vlan_grp && vlan_tx_tag_present(skb)) {
		vlan_tag = (u16) vlan_tx_tag_get(skb);
		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
	}
	if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
		vlan_tag =
			(tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff);
		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
	}

	txqent->hdr.wi.vlan_tag = htons(vlan_tag);
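	/*
	 * Tag layout note: in the 802.1Q TCI the PCP priority occupies the
	 * top 3 bits, hence the (priority & 0x7) << 13 above.  When CEE is
	 * running, the hardware priority from the TCB overrides whatever
	 * priority the stack put in the tag.
	 */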
	if (skb_is_gso(skb)) {
		err = bnad_tso_prepare(bnad, skb);
		if (err) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		txqent->hdr.wi.lso_mss = htons(skb_is_gso(skb));
		flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
		txqent->hdr.wi.l4_hdr_size_n_offset =
			htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
			      (tcp_hdrlen(skb) >> 2,
			       skb_transport_offset(skb)));
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u8 proto = 0;

		txqent->hdr.wi.lso_mss = 0;

		if (skb->protocol == htons(ETH_P_IP))
			proto = ip_hdr(skb)->protocol;
		else if (skb->protocol == htons(ETH_P_IPV6)) {
			/* nexthdr may not be TCP immediately. */
			proto = ipv6_hdr(skb)->nexthdr;
		}
		if (proto == IPPROTO_TCP) {
			flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
			txqent->hdr.wi.l4_hdr_size_n_offset =
				htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
				      (0, skb_transport_offset(skb)));

			BNAD_UPDATE_CTR(bnad, tcpcsum_offload);

			BUG_ON(!(skb_headlen(skb) >=
				 skb_transport_offset(skb) + tcp_hdrlen(skb)));

		} else if (proto == IPPROTO_UDP) {
			flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
			txqent->hdr.wi.l4_hdr_size_n_offset =
				htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
				      (0, skb_transport_offset(skb)));

			BNAD_UPDATE_CTR(bnad, udpcsum_offload);

			BUG_ON(!(skb_headlen(skb) >=
				 skb_transport_offset(skb) +
				 sizeof(struct udphdr)));
		} else {
			err = skb_checksum_help(skb);
			BNAD_UPDATE_CTR(bnad, csum_help);
			if (err) {
				dev_kfree_skb(skb);
				BNAD_UPDATE_CTR(bnad, csum_help_err);
				return NETDEV_TX_OK;
			}
		}
	} else {
		txqent->hdr.wi.lso_mss = 0;
		txqent->hdr.wi.l4_hdr_size_n_offset = 0;
	}
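	/*
	 * Offload summary: for GSO packets the WI asks the hardware to do
	 * both IP and TCP checksums and carries the MSS; for
	 * CHECKSUM_PARTIAL only a recognized L4 protocol (TCP or UDP) is
	 * offloaded, anything else falls back to skb_checksum_help() in
	 * software; all other packets request no checksum work at all.
	 */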
	txqent->hdr.wi.flags = htons(flags);

	txqent->hdr.wi.frame_length = htonl(skb->len);

	unmap_q->unmap_array[unmap_prod].skb = skb;
	BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR));
	txqent->vector[vect_id].length = htons(skb_headlen(skb));
	dma_addr = pci_map_single(bnad->pcidev, skb->data, skb_headlen(skb),
				  PCI_DMA_TODEVICE);
	pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
			   dma_addr);

	BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
	BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
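	/*
	 * The linear part of the skb is always vector 0; its skb pointer
	 * and DMA address are parked in the unmap queue so the completion
	 * path (bnad_free_txbufs) can unmap and free it later.
	 */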
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		u32 size = frag->size;

		if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
			vect_id = 0;
			if (--wi_range)
				txqent++;
			else {
				BNA_QE_INDX_ADD(txq_prod, wis_used,
						tcb->q_depth);
				wis_used = 0;
				BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
						     txqent, wi_range);
				BUG_ON(!(wi_range <= tcb->q_depth));
			}
			wis_used++;
			txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
		}

		BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
		txqent->vector[vect_id].length = htons(size);
		dma_addr =
			pci_map_page(bnad->pcidev, frag->page,
				     frag->page_offset, size,
				     PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
				   dma_addr);
		BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
	}
	unmap_q->producer_index = unmap_prod;
	BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
	tcb->producer_index = txq_prod;

	smp_mb();
	bna_txq_prod_indx_doorbell(tcb);

	if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
		tasklet_schedule(&bnad->tx_free_tasklet);

	return NETDEV_TX_OK;
}
/*
 * Used spin_lock to synchronize reading of stats structures, which
 * is written by BNA under the same lock.
 */
static struct net_device_stats *
bnad_get_netdev_stats(struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	memset(&bnad->net_stats, 0, sizeof(struct net_device_stats));

	bnad_netdev_qstats_fill(bnad);
	bnad_netdev_hwstats_fill(bnad);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return &bnad->net_stats;
}
static void
bnad_set_rx_mode(struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	u32 new_mask, valid_mask;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	new_mask = valid_mask = 0;

	if (netdev->flags & IFF_PROMISC) {
		if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
			new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
			valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
			bnad->cfg_flags |= BNAD_CF_PROMISC;
		}
	} else {
		if (bnad->cfg_flags & BNAD_CF_PROMISC) {
			new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
			valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
			bnad->cfg_flags &= ~BNAD_CF_PROMISC;
		}
	}

	if (netdev->flags & IFF_ALLMULTI) {
		if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
			new_mask |= BNA_RXMODE_ALLMULTI;
			valid_mask |= BNA_RXMODE_ALLMULTI;
			bnad->cfg_flags |= BNAD_CF_ALLMULTI;
		}
	} else {
		if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
			new_mask &= ~BNA_RXMODE_ALLMULTI;
			valid_mask |= BNA_RXMODE_ALLMULTI;
			bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
		}
	}
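	/*
	 * As set up above, valid_mask selects which rx-mode bits the call
	 * below should act on, while new_mask carries their new values;
	 * bits outside valid_mask are left as they are.
	 */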
	bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);

	if (!netdev_mc_empty(netdev)) {
		u8 *mcaddr_list;
		int mc_count = netdev_mc_count(netdev);

		/* Index 0 holds the broadcast address */
		mcaddr_list =
			kzalloc((mc_count + 1) * ETH_ALEN,
				GFP_ATOMIC);
		if (!mcaddr_list)
			goto unlock;

		memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);

		/* Copy rest of the MC addresses */
		bnad_netdev_mc_list_get(netdev, mcaddr_list);

		bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
				     mcaddr_list, NULL);

		/* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
		kfree(mcaddr_list);
	}
unlock:
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
/*
 * bna_lock is used to sync writes to netdev->addr
 * conf_lock cannot be used since this call may be made
 * in a non-blocking context.
 */
static int
bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
{
	int err;
	struct bnad *bnad = netdev_priv(netdev);
	struct sockaddr *sa = (struct sockaddr *)mac_addr;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	err = bnad_mac_addr_set_locked(bnad, sa->sa_data);

	if (!err)
		memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return err;
}
static int
bnad_change_mtu(struct net_device *netdev, int new_mtu)
{
	int mtu, err = 0;
	unsigned long flags;

	struct bnad *bnad = netdev_priv(netdev);

	if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
		return -EINVAL;

	mutex_lock(&bnad->conf_mutex);

	netdev->mtu = new_mtu;

	mtu = ETH_HLEN + new_mtu + ETH_FCS_LEN;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);
	return err;
}
static void
bnad_vlan_rx_register(struct net_device *netdev,
		      struct vlan_group *vlan_grp)
{
	struct bnad *bnad = netdev_priv(netdev);

	mutex_lock(&bnad->conf_mutex);
	bnad->vlan_grp = vlan_grp;
	mutex_unlock(&bnad->conf_mutex);
}
static void
bnad_vlan_rx_add_vid(struct net_device *netdev,
		     unsigned short vid)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	if (!bnad->rx_info[0].rx)
		return;

	mutex_lock(&bnad->conf_mutex);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);
}
static void
bnad_vlan_rx_kill_vid(struct net_device *netdev,
		      unsigned short vid)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	if (!bnad->rx_info[0].rx)
		return;

	mutex_lock(&bnad->conf_mutex);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void
bnad_netpoll(struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	u32 curr_mask;
	int i, j;

	if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
		bna_intx_disable(&bnad->bna, curr_mask);
		bnad_isr(bnad->pcidev->irq, netdev);
		bna_intx_enable(&bnad->bna, curr_mask);
	} else {
		for (i = 0; i < bnad->num_rx; i++) {
			rx_info = &bnad->rx_info[i];
			if (!rx_info->rx)
				continue;
			for (j = 0; j < bnad->num_rxp_per_rx; j++) {
				rx_ctrl = &rx_info->rx_ctrl[j];
				if (rx_ctrl->ccb) {
					bnad_disable_rx_irq(bnad,
							    rx_ctrl->ccb);
					bnad_netif_rx_schedule_poll(bnad,
							    rx_ctrl->ccb);
				}
			}
		}
	}
}
#endif
static const struct net_device_ops bnad_netdev_ops = {
	.ndo_open		= bnad_open,
	.ndo_stop		= bnad_stop,
	.ndo_start_xmit		= bnad_start_xmit,
	.ndo_get_stats		= bnad_get_netdev_stats,
	.ndo_set_rx_mode	= bnad_set_rx_mode,
	.ndo_set_multicast_list = bnad_set_rx_mode,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_mac_address    = bnad_set_mac_address,
	.ndo_change_mtu		= bnad_change_mtu,
	.ndo_vlan_rx_register   = bnad_vlan_rx_register,
	.ndo_vlan_rx_add_vid    = bnad_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid   = bnad_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller    = bnad_netpoll
#endif
};
static void
bnad_netdev_init(struct bnad *bnad, bool using_dac)
{
	struct net_device *netdev = bnad->netdev;

	netdev->features |= NETIF_F_IPV6_CSUM;
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;

	netdev->features |= NETIF_F_GRO;
	pr_warn("bna: GRO enabled, using kernel stack GRO\n");

	netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;

	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->features |=
		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
		NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features = netdev->features;
	netdev->mem_start = bnad->mmio_start;
	netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;

	netdev->netdev_ops = &bnad_netdev_ops;
	bnad_set_ethtool_ops(netdev);
}
/*
 * 1. Initialize the bnad structure
 * 2. Setup netdev pointer in pci_dev
 * 3. Initialize Tx free tasklet
 * 4. Initialize no. of TxQ & CQs & MSIX vectors
 */
static int
bnad_init(struct bnad *bnad,
	  struct pci_dev *pdev, struct net_device *netdev)
{
	unsigned long flags;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	pci_set_drvdata(pdev, netdev);

	bnad->netdev = netdev;
	bnad->pcidev = pdev;
	bnad->mmio_start = pci_resource_start(pdev, 0);
	bnad->mmio_len = pci_resource_len(pdev, 0);
	bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
	if (!bnad->bar0) {
		dev_err(&pdev->dev, "ioremap for bar0 failed\n");
		pci_set_drvdata(pdev, NULL);
		return -ENOMEM;
	}
	pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
		(unsigned long long) bnad->mmio_len);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!bnad_msix_disable)
		bnad->cfg_flags = BNAD_CF_MSIX;

	bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;

	bnad_q_num_init(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
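	/*
	 * MSI-X vector budget, computed below: one vector per TxQ, one per
	 * Rx path, plus the mailbox vector(s).  The diagnostic vectors are
	 * accounted separately.
	 */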
	bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
		(bnad->num_rx * bnad->num_rxp_per_rx) +
		BNAD_MAILBOX_MSIX_VECTORS;
	bnad->msix_diag_num = 2;	/* 1 for Tx, 1 for Rx */

	bnad->txq_depth = BNAD_TXQ_DEPTH;
	bnad->rxq_depth = BNAD_RXQ_DEPTH;
	bnad->rx_csum = true;

	bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
	bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;

	tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
		     (unsigned long)bnad);

	return 0;
}
/*
 * Must be called after bnad_pci_uninit()
 * so that iounmap() and pci_set_drvdata(NULL)
 * happens only after PCI uninitialization.
 */
static void
bnad_uninit(struct bnad *bnad)
{
	if (bnad->bar0)
		iounmap(bnad->bar0);
	pci_set_drvdata(bnad->pcidev, NULL);
}
/*
 * Initialize locks
 *	a) Per-device mutex used for serializing configuration
 *	   changes from OS interface
 *	b) spin lock used to protect bna state machine
 */
static void
bnad_lock_init(struct bnad *bnad)
{
	spin_lock_init(&bnad->bna_lock);
	mutex_init(&bnad->conf_mutex);
}

static void
bnad_lock_uninit(struct bnad *bnad)
{
	mutex_destroy(&bnad->conf_mutex);
}
/* PCI Initialization */
static int
bnad_pci_init(struct bnad *bnad,
	      struct pci_dev *pdev, bool *using_dac)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;
	err = pci_request_regions(pdev, BNAD_NAME);
	if (err)
		goto disable_device;
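	/*
	 * DMA addressing: try a 64-bit mask for both streaming and
	 * consistent DMA first; if the platform refuses, fall back to
	 * 32-bit masks.  using_dac records which mode won so that
	 * bnad_netdev_init() can advertise NETIF_F_HIGHDMA accordingly.
	 */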
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
		*using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
			if (err)
				goto release_regions;
		}
		*using_dac = 0;
	}

	pci_set_master(pdev);

	return 0;

release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);

	return err;
}

static void
bnad_pci_uninit(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
static int __devinit
bnad_pci_probe(struct pci_dev *pdev,
	       const struct pci_device_id *pcidev_id)
{
	bool using_dac;
	int err;
	struct bnad *bnad;
	struct bna *bna;
	struct net_device *netdev;
	struct bfa_pcidev pcidev_info;
	unsigned long flags;

	pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
		pdev, pcidev_id, PCI_FUNC(pdev->devfn));

	mutex_lock(&bnad_fwimg_mutex);
	if (!cna_get_firmware_buf(pdev)) {
		mutex_unlock(&bnad_fwimg_mutex);
		pr_warn("Failed to load Firmware Image!\n");
		return -ENODEV;
	}
	mutex_unlock(&bnad_fwimg_mutex);
	/*
	 * Allocates sizeof(struct net_device + struct bnad)
	 * bnad = netdev->priv
	 */
	netdev = alloc_etherdev(sizeof(struct bnad));
	if (!netdev) {
		dev_err(&pdev->dev, "alloc_etherdev failed\n");
		err = -ENOMEM;
		return err;
	}
	bnad = netdev_priv(netdev);

	/*
	 * PCI initialization
	 *	Output : using_dac = 1 for 64 bit DMA
	 *		           = 0 for 32 bit DMA
	 */
	err = bnad_pci_init(bnad, pdev, &using_dac);
	if (err)
		goto free_netdev;
	bnad_lock_init(bnad);
	/*
	 * Initialize bnad structure
	 * Setup relation between pci_dev & netdev
	 * Init Tx free tasklet
	 */
	err = bnad_init(bnad, pdev, netdev);
	if (err)
		goto pci_uninit;
	/* Initialize netdev structure, set up ethtool ops */
	bnad_netdev_init(bnad, using_dac);

	bnad_enable_msix(bnad);

	/* Get resource requirement from bna */
	bna_res_req(&bnad->res_info[0]);

	/* Allocate resources from bna */
	err = bnad_res_alloc(bnad);
	if (err)
		goto free_netdev;

	bna = &bnad->bna;
	/* Setup pcidev_info for bna_init() */
	pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
	pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
	pcidev_info.device_id = bnad->pcidev->device;
	pcidev_info.pci_bar_kva = bnad->bar0;

	mutex_lock(&bnad->conf_mutex);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad->stats.bna_stats = &bna->stats;

	/* Set up timers */
	setup_timer(&bnad->bna.device.ioc.ioc_timer, bnad_ioc_timeout,
		    ((unsigned long)bnad));
	setup_timer(&bnad->bna.device.ioc.hb_timer, bnad_ioc_hb_check,
		    ((unsigned long)bnad));
	setup_timer(&bnad->bna.device.ioc.sem_timer, bnad_ioc_sem_timeout,
		    ((unsigned long)bnad));

	/* Now start the timer before calling IOC */
	mod_timer(&bnad->bna.device.ioc.ioc_timer,
		  jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
	/*
	 * Don't care even if err != 0, bna state machine will
	 * deal with it
	 */
	err = bnad_device_enable(bnad);

	/* Get the burnt-in mac */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_port_mac_get(&bna->port, &bnad->perm_addr);
	bnad_set_netdev_perm_addr(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);
	/*
	 * Make sure the link appears down to the stack
	 */
	netif_carrier_off(netdev);

	/* Finally, register with net_device layer */
	err = register_netdev(netdev);
	if (err) {
		pr_err("BNA : Registering with netdev failed\n");
		goto disable_device;
	}

	return 0;

disable_device:
	mutex_lock(&bnad->conf_mutex);
	bnad_device_disable(bnad);
	del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
	del_timer_sync(&bnad->bna.device.ioc.sem_timer);
	del_timer_sync(&bnad->bna.device.ioc.hb_timer);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_uninit(bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	mutex_unlock(&bnad->conf_mutex);

	bnad_res_free(bnad);
	bnad_disable_msix(bnad);
pci_uninit:
	bnad_pci_uninit(pdev);
	bnad_lock_uninit(bnad);
	bnad_uninit(bnad);
free_netdev:
	free_netdev(netdev);
	return err;
}
static void __devexit
bnad_pci_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnad *bnad;
	struct bna *bna;
	unsigned long flags;

	if (!netdev)
		return;

	pr_info("%s bnad_pci_remove\n", netdev->name);
	bnad = netdev_priv(netdev);
	bna = &bnad->bna;

	unregister_netdev(netdev);

	mutex_lock(&bnad->conf_mutex);
	bnad_device_disable(bnad);
	del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
	del_timer_sync(&bnad->bna.device.ioc.sem_timer);
	del_timer_sync(&bnad->bna.device.ioc.hb_timer);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_uninit(bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	mutex_unlock(&bnad->conf_mutex);

	bnad_res_free(bnad);
	bnad_disable_msix(bnad);
	bnad_pci_uninit(pdev);
	bnad_lock_uninit(bnad);
	bnad_uninit(bnad);
	free_netdev(netdev);
}
const struct pci_device_id bnad_pci_id_table[] = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
			   PCI_DEVICE_ID_BROCADE_CT),
		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
		.class_mask = 0xffff00
	},
	{0, }
};

MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
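/*
 * Exporting the ID table via MODULE_DEVICE_TABLE() lets udev/modprobe
 * autoload this driver when a matching Brocade CT device is discovered;
 * the class/class_mask entries restrict the match to the Ethernet
 * function of the CNA.
 */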
static struct pci_driver bnad_pci_driver = {
	.name = BNAD_NAME,
	.id_table = bnad_pci_id_table,
	.probe = bnad_pci_probe,
	.remove = __devexit_p(bnad_pci_remove),
};
static int __init
bnad_module_init(void)
{
	int err;

	pr_info("Brocade 10G Ethernet driver\n");

	bfa_ioc_auto_recover(bnad_ioc_auto_recover);

	err = pci_register_driver(&bnad_pci_driver);
	if (err < 0) {
		pr_err("bna : PCI registration failed in module init "
		       "(%d)\n", err);
		return err;
	}

	return 0;
}
static void __exit
bnad_module_exit(void)
{
	pci_unregister_driver(&bnad_pci_driver);

	if (bfi_fw)
		release_firmware(bfi_fw);
}
module_init(bnad_module_init);
module_exit(bnad_module_exit);

MODULE_AUTHOR("Brocade");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
MODULE_VERSION(BNAD_VERSION);
MODULE_FIRMWARE(CNA_FW_FILE_CT);