1 /* bnx2x_cmn.c: QLogic Everest network driver.
3 * Copyright (c) 2007-2013 Broadcom Corporation
4 * Copyright (c) 2014 QLogic Corporation
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation.
11 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
12 * Written by: Eliezer Tamir
13 * Based on code from Michael Chan's bnx2 driver
14 * UDP CSUM errata workaround by Arik Gendelman
15 * Slowpath and fastpath rework by Vladislav Zolotarov
16 * Statistics and Link management by Yitchak Gertner
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22 #include <linux/etherdevice.h>
23 #include <linux/if_vlan.h>
24 #include <linux/interrupt.h>
26 #include <linux/crash_dump.h>
29 #include <net/ip6_checksum.h>
30 #include <net/busy_poll.h>
31 #include <linux/prefetch.h>
32 #include "bnx2x_cmn.h"
33 #include "bnx2x_init.h"
36 static void bnx2x_free_fp_mem_cnic(struct bnx2x
*bp
);
37 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x
*bp
);
38 static int bnx2x_alloc_fp_mem(struct bnx2x
*bp
);
39 static int bnx2x_poll(struct napi_struct
*napi
, int budget
);
41 static void bnx2x_add_all_napi_cnic(struct bnx2x
*bp
)
45 /* Add NAPI objects */
46 for_each_rx_queue_cnic(bp
, i
) {
47 netif_napi_add(bp
->dev
, &bnx2x_fp(bp
, i
, napi
),
48 bnx2x_poll
, NAPI_POLL_WEIGHT
);
52 static void bnx2x_add_all_napi(struct bnx2x
*bp
)
56 /* Add NAPI objects */
57 for_each_eth_queue(bp
, i
) {
58 netif_napi_add(bp
->dev
, &bnx2x_fp(bp
, i
, napi
),
59 bnx2x_poll
, NAPI_POLL_WEIGHT
);
63 static int bnx2x_calc_num_queues(struct bnx2x
*bp
)
65 int nq
= bnx2x_num_queues
? : netif_get_num_default_rss_queues();
67 /* Reduce memory usage in kdump environment by using only one queue */
68 if (is_kdump_kernel())
71 nq
= clamp(nq
, 1, BNX2X_MAX_QUEUES(bp
));
76 * bnx2x_move_fp - move content of the fastpath structure.
79 * @from: source FP index
80 * @to: destination FP index
82 * Makes sure the contents of the bp->fp[to].napi is kept
83 * intact. This is done by first copying the napi struct from
84 * the target to the source, and then mem copying the entire
85 * source onto the target. Update txdata pointers and related
88 static inline void bnx2x_move_fp(struct bnx2x
*bp
, int from
, int to
)
90 struct bnx2x_fastpath
*from_fp
= &bp
->fp
[from
];
91 struct bnx2x_fastpath
*to_fp
= &bp
->fp
[to
];
92 struct bnx2x_sp_objs
*from_sp_objs
= &bp
->sp_objs
[from
];
93 struct bnx2x_sp_objs
*to_sp_objs
= &bp
->sp_objs
[to
];
94 struct bnx2x_fp_stats
*from_fp_stats
= &bp
->fp_stats
[from
];
95 struct bnx2x_fp_stats
*to_fp_stats
= &bp
->fp_stats
[to
];
96 int old_max_eth_txqs
, new_max_eth_txqs
;
97 int old_txdata_index
= 0, new_txdata_index
= 0;
98 struct bnx2x_agg_info
*old_tpa_info
= to_fp
->tpa_info
;
100 /* Copy the NAPI object as it has been already initialized */
101 from_fp
->napi
= to_fp
->napi
;
103 /* Move bnx2x_fastpath contents */
104 memcpy(to_fp
, from_fp
, sizeof(*to_fp
));
107 /* Retain the tpa_info of the original `to' version as we don't want
108 * 2 FPs to contain the same tpa_info pointer.
110 to_fp
->tpa_info
= old_tpa_info
;
112 /* move sp_objs contents as well, as their indices match fp ones */
113 memcpy(to_sp_objs
, from_sp_objs
, sizeof(*to_sp_objs
));
115 /* move fp_stats contents as well, as their indices match fp ones */
116 memcpy(to_fp_stats
, from_fp_stats
, sizeof(*to_fp_stats
));
118 /* Update txdata pointers in fp and move txdata content accordingly:
119 * Each fp consumes 'max_cos' txdata structures, so the index should be
120 * decremented by max_cos x delta.
123 old_max_eth_txqs
= BNX2X_NUM_ETH_QUEUES(bp
) * (bp
)->max_cos
;
124 new_max_eth_txqs
= (BNX2X_NUM_ETH_QUEUES(bp
) - from
+ to
) *
126 if (from
== FCOE_IDX(bp
)) {
127 old_txdata_index
= old_max_eth_txqs
+ FCOE_TXQ_IDX_OFFSET
;
128 new_txdata_index
= new_max_eth_txqs
+ FCOE_TXQ_IDX_OFFSET
;
131 memcpy(&bp
->bnx2x_txq
[new_txdata_index
],
132 &bp
->bnx2x_txq
[old_txdata_index
],
133 sizeof(struct bnx2x_fp_txdata
));
134 to_fp
->txdata_ptr
[0] = &bp
->bnx2x_txq
[new_txdata_index
];
138 * bnx2x_fill_fw_str - Fill buffer with FW version string.
141 * @buf: character buffer to fill with the fw name
142 * @buf_len: length of the above buffer
145 void bnx2x_fill_fw_str(struct bnx2x
*bp
, char *buf
, size_t buf_len
)
148 u8 phy_fw_ver
[PHY_FW_VER_LEN
];
150 phy_fw_ver
[0] = '\0';
151 bnx2x_get_ext_phy_fw_version(&bp
->link_params
,
152 phy_fw_ver
, PHY_FW_VER_LEN
);
153 strlcpy(buf
, bp
->fw_ver
, buf_len
);
154 snprintf(buf
+ strlen(bp
->fw_ver
), 32 - strlen(bp
->fw_ver
),
156 (bp
->common
.bc_ver
& 0xff0000) >> 16,
157 (bp
->common
.bc_ver
& 0xff00) >> 8,
158 (bp
->common
.bc_ver
& 0xff),
159 ((phy_fw_ver
[0] != '\0') ? " phy " : ""), phy_fw_ver
);
161 bnx2x_vf_fill_fw_str(bp
, buf
, buf_len
);
166 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
169 * @delta: number of eth queues which were not allocated
171 static void bnx2x_shrink_eth_fp(struct bnx2x
*bp
, int delta
)
173 int i
, cos
, old_eth_num
= BNX2X_NUM_ETH_QUEUES(bp
);
175 /* Queue pointer cannot be re-set on an fp-basis, as moving pointer
176 * backward along the array could cause memory to be overridden
178 for (cos
= 1; cos
< bp
->max_cos
; cos
++) {
179 for (i
= 0; i
< old_eth_num
- delta
; i
++) {
180 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
181 int new_idx
= cos
* (old_eth_num
- delta
) + i
;
183 memcpy(&bp
->bnx2x_txq
[new_idx
], fp
->txdata_ptr
[cos
],
184 sizeof(struct bnx2x_fp_txdata
));
185 fp
->txdata_ptr
[cos
] = &bp
->bnx2x_txq
[new_idx
];
190 int bnx2x_load_count
[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
192 /* free skb in the packet ring at pos idx
193 * return idx of last bd freed
195 static u16
bnx2x_free_tx_pkt(struct bnx2x
*bp
, struct bnx2x_fp_txdata
*txdata
,
196 u16 idx
, unsigned int *pkts_compl
,
197 unsigned int *bytes_compl
)
199 struct sw_tx_bd
*tx_buf
= &txdata
->tx_buf_ring
[idx
];
200 struct eth_tx_start_bd
*tx_start_bd
;
201 struct eth_tx_bd
*tx_data_bd
;
202 struct sk_buff
*skb
= tx_buf
->skb
;
203 u16 bd_idx
= TX_BD(tx_buf
->first_bd
), new_cons
;
205 u16 split_bd_len
= 0;
207 /* prefetch skb end pointer to speedup dev_kfree_skb() */
210 DP(NETIF_MSG_TX_DONE
, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
211 txdata
->txq_index
, idx
, tx_buf
, skb
);
213 tx_start_bd
= &txdata
->tx_desc_ring
[bd_idx
].start_bd
;
215 nbd
= le16_to_cpu(tx_start_bd
->nbd
) - 1;
216 #ifdef BNX2X_STOP_ON_ERROR
217 if ((nbd
- 1) > (MAX_SKB_FRAGS
+ 2)) {
218 BNX2X_ERR("BAD nbd!\n");
222 new_cons
= nbd
+ tx_buf
->first_bd
;
224 /* Get the next bd */
225 bd_idx
= TX_BD(NEXT_TX_IDX(bd_idx
));
227 /* Skip a parse bd... */
229 bd_idx
= TX_BD(NEXT_TX_IDX(bd_idx
));
231 if (tx_buf
->flags
& BNX2X_HAS_SECOND_PBD
) {
232 /* Skip second parse bd... */
234 bd_idx
= TX_BD(NEXT_TX_IDX(bd_idx
));
237 /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
238 if (tx_buf
->flags
& BNX2X_TSO_SPLIT_BD
) {
239 tx_data_bd
= &txdata
->tx_desc_ring
[bd_idx
].reg_bd
;
240 split_bd_len
= BD_UNMAP_LEN(tx_data_bd
);
242 bd_idx
= TX_BD(NEXT_TX_IDX(bd_idx
));
246 dma_unmap_single(&bp
->pdev
->dev
, BD_UNMAP_ADDR(tx_start_bd
),
247 BD_UNMAP_LEN(tx_start_bd
) + split_bd_len
,
253 tx_data_bd
= &txdata
->tx_desc_ring
[bd_idx
].reg_bd
;
254 dma_unmap_page(&bp
->pdev
->dev
, BD_UNMAP_ADDR(tx_data_bd
),
255 BD_UNMAP_LEN(tx_data_bd
), DMA_TO_DEVICE
);
257 bd_idx
= TX_BD(NEXT_TX_IDX(bd_idx
));
264 (*bytes_compl
) += skb
->len
;
265 dev_kfree_skb_any(skb
);
268 tx_buf
->first_bd
= 0;
274 int bnx2x_tx_int(struct bnx2x
*bp
, struct bnx2x_fp_txdata
*txdata
)
276 struct netdev_queue
*txq
;
277 u16 hw_cons
, sw_cons
, bd_cons
= txdata
->tx_bd_cons
;
278 unsigned int pkts_compl
= 0, bytes_compl
= 0;
280 #ifdef BNX2X_STOP_ON_ERROR
281 if (unlikely(bp
->panic
))
285 txq
= netdev_get_tx_queue(bp
->dev
, txdata
->txq_index
);
286 hw_cons
= le16_to_cpu(*txdata
->tx_cons_sb
);
287 sw_cons
= txdata
->tx_pkt_cons
;
289 while (sw_cons
!= hw_cons
) {
292 pkt_cons
= TX_BD(sw_cons
);
294 DP(NETIF_MSG_TX_DONE
,
295 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
296 txdata
->txq_index
, hw_cons
, sw_cons
, pkt_cons
);
298 bd_cons
= bnx2x_free_tx_pkt(bp
, txdata
, pkt_cons
,
299 &pkts_compl
, &bytes_compl
);
304 netdev_tx_completed_queue(txq
, pkts_compl
, bytes_compl
);
306 txdata
->tx_pkt_cons
= sw_cons
;
307 txdata
->tx_bd_cons
= bd_cons
;
309 /* Need to make the tx_bd_cons update visible to start_xmit()
310 * before checking for netif_tx_queue_stopped(). Without the
311 * memory barrier, there is a small possibility that
312 * start_xmit() will miss it and cause the queue to be stopped
314 * On the other hand we need an rmb() here to ensure the proper
315 * ordering of bit testing in the following
316 * netif_tx_queue_stopped(txq) call.
320 if (unlikely(netif_tx_queue_stopped(txq
))) {
321 /* Taking tx_lock() is needed to prevent re-enabling the queue
322 * while it's empty. This could have happen if rx_action() gets
323 * suspended in bnx2x_tx_int() after the condition before
324 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
326 * stops the queue->sees fresh tx_bd_cons->releases the queue->
327 * sends some packets consuming the whole queue again->
331 __netif_tx_lock(txq
, smp_processor_id());
333 if ((netif_tx_queue_stopped(txq
)) &&
334 (bp
->state
== BNX2X_STATE_OPEN
) &&
335 (bnx2x_tx_avail(bp
, txdata
) >= MAX_DESC_PER_TX_PKT
))
336 netif_tx_wake_queue(txq
);
338 __netif_tx_unlock(txq
);
343 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath
*fp
,
346 u16 last_max
= fp
->last_max_sge
;
348 if (SUB_S16(idx
, last_max
) > 0)
349 fp
->last_max_sge
= idx
;
352 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath
*fp
,
354 struct eth_end_agg_rx_cqe
*cqe
)
356 struct bnx2x
*bp
= fp
->bp
;
357 u16 last_max
, last_elem
, first_elem
;
364 /* First mark all used pages */
365 for (i
= 0; i
< sge_len
; i
++)
366 BIT_VEC64_CLEAR_BIT(fp
->sge_mask
,
367 RX_SGE(le16_to_cpu(cqe
->sgl_or_raw_data
.sgl
[i
])));
369 DP(NETIF_MSG_RX_STATUS
, "fp_cqe->sgl[%d] = %d\n",
370 sge_len
- 1, le16_to_cpu(cqe
->sgl_or_raw_data
.sgl
[sge_len
- 1]));
372 /* Here we assume that the last SGE index is the biggest */
373 prefetch((void *)(fp
->sge_mask
));
374 bnx2x_update_last_max_sge(fp
,
375 le16_to_cpu(cqe
->sgl_or_raw_data
.sgl
[sge_len
- 1]));
377 last_max
= RX_SGE(fp
->last_max_sge
);
378 last_elem
= last_max
>> BIT_VEC64_ELEM_SHIFT
;
379 first_elem
= RX_SGE(fp
->rx_sge_prod
) >> BIT_VEC64_ELEM_SHIFT
;
381 /* If ring is not full */
382 if (last_elem
+ 1 != first_elem
)
385 /* Now update the prod */
386 for (i
= first_elem
; i
!= last_elem
; i
= NEXT_SGE_MASK_ELEM(i
)) {
387 if (likely(fp
->sge_mask
[i
]))
390 fp
->sge_mask
[i
] = BIT_VEC64_ELEM_ONE_MASK
;
391 delta
+= BIT_VEC64_ELEM_SZ
;
395 fp
->rx_sge_prod
+= delta
;
396 /* clear page-end entries */
397 bnx2x_clear_sge_mask_next_elems(fp
);
400 DP(NETIF_MSG_RX_STATUS
,
401 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
402 fp
->last_max_sge
, fp
->rx_sge_prod
);
405 /* Get Toeplitz hash value in the skb using the value from the
406 * CQE (calculated by HW).
408 static u32
bnx2x_get_rxhash(const struct bnx2x
*bp
,
409 const struct eth_fast_path_rx_cqe
*cqe
,
410 enum pkt_hash_types
*rxhash_type
)
412 /* Get Toeplitz hash from CQE */
413 if ((bp
->dev
->features
& NETIF_F_RXHASH
) &&
414 (cqe
->status_flags
& ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG
)) {
415 enum eth_rss_hash_type htype
;
417 htype
= cqe
->status_flags
& ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE
;
418 *rxhash_type
= ((htype
== TCP_IPV4_HASH_TYPE
) ||
419 (htype
== TCP_IPV6_HASH_TYPE
)) ?
420 PKT_HASH_TYPE_L4
: PKT_HASH_TYPE_L3
;
422 return le32_to_cpu(cqe
->rss_hash_result
);
424 *rxhash_type
= PKT_HASH_TYPE_NONE
;
428 static void bnx2x_tpa_start(struct bnx2x_fastpath
*fp
, u16 queue
,
430 struct eth_fast_path_rx_cqe
*cqe
)
432 struct bnx2x
*bp
= fp
->bp
;
433 struct sw_rx_bd
*cons_rx_buf
= &fp
->rx_buf_ring
[cons
];
434 struct sw_rx_bd
*prod_rx_buf
= &fp
->rx_buf_ring
[prod
];
435 struct eth_rx_bd
*prod_bd
= &fp
->rx_desc_ring
[prod
];
437 struct bnx2x_agg_info
*tpa_info
= &fp
->tpa_info
[queue
];
438 struct sw_rx_bd
*first_buf
= &tpa_info
->first_buf
;
440 /* print error if current state != stop */
441 if (tpa_info
->tpa_state
!= BNX2X_TPA_STOP
)
442 BNX2X_ERR("start of bin not in stop [%d]\n", queue
);
444 /* Try to map an empty data buffer from the aggregation info */
445 mapping
= dma_map_single(&bp
->pdev
->dev
,
446 first_buf
->data
+ NET_SKB_PAD
,
447 fp
->rx_buf_size
, DMA_FROM_DEVICE
);
449 * ...if it fails - move the skb from the consumer to the producer
450 * and set the current aggregation state as ERROR to drop it
451 * when TPA_STOP arrives.
454 if (unlikely(dma_mapping_error(&bp
->pdev
->dev
, mapping
))) {
455 /* Move the BD from the consumer to the producer */
456 bnx2x_reuse_rx_data(fp
, cons
, prod
);
457 tpa_info
->tpa_state
= BNX2X_TPA_ERROR
;
461 /* move empty data from pool to prod */
462 prod_rx_buf
->data
= first_buf
->data
;
463 dma_unmap_addr_set(prod_rx_buf
, mapping
, mapping
);
464 /* point prod_bd to new data */
465 prod_bd
->addr_hi
= cpu_to_le32(U64_HI(mapping
));
466 prod_bd
->addr_lo
= cpu_to_le32(U64_LO(mapping
));
468 /* move partial skb from cons to pool (don't unmap yet) */
469 *first_buf
= *cons_rx_buf
;
471 /* mark bin state as START */
472 tpa_info
->parsing_flags
=
473 le16_to_cpu(cqe
->pars_flags
.flags
);
474 tpa_info
->vlan_tag
= le16_to_cpu(cqe
->vlan_tag
);
475 tpa_info
->tpa_state
= BNX2X_TPA_START
;
476 tpa_info
->len_on_bd
= le16_to_cpu(cqe
->len_on_bd
);
477 tpa_info
->placement_offset
= cqe
->placement_offset
;
478 tpa_info
->rxhash
= bnx2x_get_rxhash(bp
, cqe
, &tpa_info
->rxhash_type
);
479 if (fp
->mode
== TPA_MODE_GRO
) {
480 u16 gro_size
= le16_to_cpu(cqe
->pkt_len_or_gro_seg_len
);
481 tpa_info
->full_page
= SGE_PAGES
/ gro_size
* gro_size
;
482 tpa_info
->gro_size
= gro_size
;
485 #ifdef BNX2X_STOP_ON_ERROR
486 fp
->tpa_queue_used
|= (1 << queue
);
487 DP(NETIF_MSG_RX_STATUS
, "fp->tpa_queue_used = 0x%llx\n",
492 /* Timestamp option length allowed for TPA aggregation:
494 * nop nop kind length echo val
496 #define TPA_TSTAMP_OPT_LEN 12
498 * bnx2x_set_gro_params - compute GRO values
501 * @parsing_flags: parsing flags from the START CQE
502 * @len_on_bd: total length of the first packet for the
504 * @pkt_len: length of all segments
506 * Approximate value of the MSS for this aggregation calculated using
507 * the first packet of it.
508 * Compute number of aggregated segments, and gso_type.
510 static void bnx2x_set_gro_params(struct sk_buff
*skb
, u16 parsing_flags
,
511 u16 len_on_bd
, unsigned int pkt_len
,
512 u16 num_of_coalesced_segs
)
514 /* TPA aggregation won't have either IP options or TCP options
515 * other than timestamp or IPv6 extension headers.
517 u16 hdrs_len
= ETH_HLEN
+ sizeof(struct tcphdr
);
519 if (GET_FLAG(parsing_flags
, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL
) ==
520 PRS_FLAG_OVERETH_IPV6
) {
521 hdrs_len
+= sizeof(struct ipv6hdr
);
522 skb_shinfo(skb
)->gso_type
= SKB_GSO_TCPV6
;
524 hdrs_len
+= sizeof(struct iphdr
);
525 skb_shinfo(skb
)->gso_type
= SKB_GSO_TCPV4
;
528 /* Check if there was a TCP timestamp, if there is it's will
529 * always be 12 bytes length: nop nop kind length echo val.
531 * Otherwise FW would close the aggregation.
533 if (parsing_flags
& PARSING_FLAGS_TIME_STAMP_EXIST_FLAG
)
534 hdrs_len
+= TPA_TSTAMP_OPT_LEN
;
536 skb_shinfo(skb
)->gso_size
= len_on_bd
- hdrs_len
;
538 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
539 * to skb_shinfo(skb)->gso_segs
541 NAPI_GRO_CB(skb
)->count
= num_of_coalesced_segs
;
544 static int bnx2x_alloc_rx_sge(struct bnx2x
*bp
, struct bnx2x_fastpath
*fp
,
545 u16 index
, gfp_t gfp_mask
)
547 struct sw_rx_page
*sw_buf
= &fp
->rx_page_ring
[index
];
548 struct eth_rx_sge
*sge
= &fp
->rx_sge_ring
[index
];
549 struct bnx2x_alloc_pool
*pool
= &fp
->page_pool
;
552 if (!pool
->page
|| (PAGE_SIZE
- pool
->offset
) < SGE_PAGE_SIZE
) {
554 /* put page reference used by the memory pool, since we
555 * won't be using this page as the mempool anymore.
558 put_page(pool
->page
);
560 pool
->page
= alloc_pages(gfp_mask
, PAGES_PER_SGE_SHIFT
);
561 if (unlikely(!pool
->page
)) {
562 BNX2X_ERR("Can't alloc sge\n");
569 mapping
= dma_map_page(&bp
->pdev
->dev
, pool
->page
,
570 pool
->offset
, SGE_PAGE_SIZE
, DMA_FROM_DEVICE
);
571 if (unlikely(dma_mapping_error(&bp
->pdev
->dev
, mapping
))) {
572 BNX2X_ERR("Can't map sge\n");
576 get_page(pool
->page
);
577 sw_buf
->page
= pool
->page
;
578 sw_buf
->offset
= pool
->offset
;
580 dma_unmap_addr_set(sw_buf
, mapping
, mapping
);
582 sge
->addr_hi
= cpu_to_le32(U64_HI(mapping
));
583 sge
->addr_lo
= cpu_to_le32(U64_LO(mapping
));
585 pool
->offset
+= SGE_PAGE_SIZE
;
590 static int bnx2x_fill_frag_skb(struct bnx2x
*bp
, struct bnx2x_fastpath
*fp
,
591 struct bnx2x_agg_info
*tpa_info
,
594 struct eth_end_agg_rx_cqe
*cqe
,
597 struct sw_rx_page
*rx_pg
, old_rx_pg
;
598 u32 i
, frag_len
, frag_size
;
599 int err
, j
, frag_id
= 0;
600 u16 len_on_bd
= tpa_info
->len_on_bd
;
601 u16 full_page
= 0, gro_size
= 0;
603 frag_size
= le16_to_cpu(cqe
->pkt_len
) - len_on_bd
;
605 if (fp
->mode
== TPA_MODE_GRO
) {
606 gro_size
= tpa_info
->gro_size
;
607 full_page
= tpa_info
->full_page
;
610 /* This is needed in order to enable forwarding support */
612 bnx2x_set_gro_params(skb
, tpa_info
->parsing_flags
, len_on_bd
,
613 le16_to_cpu(cqe
->pkt_len
),
614 le16_to_cpu(cqe
->num_of_coalesced_segs
));
616 #ifdef BNX2X_STOP_ON_ERROR
617 if (pages
> min_t(u32
, 8, MAX_SKB_FRAGS
) * SGE_PAGES
) {
618 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
620 BNX2X_ERR("cqe->pkt_len = %d\n", cqe
->pkt_len
);
626 /* Run through the SGL and compose the fragmented skb */
627 for (i
= 0, j
= 0; i
< pages
; i
+= PAGES_PER_SGE
, j
++) {
628 u16 sge_idx
= RX_SGE(le16_to_cpu(cqe
->sgl_or_raw_data
.sgl
[j
]));
630 /* FW gives the indices of the SGE as if the ring is an array
631 (meaning that "next" element will consume 2 indices) */
632 if (fp
->mode
== TPA_MODE_GRO
)
633 frag_len
= min_t(u32
, frag_size
, (u32
)full_page
);
635 frag_len
= min_t(u32
, frag_size
, (u32
)SGE_PAGES
);
637 rx_pg
= &fp
->rx_page_ring
[sge_idx
];
640 /* If we fail to allocate a substitute page, we simply stop
641 where we are and drop the whole packet */
642 err
= bnx2x_alloc_rx_sge(bp
, fp
, sge_idx
, GFP_ATOMIC
);
644 bnx2x_fp_qstats(bp
, fp
)->rx_skb_alloc_failed
++;
648 dma_unmap_page(&bp
->pdev
->dev
,
649 dma_unmap_addr(&old_rx_pg
, mapping
),
650 SGE_PAGE_SIZE
, DMA_FROM_DEVICE
);
651 /* Add one frag and update the appropriate fields in the skb */
652 if (fp
->mode
== TPA_MODE_LRO
)
653 skb_fill_page_desc(skb
, j
, old_rx_pg
.page
,
654 old_rx_pg
.offset
, frag_len
);
658 for (rem
= frag_len
; rem
> 0; rem
-= gro_size
) {
659 int len
= rem
> gro_size
? gro_size
: rem
;
660 skb_fill_page_desc(skb
, frag_id
++,
662 old_rx_pg
.offset
+ offset
,
665 get_page(old_rx_pg
.page
);
670 skb
->data_len
+= frag_len
;
671 skb
->truesize
+= SGE_PAGES
;
672 skb
->len
+= frag_len
;
674 frag_size
-= frag_len
;
680 static void bnx2x_frag_free(const struct bnx2x_fastpath
*fp
, void *data
)
682 if (fp
->rx_frag_size
)
688 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath
*fp
, gfp_t gfp_mask
)
690 if (fp
->rx_frag_size
) {
691 /* GFP_KERNEL allocations are used only during initialization */
692 if (unlikely(gfpflags_allow_blocking(gfp_mask
)))
693 return (void *)__get_free_page(gfp_mask
);
695 return netdev_alloc_frag(fp
->rx_frag_size
);
698 return kmalloc(fp
->rx_buf_size
+ NET_SKB_PAD
, gfp_mask
);
702 static void bnx2x_gro_ip_csum(struct bnx2x
*bp
, struct sk_buff
*skb
)
704 const struct iphdr
*iph
= ip_hdr(skb
);
707 skb_set_transport_header(skb
, sizeof(struct iphdr
));
710 th
->check
= ~tcp_v4_check(skb
->len
- skb_transport_offset(skb
),
711 iph
->saddr
, iph
->daddr
, 0);
714 static void bnx2x_gro_ipv6_csum(struct bnx2x
*bp
, struct sk_buff
*skb
)
716 struct ipv6hdr
*iph
= ipv6_hdr(skb
);
719 skb_set_transport_header(skb
, sizeof(struct ipv6hdr
));
722 th
->check
= ~tcp_v6_check(skb
->len
- skb_transport_offset(skb
),
723 &iph
->saddr
, &iph
->daddr
, 0);
726 static void bnx2x_gro_csum(struct bnx2x
*bp
, struct sk_buff
*skb
,
727 void (*gro_func
)(struct bnx2x
*, struct sk_buff
*))
729 skb_set_network_header(skb
, 0);
731 tcp_gro_complete(skb
);
735 static void bnx2x_gro_receive(struct bnx2x
*bp
, struct bnx2x_fastpath
*fp
,
739 if (skb_shinfo(skb
)->gso_size
) {
740 switch (be16_to_cpu(skb
->protocol
)) {
742 bnx2x_gro_csum(bp
, skb
, bnx2x_gro_ip_csum
);
745 bnx2x_gro_csum(bp
, skb
, bnx2x_gro_ipv6_csum
);
748 BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
749 be16_to_cpu(skb
->protocol
));
753 skb_record_rx_queue(skb
, fp
->rx_queue
);
754 napi_gro_receive(&fp
->napi
, skb
);
757 static void bnx2x_tpa_stop(struct bnx2x
*bp
, struct bnx2x_fastpath
*fp
,
758 struct bnx2x_agg_info
*tpa_info
,
760 struct eth_end_agg_rx_cqe
*cqe
,
763 struct sw_rx_bd
*rx_buf
= &tpa_info
->first_buf
;
764 u8 pad
= tpa_info
->placement_offset
;
765 u16 len
= tpa_info
->len_on_bd
;
766 struct sk_buff
*skb
= NULL
;
767 u8
*new_data
, *data
= rx_buf
->data
;
768 u8 old_tpa_state
= tpa_info
->tpa_state
;
770 tpa_info
->tpa_state
= BNX2X_TPA_STOP
;
772 /* If we there was an error during the handling of the TPA_START -
773 * drop this aggregation.
775 if (old_tpa_state
== BNX2X_TPA_ERROR
)
778 /* Try to allocate the new data */
779 new_data
= bnx2x_frag_alloc(fp
, GFP_ATOMIC
);
780 /* Unmap skb in the pool anyway, as we are going to change
781 pool entry status to BNX2X_TPA_STOP even if new skb allocation
783 dma_unmap_single(&bp
->pdev
->dev
, dma_unmap_addr(rx_buf
, mapping
),
784 fp
->rx_buf_size
, DMA_FROM_DEVICE
);
785 if (likely(new_data
))
786 skb
= build_skb(data
, fp
->rx_frag_size
);
789 #ifdef BNX2X_STOP_ON_ERROR
790 if (pad
+ len
> fp
->rx_buf_size
) {
791 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
792 pad
, len
, fp
->rx_buf_size
);
798 skb_reserve(skb
, pad
+ NET_SKB_PAD
);
800 skb_set_hash(skb
, tpa_info
->rxhash
, tpa_info
->rxhash_type
);
802 skb
->protocol
= eth_type_trans(skb
, bp
->dev
);
803 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
805 if (!bnx2x_fill_frag_skb(bp
, fp
, tpa_info
, pages
,
806 skb
, cqe
, cqe_idx
)) {
807 if (tpa_info
->parsing_flags
& PARSING_FLAGS_VLAN
)
808 __vlan_hwaccel_put_tag(skb
, htons(ETH_P_8021Q
), tpa_info
->vlan_tag
);
809 bnx2x_gro_receive(bp
, fp
, skb
);
811 DP(NETIF_MSG_RX_STATUS
,
812 "Failed to allocate new pages - dropping packet!\n");
813 dev_kfree_skb_any(skb
);
816 /* put new data in bin */
817 rx_buf
->data
= new_data
;
822 bnx2x_frag_free(fp
, new_data
);
824 /* drop the packet and keep the buffer in the bin */
825 DP(NETIF_MSG_RX_STATUS
,
826 "Failed to allocate or map a new skb - dropping packet!\n");
827 bnx2x_fp_stats(bp
, fp
)->eth_q_stats
.rx_skb_alloc_failed
++;
830 static int bnx2x_alloc_rx_data(struct bnx2x
*bp
, struct bnx2x_fastpath
*fp
,
831 u16 index
, gfp_t gfp_mask
)
834 struct sw_rx_bd
*rx_buf
= &fp
->rx_buf_ring
[index
];
835 struct eth_rx_bd
*rx_bd
= &fp
->rx_desc_ring
[index
];
838 data
= bnx2x_frag_alloc(fp
, gfp_mask
);
839 if (unlikely(data
== NULL
))
842 mapping
= dma_map_single(&bp
->pdev
->dev
, data
+ NET_SKB_PAD
,
845 if (unlikely(dma_mapping_error(&bp
->pdev
->dev
, mapping
))) {
846 bnx2x_frag_free(fp
, data
);
847 BNX2X_ERR("Can't map rx data\n");
852 dma_unmap_addr_set(rx_buf
, mapping
, mapping
);
854 rx_bd
->addr_hi
= cpu_to_le32(U64_HI(mapping
));
855 rx_bd
->addr_lo
= cpu_to_le32(U64_LO(mapping
));
861 void bnx2x_csum_validate(struct sk_buff
*skb
, union eth_rx_cqe
*cqe
,
862 struct bnx2x_fastpath
*fp
,
863 struct bnx2x_eth_q_stats
*qstats
)
865 /* Do nothing if no L4 csum validation was done.
866 * We do not check whether IP csum was validated. For IPv4 we assume
867 * that if the card got as far as validating the L4 csum, it also
868 * validated the IP csum. IPv6 has no IP csum.
870 if (cqe
->fast_path_cqe
.status_flags
&
871 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG
)
874 /* If L4 validation was done, check if an error was found. */
876 if (cqe
->fast_path_cqe
.type_error_flags
&
877 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG
|
878 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG
))
879 qstats
->hw_csum_err
++;
881 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
884 static int bnx2x_rx_int(struct bnx2x_fastpath
*fp
, int budget
)
886 struct bnx2x
*bp
= fp
->bp
;
887 u16 bd_cons
, bd_prod
, bd_prod_fw
, comp_ring_cons
;
888 u16 sw_comp_cons
, sw_comp_prod
;
890 union eth_rx_cqe
*cqe
;
891 struct eth_fast_path_rx_cqe
*cqe_fp
;
893 #ifdef BNX2X_STOP_ON_ERROR
894 if (unlikely(bp
->panic
))
900 bd_cons
= fp
->rx_bd_cons
;
901 bd_prod
= fp
->rx_bd_prod
;
902 bd_prod_fw
= bd_prod
;
903 sw_comp_cons
= fp
->rx_comp_cons
;
904 sw_comp_prod
= fp
->rx_comp_prod
;
906 comp_ring_cons
= RCQ_BD(sw_comp_cons
);
907 cqe
= &fp
->rx_comp_ring
[comp_ring_cons
];
908 cqe_fp
= &cqe
->fast_path_cqe
;
910 DP(NETIF_MSG_RX_STATUS
,
911 "queue[%d]: sw_comp_cons %u\n", fp
->index
, sw_comp_cons
);
913 while (BNX2X_IS_CQE_COMPLETED(cqe_fp
)) {
914 struct sw_rx_bd
*rx_buf
= NULL
;
917 enum eth_rx_cqe_type cqe_fp_type
;
921 enum pkt_hash_types rxhash_type
;
923 #ifdef BNX2X_STOP_ON_ERROR
924 if (unlikely(bp
->panic
))
928 bd_prod
= RX_BD(bd_prod
);
929 bd_cons
= RX_BD(bd_cons
);
931 /* A rmb() is required to ensure that the CQE is not read
932 * before it is written by the adapter DMA. PCI ordering
933 * rules will make sure the other fields are written before
934 * the marker at the end of struct eth_fast_path_rx_cqe
935 * but without rmb() a weakly ordered processor can process
936 * stale data. Without the barrier TPA state-machine might
937 * enter inconsistent state and kernel stack might be
938 * provided with incorrect packet description - these lead
939 * to various kernel crashed.
943 cqe_fp_flags
= cqe_fp
->type_error_flags
;
944 cqe_fp_type
= cqe_fp_flags
& ETH_FAST_PATH_RX_CQE_TYPE
;
946 DP(NETIF_MSG_RX_STATUS
,
947 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
948 CQE_TYPE(cqe_fp_flags
),
949 cqe_fp_flags
, cqe_fp
->status_flags
,
950 le32_to_cpu(cqe_fp
->rss_hash_result
),
951 le16_to_cpu(cqe_fp
->vlan_tag
),
952 le16_to_cpu(cqe_fp
->pkt_len_or_gro_seg_len
));
954 /* is this a slowpath msg? */
955 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type
))) {
956 bnx2x_sp_event(fp
, cqe
);
960 rx_buf
= &fp
->rx_buf_ring
[bd_cons
];
963 if (!CQE_TYPE_FAST(cqe_fp_type
)) {
964 struct bnx2x_agg_info
*tpa_info
;
965 u16 frag_size
, pages
;
966 #ifdef BNX2X_STOP_ON_ERROR
968 if (fp
->mode
== TPA_MODE_DISABLED
&&
969 (CQE_TYPE_START(cqe_fp_type
) ||
970 CQE_TYPE_STOP(cqe_fp_type
)))
971 BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
972 CQE_TYPE(cqe_fp_type
));
975 if (CQE_TYPE_START(cqe_fp_type
)) {
976 u16 queue
= cqe_fp
->queue_index
;
977 DP(NETIF_MSG_RX_STATUS
,
978 "calling tpa_start on queue %d\n",
981 bnx2x_tpa_start(fp
, queue
,
987 queue
= cqe
->end_agg_cqe
.queue_index
;
988 tpa_info
= &fp
->tpa_info
[queue
];
989 DP(NETIF_MSG_RX_STATUS
,
990 "calling tpa_stop on queue %d\n",
993 frag_size
= le16_to_cpu(cqe
->end_agg_cqe
.pkt_len
) -
996 if (fp
->mode
== TPA_MODE_GRO
)
997 pages
= (frag_size
+ tpa_info
->full_page
- 1) /
1000 pages
= SGE_PAGE_ALIGN(frag_size
) >>
1003 bnx2x_tpa_stop(bp
, fp
, tpa_info
, pages
,
1004 &cqe
->end_agg_cqe
, comp_ring_cons
);
1005 #ifdef BNX2X_STOP_ON_ERROR
1010 bnx2x_update_sge_prod(fp
, pages
, &cqe
->end_agg_cqe
);
1014 len
= le16_to_cpu(cqe_fp
->pkt_len_or_gro_seg_len
);
1015 pad
= cqe_fp
->placement_offset
;
1016 dma_sync_single_for_cpu(&bp
->pdev
->dev
,
1017 dma_unmap_addr(rx_buf
, mapping
),
1018 pad
+ RX_COPY_THRESH
,
1021 prefetch(data
+ pad
); /* speedup eth_type_trans() */
1022 /* is this an error packet? */
1023 if (unlikely(cqe_fp_flags
& ETH_RX_ERROR_FALGS
)) {
1024 DP(NETIF_MSG_RX_ERR
| NETIF_MSG_RX_STATUS
,
1025 "ERROR flags %x rx packet %u\n",
1026 cqe_fp_flags
, sw_comp_cons
);
1027 bnx2x_fp_qstats(bp
, fp
)->rx_err_discard_pkt
++;
1031 /* Since we don't have a jumbo ring
1032 * copy small packets if mtu > 1500
1034 if ((bp
->dev
->mtu
> ETH_MAX_PACKET_SIZE
) &&
1035 (len
<= RX_COPY_THRESH
)) {
1036 skb
= napi_alloc_skb(&fp
->napi
, len
);
1038 DP(NETIF_MSG_RX_ERR
| NETIF_MSG_RX_STATUS
,
1039 "ERROR packet dropped because of alloc failure\n");
1040 bnx2x_fp_qstats(bp
, fp
)->rx_skb_alloc_failed
++;
1043 memcpy(skb
->data
, data
+ pad
, len
);
1044 bnx2x_reuse_rx_data(fp
, bd_cons
, bd_prod
);
1046 if (likely(bnx2x_alloc_rx_data(bp
, fp
, bd_prod
,
1047 GFP_ATOMIC
) == 0)) {
1048 dma_unmap_single(&bp
->pdev
->dev
,
1049 dma_unmap_addr(rx_buf
, mapping
),
1052 skb
= build_skb(data
, fp
->rx_frag_size
);
1053 if (unlikely(!skb
)) {
1054 bnx2x_frag_free(fp
, data
);
1055 bnx2x_fp_qstats(bp
, fp
)->
1056 rx_skb_alloc_failed
++;
1059 skb_reserve(skb
, pad
);
1061 DP(NETIF_MSG_RX_ERR
| NETIF_MSG_RX_STATUS
,
1062 "ERROR packet dropped because of alloc failure\n");
1063 bnx2x_fp_qstats(bp
, fp
)->rx_skb_alloc_failed
++;
1065 bnx2x_reuse_rx_data(fp
, bd_cons
, bd_prod
);
1071 skb
->protocol
= eth_type_trans(skb
, bp
->dev
);
1073 /* Set Toeplitz hash for a none-LRO skb */
1074 rxhash
= bnx2x_get_rxhash(bp
, cqe_fp
, &rxhash_type
);
1075 skb_set_hash(skb
, rxhash
, rxhash_type
);
1077 skb_checksum_none_assert(skb
);
1079 if (bp
->dev
->features
& NETIF_F_RXCSUM
)
1080 bnx2x_csum_validate(skb
, cqe
, fp
,
1081 bnx2x_fp_qstats(bp
, fp
));
1083 skb_record_rx_queue(skb
, fp
->rx_queue
);
1085 /* Check if this packet was timestamped */
1086 if (unlikely(cqe
->fast_path_cqe
.type_error_flags
&
1087 (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT
)))
1088 bnx2x_set_rx_ts(bp
, skb
);
1090 if (le16_to_cpu(cqe_fp
->pars_flags
.flags
) &
1092 __vlan_hwaccel_put_tag(skb
, htons(ETH_P_8021Q
),
1093 le16_to_cpu(cqe_fp
->vlan_tag
));
1095 napi_gro_receive(&fp
->napi
, skb
);
1097 rx_buf
->data
= NULL
;
1099 bd_cons
= NEXT_RX_IDX(bd_cons
);
1100 bd_prod
= NEXT_RX_IDX(bd_prod
);
1101 bd_prod_fw
= NEXT_RX_IDX(bd_prod_fw
);
1104 sw_comp_prod
= NEXT_RCQ_IDX(sw_comp_prod
);
1105 sw_comp_cons
= NEXT_RCQ_IDX(sw_comp_cons
);
1107 /* mark CQE as free */
1108 BNX2X_SEED_CQE(cqe_fp
);
1110 if (rx_pkt
== budget
)
1113 comp_ring_cons
= RCQ_BD(sw_comp_cons
);
1114 cqe
= &fp
->rx_comp_ring
[comp_ring_cons
];
1115 cqe_fp
= &cqe
->fast_path_cqe
;
1118 fp
->rx_bd_cons
= bd_cons
;
1119 fp
->rx_bd_prod
= bd_prod_fw
;
1120 fp
->rx_comp_cons
= sw_comp_cons
;
1121 fp
->rx_comp_prod
= sw_comp_prod
;
1123 /* Update producers */
1124 bnx2x_update_rx_prod(bp
, fp
, bd_prod_fw
, sw_comp_prod
,
1127 fp
->rx_pkt
+= rx_pkt
;
1133 static irqreturn_t
bnx2x_msix_fp_int(int irq
, void *fp_cookie
)
1135 struct bnx2x_fastpath
*fp
= fp_cookie
;
1136 struct bnx2x
*bp
= fp
->bp
;
1140 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1141 fp
->index
, fp
->fw_sb_id
, fp
->igu_sb_id
);
1143 bnx2x_ack_sb(bp
, fp
->igu_sb_id
, USTORM_ID
, 0, IGU_INT_DISABLE
, 0);
1145 #ifdef BNX2X_STOP_ON_ERROR
1146 if (unlikely(bp
->panic
))
1150 /* Handle Rx and Tx according to MSI-X vector */
1151 for_each_cos_in_tx_queue(fp
, cos
)
1152 prefetch(fp
->txdata_ptr
[cos
]->tx_cons_sb
);
1154 prefetch(&fp
->sb_running_index
[SM_RX_ID
]);
1155 napi_schedule_irqoff(&bnx2x_fp(bp
, fp
->index
, napi
));
1160 /* HW Lock for shared dual port PHYs */
1161 void bnx2x_acquire_phy_lock(struct bnx2x
*bp
)
1163 mutex_lock(&bp
->port
.phy_mutex
);
1165 bnx2x_acquire_hw_lock(bp
, HW_LOCK_RESOURCE_MDIO
);
1168 void bnx2x_release_phy_lock(struct bnx2x
*bp
)
1170 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_MDIO
);
1172 mutex_unlock(&bp
->port
.phy_mutex
);
1175 /* calculates MF speed according to current linespeed and MF configuration */
1176 u16
bnx2x_get_mf_speed(struct bnx2x
*bp
)
1178 u16 line_speed
= bp
->link_vars
.line_speed
;
1180 u16 maxCfg
= bnx2x_extract_max_cfg(bp
,
1181 bp
->mf_config
[BP_VN(bp
)]);
1183 /* Calculate the current MAX line speed limit for the MF
1186 if (IS_MF_PERCENT_BW(bp
))
1187 line_speed
= (line_speed
* maxCfg
) / 100;
1188 else { /* SD mode */
1189 u16 vn_max_rate
= maxCfg
* 100;
1191 if (vn_max_rate
< line_speed
)
1192 line_speed
= vn_max_rate
;
1200 * bnx2x_fill_report_data - fill link report data to report
1202 * @bp: driver handle
1203 * @data: link state to update
1205 * It uses a none-atomic bit operations because is called under the mutex.
1207 static void bnx2x_fill_report_data(struct bnx2x
*bp
,
1208 struct bnx2x_link_report_data
*data
)
1210 memset(data
, 0, sizeof(*data
));
1213 /* Fill the report data: effective line speed */
1214 data
->line_speed
= bnx2x_get_mf_speed(bp
);
1217 if (!bp
->link_vars
.link_up
|| (bp
->flags
& MF_FUNC_DIS
))
1218 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN
,
1219 &data
->link_report_flags
);
1221 if (!BNX2X_NUM_ETH_QUEUES(bp
))
1222 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN
,
1223 &data
->link_report_flags
);
1226 if (bp
->link_vars
.duplex
== DUPLEX_FULL
)
1227 __set_bit(BNX2X_LINK_REPORT_FD
,
1228 &data
->link_report_flags
);
1230 /* Rx Flow Control is ON */
1231 if (bp
->link_vars
.flow_ctrl
& BNX2X_FLOW_CTRL_RX
)
1232 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON
,
1233 &data
->link_report_flags
);
1235 /* Tx Flow Control is ON */
1236 if (bp
->link_vars
.flow_ctrl
& BNX2X_FLOW_CTRL_TX
)
1237 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON
,
1238 &data
->link_report_flags
);
1240 *data
= bp
->vf_link_vars
;
1245 * bnx2x_link_report - report link status to OS.
1247 * @bp: driver handle
1249 * Calls the __bnx2x_link_report() under the same locking scheme
1250 * as a link/PHY state managing code to ensure a consistent link
1254 void bnx2x_link_report(struct bnx2x
*bp
)
1256 bnx2x_acquire_phy_lock(bp
);
1257 __bnx2x_link_report(bp
);
1258 bnx2x_release_phy_lock(bp
);
1262 * __bnx2x_link_report - report link status to OS.
1264 * @bp: driver handle
1266 * None atomic implementation.
1267 * Should be called under the phy_lock.
1269 void __bnx2x_link_report(struct bnx2x
*bp
)
1271 struct bnx2x_link_report_data cur_data
;
1274 if (IS_PF(bp
) && !CHIP_IS_E1(bp
))
1275 bnx2x_read_mf_cfg(bp
);
1277 /* Read the current link report info */
1278 bnx2x_fill_report_data(bp
, &cur_data
);
1280 /* Don't report link down or exactly the same link status twice */
1281 if (!memcmp(&cur_data
, &bp
->last_reported_link
, sizeof(cur_data
)) ||
1282 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN
,
1283 &bp
->last_reported_link
.link_report_flags
) &&
1284 test_bit(BNX2X_LINK_REPORT_LINK_DOWN
,
1285 &cur_data
.link_report_flags
)))
1290 /* We are going to report a new link parameters now -
1291 * remember the current data for the next time.
1293 memcpy(&bp
->last_reported_link
, &cur_data
, sizeof(cur_data
));
1295 /* propagate status to VFs */
1297 bnx2x_iov_link_update(bp
);
1299 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN
,
1300 &cur_data
.link_report_flags
)) {
1301 netif_carrier_off(bp
->dev
);
1302 netdev_err(bp
->dev
, "NIC Link is Down\n");
1308 netif_carrier_on(bp
->dev
);
1310 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD
,
1311 &cur_data
.link_report_flags
))
1316 /* Handle the FC at the end so that only these flags would be
1317 * possibly set. This way we may easily check if there is no FC
1320 if (cur_data
.link_report_flags
) {
1321 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON
,
1322 &cur_data
.link_report_flags
)) {
1323 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON
,
1324 &cur_data
.link_report_flags
))
1325 flow
= "ON - receive & transmit";
1327 flow
= "ON - receive";
1329 flow
= "ON - transmit";
1334 netdev_info(bp
->dev
, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1335 cur_data
.line_speed
, duplex
, flow
);
1339 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath
*fp
)
1343 for (i
= 1; i
<= NUM_RX_SGE_PAGES
; i
++) {
1344 struct eth_rx_sge
*sge
;
1346 sge
= &fp
->rx_sge_ring
[RX_SGE_CNT
* i
- 2];
1348 cpu_to_le32(U64_HI(fp
->rx_sge_mapping
+
1349 BCM_PAGE_SIZE
*(i
% NUM_RX_SGE_PAGES
)));
1352 cpu_to_le32(U64_LO(fp
->rx_sge_mapping
+
1353 BCM_PAGE_SIZE
*(i
% NUM_RX_SGE_PAGES
)));
1357 static void bnx2x_free_tpa_pool(struct bnx2x
*bp
,
1358 struct bnx2x_fastpath
*fp
, int last
)
1362 for (i
= 0; i
< last
; i
++) {
1363 struct bnx2x_agg_info
*tpa_info
= &fp
->tpa_info
[i
];
1364 struct sw_rx_bd
*first_buf
= &tpa_info
->first_buf
;
1365 u8
*data
= first_buf
->data
;
1368 DP(NETIF_MSG_IFDOWN
, "tpa bin %d empty on free\n", i
);
1371 if (tpa_info
->tpa_state
== BNX2X_TPA_START
)
1372 dma_unmap_single(&bp
->pdev
->dev
,
1373 dma_unmap_addr(first_buf
, mapping
),
1374 fp
->rx_buf_size
, DMA_FROM_DEVICE
);
1375 bnx2x_frag_free(fp
, data
);
1376 first_buf
->data
= NULL
;
1380 void bnx2x_init_rx_rings_cnic(struct bnx2x
*bp
)
1384 for_each_rx_queue_cnic(bp
, j
) {
1385 struct bnx2x_fastpath
*fp
= &bp
->fp
[j
];
1389 /* Activate BD ring */
1391 * this will generate an interrupt (to the TSTORM)
1392 * must only be done after chip is initialized
1394 bnx2x_update_rx_prod(bp
, fp
, fp
->rx_bd_prod
, fp
->rx_comp_prod
,
1399 void bnx2x_init_rx_rings(struct bnx2x
*bp
)
1401 int func
= BP_FUNC(bp
);
1405 /* Allocate TPA resources */
1406 for_each_eth_queue(bp
, j
) {
1407 struct bnx2x_fastpath
*fp
= &bp
->fp
[j
];
1410 "mtu %d rx_buf_size %d\n", bp
->dev
->mtu
, fp
->rx_buf_size
);
1412 if (fp
->mode
!= TPA_MODE_DISABLED
) {
1413 /* Fill the per-aggregation pool */
1414 for (i
= 0; i
< MAX_AGG_QS(bp
); i
++) {
1415 struct bnx2x_agg_info
*tpa_info
=
1417 struct sw_rx_bd
*first_buf
=
1418 &tpa_info
->first_buf
;
1421 bnx2x_frag_alloc(fp
, GFP_KERNEL
);
1422 if (!first_buf
->data
) {
1423 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1425 bnx2x_free_tpa_pool(bp
, fp
, i
);
1426 fp
->mode
= TPA_MODE_DISABLED
;
1429 dma_unmap_addr_set(first_buf
, mapping
, 0);
1430 tpa_info
->tpa_state
= BNX2X_TPA_STOP
;
1433 /* "next page" elements initialization */
1434 bnx2x_set_next_page_sgl(fp
);
1436 /* set SGEs bit mask */
1437 bnx2x_init_sge_ring_bit_mask(fp
);
1439 /* Allocate SGEs and initialize the ring elements */
1440 for (i
= 0, ring_prod
= 0;
1441 i
< MAX_RX_SGE_CNT
*NUM_RX_SGE_PAGES
; i
++) {
1443 if (bnx2x_alloc_rx_sge(bp
, fp
, ring_prod
,
1445 BNX2X_ERR("was only able to allocate %d rx sges\n",
1447 BNX2X_ERR("disabling TPA for queue[%d]\n",
1449 /* Cleanup already allocated elements */
1450 bnx2x_free_rx_sge_range(bp
, fp
,
1452 bnx2x_free_tpa_pool(bp
, fp
,
1454 fp
->mode
= TPA_MODE_DISABLED
;
1458 ring_prod
= NEXT_SGE_IDX(ring_prod
);
1461 fp
->rx_sge_prod
= ring_prod
;
1465 for_each_eth_queue(bp
, j
) {
1466 struct bnx2x_fastpath
*fp
= &bp
->fp
[j
];
1470 /* Activate BD ring */
1472 * this will generate an interrupt (to the TSTORM)
1473 * must only be done after chip is initialized
1475 bnx2x_update_rx_prod(bp
, fp
, fp
->rx_bd_prod
, fp
->rx_comp_prod
,
1481 if (CHIP_IS_E1(bp
)) {
1482 REG_WR(bp
, BAR_USTRORM_INTMEM
+
1483 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func
),
1484 U64_LO(fp
->rx_comp_mapping
));
1485 REG_WR(bp
, BAR_USTRORM_INTMEM
+
1486 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func
) + 4,
1487 U64_HI(fp
->rx_comp_mapping
));
1492 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath
*fp
)
1495 struct bnx2x
*bp
= fp
->bp
;
1497 for_each_cos_in_tx_queue(fp
, cos
) {
1498 struct bnx2x_fp_txdata
*txdata
= fp
->txdata_ptr
[cos
];
1499 unsigned pkts_compl
= 0, bytes_compl
= 0;
1501 u16 sw_prod
= txdata
->tx_pkt_prod
;
1502 u16 sw_cons
= txdata
->tx_pkt_cons
;
1504 while (sw_cons
!= sw_prod
) {
1505 bnx2x_free_tx_pkt(bp
, txdata
, TX_BD(sw_cons
),
1506 &pkts_compl
, &bytes_compl
);
1510 netdev_tx_reset_queue(
1511 netdev_get_tx_queue(bp
->dev
,
1512 txdata
->txq_index
));
1516 static void bnx2x_free_tx_skbs_cnic(struct bnx2x
*bp
)
1520 for_each_tx_queue_cnic(bp
, i
) {
1521 bnx2x_free_tx_skbs_queue(&bp
->fp
[i
]);
1525 static void bnx2x_free_tx_skbs(struct bnx2x
*bp
)
1529 for_each_eth_queue(bp
, i
) {
1530 bnx2x_free_tx_skbs_queue(&bp
->fp
[i
]);
1534 static void bnx2x_free_rx_bds(struct bnx2x_fastpath
*fp
)
1536 struct bnx2x
*bp
= fp
->bp
;
1539 /* ring wasn't allocated */
1540 if (fp
->rx_buf_ring
== NULL
)
1543 for (i
= 0; i
< NUM_RX_BD
; i
++) {
1544 struct sw_rx_bd
*rx_buf
= &fp
->rx_buf_ring
[i
];
1545 u8
*data
= rx_buf
->data
;
1549 dma_unmap_single(&bp
->pdev
->dev
,
1550 dma_unmap_addr(rx_buf
, mapping
),
1551 fp
->rx_buf_size
, DMA_FROM_DEVICE
);
1553 rx_buf
->data
= NULL
;
1554 bnx2x_frag_free(fp
, data
);
1558 static void bnx2x_free_rx_skbs_cnic(struct bnx2x
*bp
)
1562 for_each_rx_queue_cnic(bp
, j
) {
1563 bnx2x_free_rx_bds(&bp
->fp
[j
]);
1567 static void bnx2x_free_rx_skbs(struct bnx2x
*bp
)
1571 for_each_eth_queue(bp
, j
) {
1572 struct bnx2x_fastpath
*fp
= &bp
->fp
[j
];
1574 bnx2x_free_rx_bds(fp
);
1576 if (fp
->mode
!= TPA_MODE_DISABLED
)
1577 bnx2x_free_tpa_pool(bp
, fp
, MAX_AGG_QS(bp
));
1581 static void bnx2x_free_skbs_cnic(struct bnx2x
*bp
)
1583 bnx2x_free_tx_skbs_cnic(bp
);
1584 bnx2x_free_rx_skbs_cnic(bp
);
1587 void bnx2x_free_skbs(struct bnx2x
*bp
)
1589 bnx2x_free_tx_skbs(bp
);
1590 bnx2x_free_rx_skbs(bp
);
1593 void bnx2x_update_max_mf_config(struct bnx2x
*bp
, u32 value
)
1595 /* load old values */
1596 u32 mf_cfg
= bp
->mf_config
[BP_VN(bp
)];
1598 if (value
!= bnx2x_extract_max_cfg(bp
, mf_cfg
)) {
1599 /* leave all but MAX value */
1600 mf_cfg
&= ~FUNC_MF_CFG_MAX_BW_MASK
;
1602 /* set new MAX value */
1603 mf_cfg
|= (value
<< FUNC_MF_CFG_MAX_BW_SHIFT
)
1604 & FUNC_MF_CFG_MAX_BW_MASK
;
1606 bnx2x_fw_command(bp
, DRV_MSG_CODE_SET_MF_BW
, mf_cfg
);
1611 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1613 * @bp: driver handle
1614 * @nvecs: number of vectors to be released
1616 static void bnx2x_free_msix_irqs(struct bnx2x
*bp
, int nvecs
)
1620 if (nvecs
== offset
)
1623 /* VFs don't have a default SB */
1625 free_irq(bp
->msix_table
[offset
].vector
, bp
->dev
);
1626 DP(NETIF_MSG_IFDOWN
, "released sp irq (%d)\n",
1627 bp
->msix_table
[offset
].vector
);
1631 if (CNIC_SUPPORT(bp
)) {
1632 if (nvecs
== offset
)
1637 for_each_eth_queue(bp
, i
) {
1638 if (nvecs
== offset
)
1640 DP(NETIF_MSG_IFDOWN
, "about to release fp #%d->%d irq\n",
1641 i
, bp
->msix_table
[offset
].vector
);
1643 free_irq(bp
->msix_table
[offset
++].vector
, &bp
->fp
[i
]);
1647 void bnx2x_free_irq(struct bnx2x
*bp
)
1649 if (bp
->flags
& USING_MSIX_FLAG
&&
1650 !(bp
->flags
& USING_SINGLE_MSIX_FLAG
)) {
1651 int nvecs
= BNX2X_NUM_ETH_QUEUES(bp
) + CNIC_SUPPORT(bp
);
1653 /* vfs don't have a default status block */
1657 bnx2x_free_msix_irqs(bp
, nvecs
);
1659 free_irq(bp
->dev
->irq
, bp
->dev
);
1663 int bnx2x_enable_msix(struct bnx2x
*bp
)
1665 int msix_vec
= 0, i
, rc
;
1667 /* VFs don't have a default status block */
1669 bp
->msix_table
[msix_vec
].entry
= msix_vec
;
1670 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1671 bp
->msix_table
[0].entry
);
1675 /* Cnic requires an msix vector for itself */
1676 if (CNIC_SUPPORT(bp
)) {
1677 bp
->msix_table
[msix_vec
].entry
= msix_vec
;
1678 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1679 msix_vec
, bp
->msix_table
[msix_vec
].entry
);
1683 /* We need separate vectors for ETH queues only (not FCoE) */
1684 for_each_eth_queue(bp
, i
) {
1685 bp
->msix_table
[msix_vec
].entry
= msix_vec
;
1686 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1687 msix_vec
, msix_vec
, i
);
1691 DP(BNX2X_MSG_SP
, "about to request enable msix with %d vectors\n",
1694 rc
= pci_enable_msix_range(bp
->pdev
, &bp
->msix_table
[0],
1695 BNX2X_MIN_MSIX_VEC_CNT(bp
), msix_vec
);
1697 * reconfigure number of tx/rx queues according to available
1700 if (rc
== -ENOSPC
) {
1701 /* Get by with single vector */
1702 rc
= pci_enable_msix_range(bp
->pdev
, &bp
->msix_table
[0], 1, 1);
1704 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1709 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1710 bp
->flags
|= USING_SINGLE_MSIX_FLAG
;
1712 BNX2X_DEV_INFO("set number of queues to 1\n");
1713 bp
->num_ethernet_queues
= 1;
1714 bp
->num_queues
= bp
->num_ethernet_queues
+ bp
->num_cnic_queues
;
1715 } else if (rc
< 0) {
1716 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc
);
1718 } else if (rc
< msix_vec
) {
1719 /* how less vectors we will have? */
1720 int diff
= msix_vec
- rc
;
1722 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc
);
1725 * decrease number of queues by number of unallocated entries
1727 bp
->num_ethernet_queues
-= diff
;
1728 bp
->num_queues
= bp
->num_ethernet_queues
+ bp
->num_cnic_queues
;
1730 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1734 bp
->flags
|= USING_MSIX_FLAG
;
1739 /* fall to INTx if not enough memory */
1741 bp
->flags
|= DISABLE_MSI_FLAG
;
1746 static int bnx2x_req_msix_irqs(struct bnx2x
*bp
)
1748 int i
, rc
, offset
= 0;
1750 /* no default status block for vf */
1752 rc
= request_irq(bp
->msix_table
[offset
++].vector
,
1753 bnx2x_msix_sp_int
, 0,
1754 bp
->dev
->name
, bp
->dev
);
1756 BNX2X_ERR("request sp irq failed\n");
1761 if (CNIC_SUPPORT(bp
))
1764 for_each_eth_queue(bp
, i
) {
1765 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
1766 snprintf(fp
->name
, sizeof(fp
->name
), "%s-fp-%d",
1769 rc
= request_irq(bp
->msix_table
[offset
].vector
,
1770 bnx2x_msix_fp_int
, 0, fp
->name
, fp
);
1772 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i
,
1773 bp
->msix_table
[offset
].vector
, rc
);
1774 bnx2x_free_msix_irqs(bp
, offset
);
1781 i
= BNX2X_NUM_ETH_QUEUES(bp
);
1783 offset
= 1 + CNIC_SUPPORT(bp
);
1784 netdev_info(bp
->dev
,
1785 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1786 bp
->msix_table
[0].vector
,
1787 0, bp
->msix_table
[offset
].vector
,
1788 i
- 1, bp
->msix_table
[offset
+ i
- 1].vector
);
1790 offset
= CNIC_SUPPORT(bp
);
1791 netdev_info(bp
->dev
,
1792 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1793 0, bp
->msix_table
[offset
].vector
,
1794 i
- 1, bp
->msix_table
[offset
+ i
- 1].vector
);
1799 int bnx2x_enable_msi(struct bnx2x
*bp
)
1803 rc
= pci_enable_msi(bp
->pdev
);
1805 BNX2X_DEV_INFO("MSI is not attainable\n");
1808 bp
->flags
|= USING_MSI_FLAG
;
1813 static int bnx2x_req_irq(struct bnx2x
*bp
)
1815 unsigned long flags
;
1818 if (bp
->flags
& (USING_MSI_FLAG
| USING_MSIX_FLAG
))
1821 flags
= IRQF_SHARED
;
1823 if (bp
->flags
& USING_MSIX_FLAG
)
1824 irq
= bp
->msix_table
[0].vector
;
1826 irq
= bp
->pdev
->irq
;
1828 return request_irq(irq
, bnx2x_interrupt
, flags
, bp
->dev
->name
, bp
->dev
);
1831 static int bnx2x_setup_irqs(struct bnx2x
*bp
)
1834 if (bp
->flags
& USING_MSIX_FLAG
&&
1835 !(bp
->flags
& USING_SINGLE_MSIX_FLAG
)) {
1836 rc
= bnx2x_req_msix_irqs(bp
);
1840 rc
= bnx2x_req_irq(bp
);
1842 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc
);
1845 if (bp
->flags
& USING_MSI_FLAG
) {
1846 bp
->dev
->irq
= bp
->pdev
->irq
;
1847 netdev_info(bp
->dev
, "using MSI IRQ %d\n",
1850 if (bp
->flags
& USING_MSIX_FLAG
) {
1851 bp
->dev
->irq
= bp
->msix_table
[0].vector
;
1852 netdev_info(bp
->dev
, "using MSIX IRQ %d\n",
1860 static void bnx2x_napi_enable_cnic(struct bnx2x
*bp
)
1864 for_each_rx_queue_cnic(bp
, i
) {
1865 napi_enable(&bnx2x_fp(bp
, i
, napi
));
1869 static void bnx2x_napi_enable(struct bnx2x
*bp
)
1873 for_each_eth_queue(bp
, i
) {
1874 napi_enable(&bnx2x_fp(bp
, i
, napi
));
1878 static void bnx2x_napi_disable_cnic(struct bnx2x
*bp
)
1882 for_each_rx_queue_cnic(bp
, i
) {
1883 napi_disable(&bnx2x_fp(bp
, i
, napi
));
1887 static void bnx2x_napi_disable(struct bnx2x
*bp
)
1891 for_each_eth_queue(bp
, i
) {
1892 napi_disable(&bnx2x_fp(bp
, i
, napi
));
1896 void bnx2x_netif_start(struct bnx2x
*bp
)
1898 if (netif_running(bp
->dev
)) {
1899 bnx2x_napi_enable(bp
);
1900 if (CNIC_LOADED(bp
))
1901 bnx2x_napi_enable_cnic(bp
);
1902 bnx2x_int_enable(bp
);
1903 if (bp
->state
== BNX2X_STATE_OPEN
)
1904 netif_tx_wake_all_queues(bp
->dev
);
1908 void bnx2x_netif_stop(struct bnx2x
*bp
, int disable_hw
)
1910 bnx2x_int_disable_sync(bp
, disable_hw
);
1911 bnx2x_napi_disable(bp
);
1912 if (CNIC_LOADED(bp
))
1913 bnx2x_napi_disable_cnic(bp
);
1916 u16
bnx2x_select_queue(struct net_device
*dev
, struct sk_buff
*skb
,
1917 void *accel_priv
, select_queue_fallback_t fallback
)
1919 struct bnx2x
*bp
= netdev_priv(dev
);
1921 if (CNIC_LOADED(bp
) && !NO_FCOE(bp
)) {
1922 struct ethhdr
*hdr
= (struct ethhdr
*)skb
->data
;
1923 u16 ether_type
= ntohs(hdr
->h_proto
);
1925 /* Skip VLAN tag if present */
1926 if (ether_type
== ETH_P_8021Q
) {
1927 struct vlan_ethhdr
*vhdr
=
1928 (struct vlan_ethhdr
*)skb
->data
;
1930 ether_type
= ntohs(vhdr
->h_vlan_encapsulated_proto
);
1933 /* If ethertype is FCoE or FIP - use FCoE ring */
1934 if ((ether_type
== ETH_P_FCOE
) || (ether_type
== ETH_P_FIP
))
1935 return bnx2x_fcoe_tx(bp
, txq_index
);
1938 /* select a non-FCoE queue */
1939 return fallback(dev
, skb
) % BNX2X_NUM_ETH_QUEUES(bp
);
1942 void bnx2x_set_num_queues(struct bnx2x
*bp
)
1945 bp
->num_ethernet_queues
= bnx2x_calc_num_queues(bp
);
1947 /* override in STORAGE SD modes */
1948 if (IS_MF_STORAGE_ONLY(bp
))
1949 bp
->num_ethernet_queues
= 1;
1951 /* Add special queues */
1952 bp
->num_cnic_queues
= CNIC_SUPPORT(bp
); /* For FCOE */
1953 bp
->num_queues
= bp
->num_ethernet_queues
+ bp
->num_cnic_queues
;
1955 BNX2X_DEV_INFO("set number of queues to %d\n", bp
->num_queues
);
1959 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1961 * @bp: Driver handle
1963 * We currently support for at most 16 Tx queues for each CoS thus we will
1964 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1967 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1968 * index after all ETH L2 indices.
1970 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1971 * will be the holes at the end of each group of 16 ETh L2 indices (0..15,
1972 * 16..31,...) with indices that are not coupled with any real Tx queue.
1974 * The proper configuration of skb->queue_mapping is handled by
1975 * bnx2x_select_queue() and __skb_tx_hash().
1977 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1978 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1980 static int bnx2x_set_real_num_queues(struct bnx2x
*bp
, int include_cnic
)
1984 tx
= BNX2X_NUM_ETH_QUEUES(bp
) * bp
->max_cos
;
1985 rx
= BNX2X_NUM_ETH_QUEUES(bp
);
1987 /* account for fcoe queue */
1988 if (include_cnic
&& !NO_FCOE(bp
)) {
1993 rc
= netif_set_real_num_tx_queues(bp
->dev
, tx
);
1995 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc
);
1998 rc
= netif_set_real_num_rx_queues(bp
->dev
, rx
);
2000 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc
);
2004 DP(NETIF_MSG_IFUP
, "Setting real num queues to (tx, rx) (%d, %d)\n",
2010 static void bnx2x_set_rx_buf_size(struct bnx2x
*bp
)
2014 for_each_queue(bp
, i
) {
2015 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
2018 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
2021 * Although there are no IP frames expected to arrive to
2022 * this ring we still want to add an
2023 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
2026 mtu
= BNX2X_FCOE_MINI_JUMBO_MTU
;
2029 fp
->rx_buf_size
= BNX2X_FW_RX_ALIGN_START
+
2030 IP_HEADER_ALIGNMENT_PADDING
+
2033 BNX2X_FW_RX_ALIGN_END
;
2034 /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
2035 if (fp
->rx_buf_size
+ NET_SKB_PAD
<= PAGE_SIZE
)
2036 fp
->rx_frag_size
= fp
->rx_buf_size
+ NET_SKB_PAD
;
2038 fp
->rx_frag_size
= 0;
2042 static int bnx2x_init_rss(struct bnx2x
*bp
)
2045 u8 num_eth_queues
= BNX2X_NUM_ETH_QUEUES(bp
);
2047 /* Prepare the initial contents for the indirection table if RSS is
2050 for (i
= 0; i
< sizeof(bp
->rss_conf_obj
.ind_table
); i
++)
2051 bp
->rss_conf_obj
.ind_table
[i
] =
2053 ethtool_rxfh_indir_default(i
, num_eth_queues
);
2056 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
2057 * per-port, so if explicit configuration is needed , do it only
2060 * For 57712 and newer on the other hand it's a per-function
2063 return bnx2x_config_rss_eth(bp
, bp
->port
.pmf
|| !CHIP_IS_E1x(bp
));
2066 int bnx2x_rss(struct bnx2x
*bp
, struct bnx2x_rss_config_obj
*rss_obj
,
2067 bool config_hash
, bool enable
)
2069 struct bnx2x_config_rss_params params
= {NULL
};
2071 /* Although RSS is meaningless when there is a single HW queue we
2072 * still need it enabled in order to have HW Rx hash generated.
2074 * if (!is_eth_multi(bp))
2075 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
2078 params
.rss_obj
= rss_obj
;
2080 __set_bit(RAMROD_COMP_WAIT
, ¶ms
.ramrod_flags
);
2083 __set_bit(BNX2X_RSS_MODE_REGULAR
, ¶ms
.rss_flags
);
2085 /* RSS configuration */
2086 __set_bit(BNX2X_RSS_IPV4
, ¶ms
.rss_flags
);
2087 __set_bit(BNX2X_RSS_IPV4_TCP
, ¶ms
.rss_flags
);
2088 __set_bit(BNX2X_RSS_IPV6
, ¶ms
.rss_flags
);
2089 __set_bit(BNX2X_RSS_IPV6_TCP
, ¶ms
.rss_flags
);
2090 if (rss_obj
->udp_rss_v4
)
2091 __set_bit(BNX2X_RSS_IPV4_UDP
, ¶ms
.rss_flags
);
2092 if (rss_obj
->udp_rss_v6
)
2093 __set_bit(BNX2X_RSS_IPV6_UDP
, ¶ms
.rss_flags
);
2095 if (!CHIP_IS_E1x(bp
)) {
2096 /* valid only for TUNN_MODE_VXLAN tunnel mode */
2097 __set_bit(BNX2X_RSS_IPV4_VXLAN
, ¶ms
.rss_flags
);
2098 __set_bit(BNX2X_RSS_IPV6_VXLAN
, ¶ms
.rss_flags
);
2100 /* valid only for TUNN_MODE_GRE tunnel mode */
2101 __set_bit(BNX2X_RSS_TUNN_INNER_HDRS
, ¶ms
.rss_flags
);
2104 __set_bit(BNX2X_RSS_MODE_DISABLED
, ¶ms
.rss_flags
);
2108 params
.rss_result_mask
= MULTI_MASK
;
2110 memcpy(params
.ind_table
, rss_obj
->ind_table
, sizeof(params
.ind_table
));
2114 netdev_rss_key_fill(params
.rss_key
, T_ETH_RSS_KEY
* 4);
2115 __set_bit(BNX2X_RSS_SET_SRCH
, ¶ms
.rss_flags
);
2119 return bnx2x_config_rss(bp
, ¶ms
);
2121 return bnx2x_vfpf_config_rss(bp
, ¶ms
);
2124 static int bnx2x_init_hw(struct bnx2x
*bp
, u32 load_code
)
2126 struct bnx2x_func_state_params func_params
= {NULL
};
2128 /* Prepare parameters for function state transitions */
2129 __set_bit(RAMROD_COMP_WAIT
, &func_params
.ramrod_flags
);
2131 func_params
.f_obj
= &bp
->func_obj
;
2132 func_params
.cmd
= BNX2X_F_CMD_HW_INIT
;
2134 func_params
.params
.hw_init
.load_phase
= load_code
;
2136 return bnx2x_func_state_change(bp
, &func_params
);
2140 * Cleans the object that have internal lists without sending
2141 * ramrods. Should be run when interrupts are disabled.
2143 void bnx2x_squeeze_objects(struct bnx2x
*bp
)
2146 unsigned long ramrod_flags
= 0, vlan_mac_flags
= 0;
2147 struct bnx2x_mcast_ramrod_params rparam
= {NULL
};
2148 struct bnx2x_vlan_mac_obj
*mac_obj
= &bp
->sp_objs
->mac_obj
;
2150 /***************** Cleanup MACs' object first *************************/
2152 /* Wait for completion of requested */
2153 __set_bit(RAMROD_COMP_WAIT
, &ramrod_flags
);
2154 /* Perform a dry cleanup */
2155 __set_bit(RAMROD_DRV_CLR_ONLY
, &ramrod_flags
);
2157 /* Clean ETH primary MAC */
2158 __set_bit(BNX2X_ETH_MAC
, &vlan_mac_flags
);
2159 rc
= mac_obj
->delete_all(bp
, &bp
->sp_objs
->mac_obj
, &vlan_mac_flags
,
2162 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc
);
2164 /* Cleanup UC list */
2166 __set_bit(BNX2X_UC_LIST_MAC
, &vlan_mac_flags
);
2167 rc
= mac_obj
->delete_all(bp
, mac_obj
, &vlan_mac_flags
,
2170 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc
);
2172 /***************** Now clean mcast object *****************************/
2173 rparam
.mcast_obj
= &bp
->mcast_obj
;
2174 __set_bit(RAMROD_DRV_CLR_ONLY
, &rparam
.ramrod_flags
);
2176 /* Add a DEL command... - Since we're doing a driver cleanup only,
2177 * we take a lock surrounding both the initial send and the CONTs,
2178 * as we don't want a true completion to disrupt us in the middle.
2180 netif_addr_lock_bh(bp
->dev
);
2181 rc
= bnx2x_config_mcast(bp
, &rparam
, BNX2X_MCAST_CMD_DEL
);
2183 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2186 /* ...and wait until all pending commands are cleared */
2187 rc
= bnx2x_config_mcast(bp
, &rparam
, BNX2X_MCAST_CMD_CONT
);
2190 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2192 netif_addr_unlock_bh(bp
->dev
);
2196 rc
= bnx2x_config_mcast(bp
, &rparam
, BNX2X_MCAST_CMD_CONT
);
2198 netif_addr_unlock_bh(bp
->dev
);
2201 #ifndef BNX2X_STOP_ON_ERROR
2202 #define LOAD_ERROR_EXIT(bp, label) \
2204 (bp)->state = BNX2X_STATE_ERROR; \
2208 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2210 bp->cnic_loaded = false; \
2213 #else /*BNX2X_STOP_ON_ERROR*/
2214 #define LOAD_ERROR_EXIT(bp, label) \
2216 (bp)->state = BNX2X_STATE_ERROR; \
2220 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2222 bp->cnic_loaded = false; \
2226 #endif /*BNX2X_STOP_ON_ERROR*/
2228 static void bnx2x_free_fw_stats_mem(struct bnx2x
*bp
)
2230 BNX2X_PCI_FREE(bp
->fw_stats
, bp
->fw_stats_mapping
,
2231 bp
->fw_stats_data_sz
+ bp
->fw_stats_req_sz
);
2235 static int bnx2x_alloc_fw_stats_mem(struct bnx2x
*bp
)
2237 int num_groups
, vf_headroom
= 0;
2238 int is_fcoe_stats
= NO_FCOE(bp
) ? 0 : 1;
2240 /* number of queues for statistics is number of eth queues + FCoE */
2241 u8 num_queue_stats
= BNX2X_NUM_ETH_QUEUES(bp
) + is_fcoe_stats
;
2243 /* Total number of FW statistics requests =
2244 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2245 * and fcoe l2 queue) stats + num of queues (which includes another 1
2246 * for fcoe l2 queue if applicable)
2248 bp
->fw_stats_num
= 2 + is_fcoe_stats
+ num_queue_stats
;
2250 /* vf stats appear in the request list, but their data is allocated by
2251 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2252 * it is used to determine where to place the vf stats queries in the
2256 vf_headroom
= bnx2x_vf_headroom(bp
);
2258 /* Request is built from stats_query_header and an array of
2259 * stats_query_cmd_group each of which contains
2260 * STATS_QUERY_CMD_COUNT rules. The real number or requests is
2261 * configured in the stats_query_header.
2264 (((bp
->fw_stats_num
+ vf_headroom
) / STATS_QUERY_CMD_COUNT
) +
2265 (((bp
->fw_stats_num
+ vf_headroom
) % STATS_QUERY_CMD_COUNT
) ?
2268 DP(BNX2X_MSG_SP
, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2269 bp
->fw_stats_num
, vf_headroom
, num_groups
);
2270 bp
->fw_stats_req_sz
= sizeof(struct stats_query_header
) +
2271 num_groups
* sizeof(struct stats_query_cmd_group
);
2273 /* Data for statistics requests + stats_counter
2274 * stats_counter holds per-STORM counters that are incremented
2275 * when STORM has finished with the current request.
2276 * memory for FCoE offloaded statistics are counted anyway,
2277 * even if they will not be sent.
2278 * VF stats are not accounted for here as the data of VF stats is stored
2279 * in memory allocated by the VF, not here.
2281 bp
->fw_stats_data_sz
= sizeof(struct per_port_stats
) +
2282 sizeof(struct per_pf_stats
) +
2283 sizeof(struct fcoe_statistics_params
) +
2284 sizeof(struct per_queue_stats
) * num_queue_stats
+
2285 sizeof(struct stats_counter
);
2287 bp
->fw_stats
= BNX2X_PCI_ALLOC(&bp
->fw_stats_mapping
,
2288 bp
->fw_stats_data_sz
+ bp
->fw_stats_req_sz
);
2293 bp
->fw_stats_req
= (struct bnx2x_fw_stats_req
*)bp
->fw_stats
;
2294 bp
->fw_stats_req_mapping
= bp
->fw_stats_mapping
;
2295 bp
->fw_stats_data
= (struct bnx2x_fw_stats_data
*)
2296 ((u8
*)bp
->fw_stats
+ bp
->fw_stats_req_sz
);
2297 bp
->fw_stats_data_mapping
= bp
->fw_stats_mapping
+
2298 bp
->fw_stats_req_sz
;
2300 DP(BNX2X_MSG_SP
, "statistics request base address set to %x %x\n",
2301 U64_HI(bp
->fw_stats_req_mapping
),
2302 U64_LO(bp
->fw_stats_req_mapping
));
2303 DP(BNX2X_MSG_SP
, "statistics data base address set to %x %x\n",
2304 U64_HI(bp
->fw_stats_data_mapping
),
2305 U64_LO(bp
->fw_stats_data_mapping
));
2309 bnx2x_free_fw_stats_mem(bp
);
2310 BNX2X_ERR("Can't allocate FW stats memory\n");
2314 /* send load request to mcp and analyze response */
2315 static int bnx2x_nic_load_request(struct bnx2x
*bp
, u32
*load_code
)
2321 (SHMEM_RD(bp
, func_mb
[BP_FW_MB_IDX(bp
)].drv_mb_header
) &
2322 DRV_MSG_SEQ_NUMBER_MASK
);
2323 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp
->fw_seq
);
2325 /* Get current FW pulse sequence */
2326 bp
->fw_drv_pulse_wr_seq
=
2327 (SHMEM_RD(bp
, func_mb
[BP_FW_MB_IDX(bp
)].drv_pulse_mb
) &
2328 DRV_PULSE_SEQ_MASK
);
2329 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp
->fw_drv_pulse_wr_seq
);
2331 param
= DRV_MSG_CODE_LOAD_REQ_WITH_LFA
;
2333 if (IS_MF_SD(bp
) && bnx2x_port_after_undi(bp
))
2334 param
|= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA
;
2337 (*load_code
) = bnx2x_fw_command(bp
, DRV_MSG_CODE_LOAD_REQ
, param
);
2339 /* if mcp fails to respond we must abort */
2340 if (!(*load_code
)) {
2341 BNX2X_ERR("MCP response failure, aborting\n");
2345 /* If mcp refused (e.g. other port is in diagnostic mode) we
2348 if ((*load_code
) == FW_MSG_CODE_DRV_LOAD_REFUSED
) {
2349 BNX2X_ERR("MCP refused load request, aborting\n");
2355 /* check whether another PF has already loaded FW to chip. In
2356 * virtualized environments a pf from another VM may have already
2357 * initialized the device including loading FW
2359 int bnx2x_compare_fw_ver(struct bnx2x
*bp
, u32 load_code
, bool print_err
)
2361 /* is another pf loaded on this engine? */
2362 if (load_code
!= FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
&&
2363 load_code
!= FW_MSG_CODE_DRV_LOAD_COMMON
) {
2364 /* build my FW version dword */
2365 u32 my_fw
= (BCM_5710_FW_MAJOR_VERSION
) +
2366 (BCM_5710_FW_MINOR_VERSION
<< 8) +
2367 (BCM_5710_FW_REVISION_VERSION
<< 16) +
2368 (BCM_5710_FW_ENGINEERING_VERSION
<< 24);
2370 /* read loaded FW from chip */
2371 u32 loaded_fw
= REG_RD(bp
, XSEM_REG_PRAM
);
2373 DP(BNX2X_MSG_SP
, "loaded fw %x, my fw %x\n",
2376 /* abort nic load if version mismatch */
2377 if (my_fw
!= loaded_fw
) {
2379 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2382 BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
2390 /* returns the "mcp load_code" according to global load_count array */
2391 static int bnx2x_nic_load_no_mcp(struct bnx2x
*bp
, int port
)
2393 int path
= BP_PATH(bp
);
2395 DP(NETIF_MSG_IFUP
, "NO MCP - load counts[%d] %d, %d, %d\n",
2396 path
, bnx2x_load_count
[path
][0], bnx2x_load_count
[path
][1],
2397 bnx2x_load_count
[path
][2]);
2398 bnx2x_load_count
[path
][0]++;
2399 bnx2x_load_count
[path
][1 + port
]++;
2400 DP(NETIF_MSG_IFUP
, "NO MCP - new load counts[%d] %d, %d, %d\n",
2401 path
, bnx2x_load_count
[path
][0], bnx2x_load_count
[path
][1],
2402 bnx2x_load_count
[path
][2]);
2403 if (bnx2x_load_count
[path
][0] == 1)
2404 return FW_MSG_CODE_DRV_LOAD_COMMON
;
2405 else if (bnx2x_load_count
[path
][1 + port
] == 1)
2406 return FW_MSG_CODE_DRV_LOAD_PORT
;
2408 return FW_MSG_CODE_DRV_LOAD_FUNCTION
;
2411 /* mark PMF if applicable */
2412 static void bnx2x_nic_load_pmf(struct bnx2x
*bp
, u32 load_code
)
2414 if ((load_code
== FW_MSG_CODE_DRV_LOAD_COMMON
) ||
2415 (load_code
== FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
) ||
2416 (load_code
== FW_MSG_CODE_DRV_LOAD_PORT
)) {
2418 /* We need the barrier to ensure the ordering between the
2419 * writing to bp->port.pmf here and reading it from the
2420 * bnx2x_periodic_task().
2427 DP(NETIF_MSG_LINK
, "pmf %d\n", bp
->port
.pmf
);
2430 static void bnx2x_nic_load_afex_dcc(struct bnx2x
*bp
, int load_code
)
2432 if (((load_code
== FW_MSG_CODE_DRV_LOAD_COMMON
) ||
2433 (load_code
== FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
)) &&
2434 (bp
->common
.shmem2_base
)) {
2435 if (SHMEM2_HAS(bp
, dcc_support
))
2436 SHMEM2_WR(bp
, dcc_support
,
2437 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV
|
2438 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV
));
2439 if (SHMEM2_HAS(bp
, afex_driver_support
))
2440 SHMEM2_WR(bp
, afex_driver_support
,
2441 SHMEM_AFEX_SUPPORTED_VERSION_ONE
);
2444 /* Set AFEX default VLAN tag to an invalid value */
2445 bp
->afex_def_vlan_tag
= -1;
2449 * bnx2x_bz_fp - zero content of the fastpath structure.
2451 * @bp: driver handle
2452 * @index: fastpath index to be zeroed
2454 * Makes sure the contents of the bp->fp[index].napi is kept
2457 static void bnx2x_bz_fp(struct bnx2x
*bp
, int index
)
2459 struct bnx2x_fastpath
*fp
= &bp
->fp
[index
];
2461 struct napi_struct orig_napi
= fp
->napi
;
2462 struct bnx2x_agg_info
*orig_tpa_info
= fp
->tpa_info
;
2464 /* bzero bnx2x_fastpath contents */
2466 memset(fp
->tpa_info
, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2
*
2467 sizeof(struct bnx2x_agg_info
));
2468 memset(fp
, 0, sizeof(*fp
));
2470 /* Restore the NAPI object as it has been already initialized */
2471 fp
->napi
= orig_napi
;
2472 fp
->tpa_info
= orig_tpa_info
;
2476 fp
->max_cos
= bp
->max_cos
;
2478 /* Special queues support only one CoS */
2481 /* Init txdata pointers */
2483 fp
->txdata_ptr
[0] = &bp
->bnx2x_txq
[FCOE_TXQ_IDX(bp
)];
2485 for_each_cos_in_tx_queue(fp
, cos
)
2486 fp
->txdata_ptr
[cos
] = &bp
->bnx2x_txq
[cos
*
2487 BNX2X_NUM_ETH_QUEUES(bp
) + index
];
2489 /* set the tpa flag for each queue. The tpa flag determines the queue
2490 * minimal size so it must be set prior to queue memory allocation
2492 if (bp
->dev
->features
& NETIF_F_LRO
)
2493 fp
->mode
= TPA_MODE_LRO
;
2494 else if (bp
->dev
->features
& NETIF_F_GRO
&&
2495 bnx2x_mtu_allows_gro(bp
->dev
->mtu
))
2496 fp
->mode
= TPA_MODE_GRO
;
2498 fp
->mode
= TPA_MODE_DISABLED
;
2500 /* We don't want TPA if it's disabled in bp
2501 * or if this is an FCoE L2 ring.
2503 if (bp
->disable_tpa
|| IS_FCOE_FP(fp
))
2504 fp
->mode
= TPA_MODE_DISABLED
;
2507 void bnx2x_set_os_driver_state(struct bnx2x
*bp
, u32 state
)
2511 if (!IS_MF_BD(bp
) || !SHMEM2_HAS(bp
, os_driver_state
) || IS_VF(bp
))
2514 cur
= SHMEM2_RD(bp
, os_driver_state
[BP_FW_MB_IDX(bp
)]);
2515 DP(NETIF_MSG_IFUP
, "Driver state %08x-->%08x\n",
2518 SHMEM2_WR(bp
, os_driver_state
[BP_FW_MB_IDX(bp
)], state
);
2521 int bnx2x_load_cnic(struct bnx2x
*bp
)
2523 int i
, rc
, port
= BP_PORT(bp
);
2525 DP(NETIF_MSG_IFUP
, "Starting CNIC-related load\n");
2527 mutex_init(&bp
->cnic_mutex
);
2530 rc
= bnx2x_alloc_mem_cnic(bp
);
2532 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2533 LOAD_ERROR_EXIT_CNIC(bp
, load_error_cnic0
);
2537 rc
= bnx2x_alloc_fp_mem_cnic(bp
);
2539 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2540 LOAD_ERROR_EXIT_CNIC(bp
, load_error_cnic0
);
2543 /* Update the number of queues with the cnic queues */
2544 rc
= bnx2x_set_real_num_queues(bp
, 1);
2546 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2547 LOAD_ERROR_EXIT_CNIC(bp
, load_error_cnic0
);
2550 /* Add all CNIC NAPI objects */
2551 bnx2x_add_all_napi_cnic(bp
);
2552 DP(NETIF_MSG_IFUP
, "cnic napi added\n");
2553 bnx2x_napi_enable_cnic(bp
);
2555 rc
= bnx2x_init_hw_func_cnic(bp
);
2557 LOAD_ERROR_EXIT_CNIC(bp
, load_error_cnic1
);
2559 bnx2x_nic_init_cnic(bp
);
2562 /* Enable Timer scan */
2563 REG_WR(bp
, TM_REG_EN_LINEAR0_TIMER
+ port
*4, 1);
2565 /* setup cnic queues */
2566 for_each_cnic_queue(bp
, i
) {
2567 rc
= bnx2x_setup_queue(bp
, &bp
->fp
[i
], 0);
2569 BNX2X_ERR("Queue setup failed\n");
2570 LOAD_ERROR_EXIT(bp
, load_error_cnic2
);
2575 /* Initialize Rx filter. */
2576 bnx2x_set_rx_mode_inner(bp
);
2578 /* re-read iscsi info */
2579 bnx2x_get_iscsi_info(bp
);
2580 bnx2x_setup_cnic_irq_info(bp
);
2581 bnx2x_setup_cnic_info(bp
);
2582 bp
->cnic_loaded
= true;
2583 if (bp
->state
== BNX2X_STATE_OPEN
)
2584 bnx2x_cnic_notify(bp
, CNIC_CTL_START_CMD
);
2586 DP(NETIF_MSG_IFUP
, "Ending successfully CNIC-related load\n");
2590 #ifndef BNX2X_STOP_ON_ERROR
2592 /* Disable Timer scan */
2593 REG_WR(bp
, TM_REG_EN_LINEAR0_TIMER
+ port
*4, 0);
2596 bnx2x_napi_disable_cnic(bp
);
2597 /* Update the number of queues without the cnic queues */
2598 if (bnx2x_set_real_num_queues(bp
, 0))
2599 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2601 BNX2X_ERR("CNIC-related load failed\n");
2602 bnx2x_free_fp_mem_cnic(bp
);
2603 bnx2x_free_mem_cnic(bp
);
2605 #endif /* ! BNX2X_STOP_ON_ERROR */
2608 /* must be called with rtnl_lock */
2609 int bnx2x_nic_load(struct bnx2x
*bp
, int load_mode
)
2611 int port
= BP_PORT(bp
);
2612 int i
, rc
= 0, load_code
= 0;
2614 DP(NETIF_MSG_IFUP
, "Starting NIC load\n");
2616 "CNIC is %s\n", CNIC_ENABLED(bp
) ? "enabled" : "disabled");
2618 #ifdef BNX2X_STOP_ON_ERROR
2619 if (unlikely(bp
->panic
)) {
2620 BNX2X_ERR("Can't load NIC when there is panic\n");
2625 bp
->state
= BNX2X_STATE_OPENING_WAIT4_LOAD
;
2627 /* zero the structure w/o any lock, before SP handler is initialized */
2628 memset(&bp
->last_reported_link
, 0, sizeof(bp
->last_reported_link
));
2629 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN
,
2630 &bp
->last_reported_link
.link_report_flags
);
2633 /* must be called before memory allocation and HW init */
2634 bnx2x_ilt_set_info(bp
);
2637 * Zero fastpath structures preserving invariants like napi, which are
2638 * allocated only once, fp index, max_cos, bp pointer.
2639 * Also set fp->mode and txdata_ptr.
2641 DP(NETIF_MSG_IFUP
, "num queues: %d", bp
->num_queues
);
2642 for_each_queue(bp
, i
)
2644 memset(bp
->bnx2x_txq
, 0, (BNX2X_MAX_RSS_COUNT(bp
) * BNX2X_MULTI_TX_COS
+
2645 bp
->num_cnic_queues
) *
2646 sizeof(struct bnx2x_fp_txdata
));
2648 bp
->fcoe_init
= false;
2650 /* Set the receive queues buffer size */
2651 bnx2x_set_rx_buf_size(bp
);
2654 rc
= bnx2x_alloc_mem(bp
);
2656 BNX2X_ERR("Unable to allocate bp memory\n");
2661 /* need to be done after alloc mem, since it's self adjusting to amount
2662 * of memory available for RSS queues
2664 rc
= bnx2x_alloc_fp_mem(bp
);
2666 BNX2X_ERR("Unable to allocate memory for fps\n");
2667 LOAD_ERROR_EXIT(bp
, load_error0
);
2670 /* Allocated memory for FW statistics */
2671 if (bnx2x_alloc_fw_stats_mem(bp
))
2672 LOAD_ERROR_EXIT(bp
, load_error0
);
2674 /* request pf to initialize status blocks */
2676 rc
= bnx2x_vfpf_init(bp
);
2678 LOAD_ERROR_EXIT(bp
, load_error0
);
2681 /* As long as bnx2x_alloc_mem() may possibly update
2682 * bp->num_queues, bnx2x_set_real_num_queues() should always
2683 * come after it. At this stage cnic queues are not counted.
2685 rc
= bnx2x_set_real_num_queues(bp
, 0);
2687 BNX2X_ERR("Unable to set real_num_queues\n");
2688 LOAD_ERROR_EXIT(bp
, load_error0
);
2691 /* configure multi cos mappings in kernel.
2692 * this configuration may be overridden by a multi class queue
2693 * discipline or by a dcbx negotiation result.
2695 bnx2x_setup_tc(bp
->dev
, bp
->max_cos
);
2697 /* Add all NAPI objects */
2698 bnx2x_add_all_napi(bp
);
2699 DP(NETIF_MSG_IFUP
, "napi added\n");
2700 bnx2x_napi_enable(bp
);
2703 /* set pf load just before approaching the MCP */
2704 bnx2x_set_pf_load(bp
);
2706 /* if mcp exists send load request and analyze response */
2707 if (!BP_NOMCP(bp
)) {
2708 /* attempt to load pf */
2709 rc
= bnx2x_nic_load_request(bp
, &load_code
);
2711 LOAD_ERROR_EXIT(bp
, load_error1
);
2713 /* what did mcp say? */
2714 rc
= bnx2x_compare_fw_ver(bp
, load_code
, true);
2716 bnx2x_fw_command(bp
, DRV_MSG_CODE_LOAD_DONE
, 0);
2717 LOAD_ERROR_EXIT(bp
, load_error2
);
2720 load_code
= bnx2x_nic_load_no_mcp(bp
, port
);
2723 /* mark pmf if applicable */
2724 bnx2x_nic_load_pmf(bp
, load_code
);
2726 /* Init Function state controlling object */
2727 bnx2x__init_func_obj(bp
);
2730 rc
= bnx2x_init_hw(bp
, load_code
);
2732 BNX2X_ERR("HW init failed, aborting\n");
2733 bnx2x_fw_command(bp
, DRV_MSG_CODE_LOAD_DONE
, 0);
2734 LOAD_ERROR_EXIT(bp
, load_error2
);
2738 bnx2x_pre_irq_nic_init(bp
);
2740 /* Connect to IRQs */
2741 rc
= bnx2x_setup_irqs(bp
);
2743 BNX2X_ERR("setup irqs failed\n");
2745 bnx2x_fw_command(bp
, DRV_MSG_CODE_LOAD_DONE
, 0);
2746 LOAD_ERROR_EXIT(bp
, load_error2
);
2749 /* Init per-function objects */
2751 /* Setup NIC internals and enable interrupts */
2752 bnx2x_post_irq_nic_init(bp
, load_code
);
2754 bnx2x_init_bp_objs(bp
);
2755 bnx2x_iov_nic_init(bp
);
2757 /* Set AFEX default VLAN tag to an invalid value */
2758 bp
->afex_def_vlan_tag
= -1;
2759 bnx2x_nic_load_afex_dcc(bp
, load_code
);
2760 bp
->state
= BNX2X_STATE_OPENING_WAIT4_PORT
;
2761 rc
= bnx2x_func_start(bp
);
2763 BNX2X_ERR("Function start failed!\n");
2764 bnx2x_fw_command(bp
, DRV_MSG_CODE_LOAD_DONE
, 0);
2766 LOAD_ERROR_EXIT(bp
, load_error3
);
2769 /* Send LOAD_DONE command to MCP */
2770 if (!BP_NOMCP(bp
)) {
2771 load_code
= bnx2x_fw_command(bp
,
2772 DRV_MSG_CODE_LOAD_DONE
, 0);
2774 BNX2X_ERR("MCP response failure, aborting\n");
2776 LOAD_ERROR_EXIT(bp
, load_error3
);
2780 /* initialize FW coalescing state machines in RAM */
2781 bnx2x_update_coalesce(bp
);
2784 /* setup the leading queue */
2785 rc
= bnx2x_setup_leading(bp
);
2787 BNX2X_ERR("Setup leading failed!\n");
2788 LOAD_ERROR_EXIT(bp
, load_error3
);
2791 /* set up the rest of the queues */
2792 for_each_nondefault_eth_queue(bp
, i
) {
2794 rc
= bnx2x_setup_queue(bp
, &bp
->fp
[i
], false);
2796 rc
= bnx2x_vfpf_setup_q(bp
, &bp
->fp
[i
], false);
2798 BNX2X_ERR("Queue %d setup failed\n", i
);
2799 LOAD_ERROR_EXIT(bp
, load_error3
);
2804 rc
= bnx2x_init_rss(bp
);
2806 BNX2X_ERR("PF RSS init failed\n");
2807 LOAD_ERROR_EXIT(bp
, load_error3
);
2810 /* Now when Clients are configured we are ready to work */
2811 bp
->state
= BNX2X_STATE_OPEN
;
2813 /* Configure a ucast MAC */
2815 rc
= bnx2x_set_eth_mac(bp
, true);
2817 rc
= bnx2x_vfpf_config_mac(bp
, bp
->dev
->dev_addr
, bp
->fp
->index
,
2820 BNX2X_ERR("Setting Ethernet MAC failed\n");
2821 LOAD_ERROR_EXIT(bp
, load_error3
);
2824 if (IS_PF(bp
) && bp
->pending_max
) {
2825 bnx2x_update_max_mf_config(bp
, bp
->pending_max
);
2826 bp
->pending_max
= 0;
2830 rc
= bnx2x_initial_phy_init(bp
, load_mode
);
2832 LOAD_ERROR_EXIT(bp
, load_error3
);
2834 bp
->link_params
.feature_config_flags
&= ~FEATURE_CONFIG_BOOT_FROM_SAN
;
2836 /* Start fast path */
2838 /* Re-configure vlan filters */
2839 rc
= bnx2x_vlan_reconfigure_vid(bp
);
2841 LOAD_ERROR_EXIT(bp
, load_error3
);
2843 /* Initialize Rx filter. */
2844 bnx2x_set_rx_mode_inner(bp
);
2846 if (bp
->flags
& PTP_SUPPORTED
) {
2848 bnx2x_configure_ptp_filters(bp
);
2851 switch (load_mode
) {
2853 /* Tx queue should be only re-enabled */
2854 netif_tx_wake_all_queues(bp
->dev
);
2858 netif_tx_start_all_queues(bp
->dev
);
2859 smp_mb__after_atomic();
2863 case LOAD_LOOPBACK_EXT
:
2864 bp
->state
= BNX2X_STATE_DIAG
;
2872 bnx2x_update_drv_flags(bp
, 1 << DRV_FLAGS_PORT_MASK
, 0);
2874 bnx2x__link_status_update(bp
);
2876 /* start the timer */
2877 mod_timer(&bp
->timer
, jiffies
+ bp
->current_interval
);
2879 if (CNIC_ENABLED(bp
))
2880 bnx2x_load_cnic(bp
);
2883 bnx2x_schedule_sp_rtnl(bp
, BNX2X_SP_RTNL_GET_DRV_VERSION
, 0);
2885 if (IS_PF(bp
) && SHMEM2_HAS(bp
, drv_capabilities_flag
)) {
2886 /* mark driver is loaded in shmem2 */
2888 val
= SHMEM2_RD(bp
, drv_capabilities_flag
[BP_FW_MB_IDX(bp
)]);
2889 val
&= ~DRV_FLAGS_MTU_MASK
;
2890 val
|= (bp
->dev
->mtu
<< DRV_FLAGS_MTU_SHIFT
);
2891 SHMEM2_WR(bp
, drv_capabilities_flag
[BP_FW_MB_IDX(bp
)],
2892 val
| DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED
|
2893 DRV_FLAGS_CAPABILITIES_LOADED_L2
);
2896 /* Wait for all pending SP commands to complete */
2897 if (IS_PF(bp
) && !bnx2x_wait_sp_comp(bp
, ~0x0UL
)) {
2898 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2899 bnx2x_nic_unload(bp
, UNLOAD_CLOSE
, false);
2903 /* Update driver data for On-Chip MFW dump. */
2905 bnx2x_update_mfw_dump(bp
);
2907 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2908 if (bp
->port
.pmf
&& (bp
->state
!= BNX2X_STATE_DIAG
))
2909 bnx2x_dcbx_init(bp
, false);
2911 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp
))
2912 bnx2x_set_os_driver_state(bp
, OS_DRIVER_STATE_ACTIVE
);
2914 DP(NETIF_MSG_IFUP
, "Ending successfully NIC load\n");
2918 #ifndef BNX2X_STOP_ON_ERROR
2921 bnx2x_int_disable_sync(bp
, 1);
2923 /* Clean queueable objects */
2924 bnx2x_squeeze_objects(bp
);
2927 /* Free SKBs, SGEs, TPA pool and driver internals */
2928 bnx2x_free_skbs(bp
);
2929 for_each_rx_queue(bp
, i
)
2930 bnx2x_free_rx_sge_range(bp
, bp
->fp
+ i
, NUM_RX_SGE
);
2935 if (IS_PF(bp
) && !BP_NOMCP(bp
)) {
2936 bnx2x_fw_command(bp
, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP
, 0);
2937 bnx2x_fw_command(bp
, DRV_MSG_CODE_UNLOAD_DONE
, 0);
2942 bnx2x_napi_disable(bp
);
2943 bnx2x_del_all_napi(bp
);
2945 /* clear pf_load status, as it was already set */
2947 bnx2x_clear_pf_load(bp
);
2949 bnx2x_free_fw_stats_mem(bp
);
2950 bnx2x_free_fp_mem(bp
);
2954 #endif /* ! BNX2X_STOP_ON_ERROR */
2957 int bnx2x_drain_tx_queues(struct bnx2x
*bp
)
2961 /* Wait until tx fastpath tasks complete */
2962 for_each_tx_queue(bp
, i
) {
2963 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
2965 for_each_cos_in_tx_queue(fp
, cos
)
2966 rc
= bnx2x_clean_tx_queue(bp
, fp
->txdata_ptr
[cos
]);
2973 /* must be called with rtnl_lock */
2974 int bnx2x_nic_unload(struct bnx2x
*bp
, int unload_mode
, bool keep_link
)
2977 bool global
= false;
2979 DP(NETIF_MSG_IFUP
, "Starting NIC unload\n");
2981 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp
))
2982 bnx2x_set_os_driver_state(bp
, OS_DRIVER_STATE_DISABLED
);
2984 /* mark driver is unloaded in shmem2 */
2985 if (IS_PF(bp
) && SHMEM2_HAS(bp
, drv_capabilities_flag
)) {
2987 val
= SHMEM2_RD(bp
, drv_capabilities_flag
[BP_FW_MB_IDX(bp
)]);
2988 SHMEM2_WR(bp
, drv_capabilities_flag
[BP_FW_MB_IDX(bp
)],
2989 val
& ~DRV_FLAGS_CAPABILITIES_LOADED_L2
);
2992 if (IS_PF(bp
) && bp
->recovery_state
!= BNX2X_RECOVERY_DONE
&&
2993 (bp
->state
== BNX2X_STATE_CLOSED
||
2994 bp
->state
== BNX2X_STATE_ERROR
)) {
2995 /* We can get here if the driver has been unloaded
2996 * during parity error recovery and is either waiting for a
2997 * leader to complete or for other functions to unload and
2998 * then ifdown has been issued. In this case we want to
2999 * unload and let other functions to complete a recovery
3002 bp
->recovery_state
= BNX2X_RECOVERY_DONE
;
3004 bnx2x_release_leader_lock(bp
);
3007 DP(NETIF_MSG_IFDOWN
, "Releasing a leadership...\n");
3008 BNX2X_ERR("Can't unload in closed or error state\n");
3012 /* Nothing to do during unload if previous bnx2x_nic_load()
3013 * have not completed successfully - all resources are released.
3015 * we can get here only after unsuccessful ndo_* callback, during which
3016 * dev->IFF_UP flag is still on.
3018 if (bp
->state
== BNX2X_STATE_CLOSED
|| bp
->state
== BNX2X_STATE_ERROR
)
3021 /* It's important to set the bp->state to the value different from
3022 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
3023 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
3025 bp
->state
= BNX2X_STATE_CLOSING_WAIT4_HALT
;
3028 /* indicate to VFs that the PF is going down */
3029 bnx2x_iov_channel_down(bp
);
3031 if (CNIC_LOADED(bp
))
3032 bnx2x_cnic_notify(bp
, CNIC_CTL_STOP_CMD
);
3035 bnx2x_tx_disable(bp
);
3036 netdev_reset_tc(bp
->dev
);
3038 bp
->rx_mode
= BNX2X_RX_MODE_NONE
;
3040 del_timer_sync(&bp
->timer
);
3043 /* Set ALWAYS_ALIVE bit in shmem */
3044 bp
->fw_drv_pulse_wr_seq
|= DRV_PULSE_ALWAYS_ALIVE
;
3045 bnx2x_drv_pulse(bp
);
3046 bnx2x_stats_handle(bp
, STATS_EVENT_STOP
);
3047 bnx2x_save_statistics(bp
);
3050 /* wait till consumers catch up with producers in all queues */
3051 bnx2x_drain_tx_queues(bp
);
3053 /* if VF indicate to PF this function is going down (PF will delete sp
3054 * elements and clear initializations
3057 bnx2x_vfpf_close_vf(bp
);
3058 else if (unload_mode
!= UNLOAD_RECOVERY
)
3059 /* if this is a normal/close unload need to clean up chip*/
3060 bnx2x_chip_cleanup(bp
, unload_mode
, keep_link
);
3062 /* Send the UNLOAD_REQUEST to the MCP */
3063 bnx2x_send_unload_req(bp
, unload_mode
);
3065 /* Prevent transactions to host from the functions on the
3066 * engine that doesn't reset global blocks in case of global
3067 * attention once global blocks are reset and gates are opened
3068 * (the engine which leader will perform the recovery
3071 if (!CHIP_IS_E1x(bp
))
3072 bnx2x_pf_disable(bp
);
3074 /* Disable HW interrupts, NAPI */
3075 bnx2x_netif_stop(bp
, 1);
3076 /* Delete all NAPI objects */
3077 bnx2x_del_all_napi(bp
);
3078 if (CNIC_LOADED(bp
))
3079 bnx2x_del_all_napi_cnic(bp
);
3083 /* Report UNLOAD_DONE to MCP */
3084 bnx2x_send_unload_done(bp
, false);
3088 * At this stage no more interrupts will arrive so we may safely clean
3089 * the queueable objects here in case they failed to get cleaned so far.
3092 bnx2x_squeeze_objects(bp
);
3094 /* There should be no more pending SP commands at this stage */
3099 /* clear pending work in rtnl task */
3100 bp
->sp_rtnl_state
= 0;
3103 /* Free SKBs, SGEs, TPA pool and driver internals */
3104 bnx2x_free_skbs(bp
);
3105 if (CNIC_LOADED(bp
))
3106 bnx2x_free_skbs_cnic(bp
);
3107 for_each_rx_queue(bp
, i
)
3108 bnx2x_free_rx_sge_range(bp
, bp
->fp
+ i
, NUM_RX_SGE
);
3110 bnx2x_free_fp_mem(bp
);
3111 if (CNIC_LOADED(bp
))
3112 bnx2x_free_fp_mem_cnic(bp
);
3115 if (CNIC_LOADED(bp
))
3116 bnx2x_free_mem_cnic(bp
);
3120 bp
->state
= BNX2X_STATE_CLOSED
;
3121 bp
->cnic_loaded
= false;
3123 /* Clear driver version indication in shmem */
3125 bnx2x_update_mng_version(bp
);
3127 /* Check if there are pending parity attentions. If there are - set
3128 * RECOVERY_IN_PROGRESS.
3130 if (IS_PF(bp
) && bnx2x_chk_parity_attn(bp
, &global
, false)) {
3131 bnx2x_set_reset_in_progress(bp
);
3133 /* Set RESET_IS_GLOBAL if needed */
3135 bnx2x_set_reset_global(bp
);
3138 /* The last driver must disable a "close the gate" if there is no
3139 * parity attention or "process kill" pending.
3142 !bnx2x_clear_pf_load(bp
) &&
3143 bnx2x_reset_is_done(bp
, BP_PATH(bp
)))
3144 bnx2x_disable_close_the_gate(bp
);
3146 DP(NETIF_MSG_IFUP
, "Ending NIC unload\n");
3151 int bnx2x_set_power_state(struct bnx2x
*bp
, pci_power_t state
)
3155 /* If there is no power capability, silently succeed */
3156 if (!bp
->pdev
->pm_cap
) {
3157 BNX2X_DEV_INFO("No power capability. Breaking.\n");
3161 pci_read_config_word(bp
->pdev
, bp
->pdev
->pm_cap
+ PCI_PM_CTRL
, &pmcsr
);
3165 pci_write_config_word(bp
->pdev
, bp
->pdev
->pm_cap
+ PCI_PM_CTRL
,
3166 ((pmcsr
& ~PCI_PM_CTRL_STATE_MASK
) |
3167 PCI_PM_CTRL_PME_STATUS
));
3169 if (pmcsr
& PCI_PM_CTRL_STATE_MASK
)
3170 /* delay required during transition out of D3hot */
3175 /* If there are other clients above don't
3176 shut down the power */
3177 if (atomic_read(&bp
->pdev
->enable_cnt
) != 1)
3179 /* Don't shut down the power for emulation and FPGA */
3180 if (CHIP_REV_IS_SLOW(bp
))
3183 pmcsr
&= ~PCI_PM_CTRL_STATE_MASK
;
3187 pmcsr
|= PCI_PM_CTRL_PME_ENABLE
;
3189 pci_write_config_word(bp
->pdev
, bp
->pdev
->pm_cap
+ PCI_PM_CTRL
,
3192 /* No more memory access after this point until
3193 * device is brought back to D0.
3198 dev_err(&bp
->pdev
->dev
, "Can't support state = %d\n", state
);
3205 * net_device service functions
3207 static int bnx2x_poll(struct napi_struct
*napi
, int budget
)
3211 struct bnx2x_fastpath
*fp
= container_of(napi
, struct bnx2x_fastpath
,
3213 struct bnx2x
*bp
= fp
->bp
;
3216 #ifdef BNX2X_STOP_ON_ERROR
3217 if (unlikely(bp
->panic
)) {
3218 napi_complete(napi
);
3222 for_each_cos_in_tx_queue(fp
, cos
)
3223 if (bnx2x_tx_queue_has_work(fp
->txdata_ptr
[cos
]))
3224 bnx2x_tx_int(bp
, fp
->txdata_ptr
[cos
]);
3226 if (bnx2x_has_rx_work(fp
)) {
3227 work_done
+= bnx2x_rx_int(fp
, budget
- work_done
);
3229 /* must not complete if we consumed full budget */
3230 if (work_done
>= budget
)
3234 /* Fall out from the NAPI loop if needed */
3235 if (!(bnx2x_has_rx_work(fp
) || bnx2x_has_tx_work(fp
))) {
3237 /* No need to update SB for FCoE L2 ring as long as
3238 * it's connected to the default SB and the SB
3239 * has been updated when NAPI was scheduled.
3241 if (IS_FCOE_FP(fp
)) {
3242 napi_complete(napi
);
3245 bnx2x_update_fpsb_idx(fp
);
3246 /* bnx2x_has_rx_work() reads the status block,
3247 * thus we need to ensure that status block indices
3248 * have been actually read (bnx2x_update_fpsb_idx)
3249 * prior to this check (bnx2x_has_rx_work) so that
3250 * we won't write the "newer" value of the status block
3251 * to IGU (if there was a DMA right after
3252 * bnx2x_has_rx_work and if there is no rmb, the memory
3253 * reading (bnx2x_update_fpsb_idx) may be postponed
3254 * to right before bnx2x_ack_sb). In this case there
3255 * will never be another interrupt until there is
3256 * another update of the status block, while there
3257 * is still unhandled work.
3261 if (!(bnx2x_has_rx_work(fp
) || bnx2x_has_tx_work(fp
))) {
3262 napi_complete(napi
);
3263 /* Re-enable interrupts */
3264 DP(NETIF_MSG_RX_STATUS
,
3265 "Update index to %d\n", fp
->fp_hc_idx
);
3266 bnx2x_ack_sb(bp
, fp
->igu_sb_id
, USTORM_ID
,
3267 le16_to_cpu(fp
->fp_hc_idx
),
3277 /* we split the first BD into headers and data BDs
3278 * to ease the pain of our fellow microcode engineers
3279 * we use one mapping for both BDs
3281 static u16
bnx2x_tx_split(struct bnx2x
*bp
,
3282 struct bnx2x_fp_txdata
*txdata
,
3283 struct sw_tx_bd
*tx_buf
,
3284 struct eth_tx_start_bd
**tx_bd
, u16 hlen
,
3287 struct eth_tx_start_bd
*h_tx_bd
= *tx_bd
;
3288 struct eth_tx_bd
*d_tx_bd
;
3290 int old_len
= le16_to_cpu(h_tx_bd
->nbytes
);
3292 /* first fix first BD */
3293 h_tx_bd
->nbytes
= cpu_to_le16(hlen
);
3295 DP(NETIF_MSG_TX_QUEUED
, "TSO split header size is %d (%x:%x)\n",
3296 h_tx_bd
->nbytes
, h_tx_bd
->addr_hi
, h_tx_bd
->addr_lo
);
3298 /* now get a new data BD
3299 * (after the pbd) and fill it */
3300 bd_prod
= TX_BD(NEXT_TX_IDX(bd_prod
));
3301 d_tx_bd
= &txdata
->tx_desc_ring
[bd_prod
].reg_bd
;
3303 mapping
= HILO_U64(le32_to_cpu(h_tx_bd
->addr_hi
),
3304 le32_to_cpu(h_tx_bd
->addr_lo
)) + hlen
;
3306 d_tx_bd
->addr_hi
= cpu_to_le32(U64_HI(mapping
));
3307 d_tx_bd
->addr_lo
= cpu_to_le32(U64_LO(mapping
));
3308 d_tx_bd
->nbytes
= cpu_to_le16(old_len
- hlen
);
3310 /* this marks the BD as one that has no individual mapping */
3311 tx_buf
->flags
|= BNX2X_TSO_SPLIT_BD
;
3313 DP(NETIF_MSG_TX_QUEUED
,
3314 "TSO split data size is %d (%x:%x)\n",
3315 d_tx_bd
->nbytes
, d_tx_bd
->addr_hi
, d_tx_bd
->addr_lo
);
3318 *tx_bd
= (struct eth_tx_start_bd
*)d_tx_bd
;
3323 #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3324 #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
3325 static __le16
bnx2x_csum_fix(unsigned char *t_header
, u16 csum
, s8 fix
)
3327 __sum16 tsum
= (__force __sum16
) csum
;
3330 tsum
= ~csum_fold(csum_sub((__force __wsum
) csum
,
3331 csum_partial(t_header
- fix
, fix
, 0)));
3334 tsum
= ~csum_fold(csum_add((__force __wsum
) csum
,
3335 csum_partial(t_header
, -fix
, 0)));
3337 return bswab16(tsum
);
3340 static u32
bnx2x_xmit_type(struct bnx2x
*bp
, struct sk_buff
*skb
)
3346 if (skb
->ip_summed
!= CHECKSUM_PARTIAL
)
3349 protocol
= vlan_get_protocol(skb
);
3350 if (protocol
== htons(ETH_P_IPV6
)) {
3352 prot
= ipv6_hdr(skb
)->nexthdr
;
3355 prot
= ip_hdr(skb
)->protocol
;
3358 if (!CHIP_IS_E1x(bp
) && skb
->encapsulation
) {
3359 if (inner_ip_hdr(skb
)->version
== 6) {
3360 rc
|= XMIT_CSUM_ENC_V6
;
3361 if (inner_ipv6_hdr(skb
)->nexthdr
== IPPROTO_TCP
)
3362 rc
|= XMIT_CSUM_TCP
;
3364 rc
|= XMIT_CSUM_ENC_V4
;
3365 if (inner_ip_hdr(skb
)->protocol
== IPPROTO_TCP
)
3366 rc
|= XMIT_CSUM_TCP
;
3369 if (prot
== IPPROTO_TCP
)
3370 rc
|= XMIT_CSUM_TCP
;
3372 if (skb_is_gso(skb
)) {
3373 if (skb_is_gso_v6(skb
)) {
3374 rc
|= (XMIT_GSO_V6
| XMIT_CSUM_TCP
);
3375 if (rc
& XMIT_CSUM_ENC
)
3376 rc
|= XMIT_GSO_ENC_V6
;
3378 rc
|= (XMIT_GSO_V4
| XMIT_CSUM_TCP
);
3379 if (rc
& XMIT_CSUM_ENC
)
3380 rc
|= XMIT_GSO_ENC_V4
;
3387 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3388 /* check if packet requires linearization (packet is too fragmented)
3389 no need to check fragmentation if page size > 8K (there will be no
3390 violation to FW restrictions) */
3391 static int bnx2x_pkt_req_lin(struct bnx2x
*bp
, struct sk_buff
*skb
,
3396 int first_bd_sz
= 0;
3398 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3399 if (skb_shinfo(skb
)->nr_frags
>= (MAX_FETCH_BD
- 3)) {
3401 if (xmit_type
& XMIT_GSO
) {
3402 unsigned short lso_mss
= skb_shinfo(skb
)->gso_size
;
3403 /* Check if LSO packet needs to be copied:
3404 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3405 int wnd_size
= MAX_FETCH_BD
- 3;
3406 /* Number of windows to check */
3407 int num_wnds
= skb_shinfo(skb
)->nr_frags
- wnd_size
;
3412 /* Headers length */
3413 if (xmit_type
& XMIT_GSO_ENC
)
3414 hlen
= (int)(skb_inner_transport_header(skb
) -
3416 inner_tcp_hdrlen(skb
);
3418 hlen
= (int)(skb_transport_header(skb
) -
3419 skb
->data
) + tcp_hdrlen(skb
);
3421 /* Amount of data (w/o headers) on linear part of SKB*/
3422 first_bd_sz
= skb_headlen(skb
) - hlen
;
3424 wnd_sum
= first_bd_sz
;
3426 /* Calculate the first sum - it's special */
3427 for (frag_idx
= 0; frag_idx
< wnd_size
- 1; frag_idx
++)
3429 skb_frag_size(&skb_shinfo(skb
)->frags
[frag_idx
]);
3431 /* If there was data on linear skb data - check it */
3432 if (first_bd_sz
> 0) {
3433 if (unlikely(wnd_sum
< lso_mss
)) {
3438 wnd_sum
-= first_bd_sz
;
3441 /* Others are easier: run through the frag list and
3442 check all windows */
3443 for (wnd_idx
= 0; wnd_idx
<= num_wnds
; wnd_idx
++) {
3445 skb_frag_size(&skb_shinfo(skb
)->frags
[wnd_idx
+ wnd_size
- 1]);
3447 if (unlikely(wnd_sum
< lso_mss
)) {
3452 skb_frag_size(&skb_shinfo(skb
)->frags
[wnd_idx
]);
3455 /* in non-LSO too fragmented packet should always
3462 if (unlikely(to_copy
))
3463 DP(NETIF_MSG_TX_QUEUED
,
3464 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
3465 (xmit_type
& XMIT_GSO
) ? "LSO" : "non-LSO",
3466 skb_shinfo(skb
)->nr_frags
, hlen
, first_bd_sz
);
3473 * bnx2x_set_pbd_gso - update PBD in GSO case.
3477 * @xmit_type: xmit flags
3479 static void bnx2x_set_pbd_gso(struct sk_buff
*skb
,
3480 struct eth_tx_parse_bd_e1x
*pbd
,
3483 pbd
->lso_mss
= cpu_to_le16(skb_shinfo(skb
)->gso_size
);
3484 pbd
->tcp_send_seq
= bswab32(tcp_hdr(skb
)->seq
);
3485 pbd
->tcp_flags
= pbd_tcp_flags(tcp_hdr(skb
));
3487 if (xmit_type
& XMIT_GSO_V4
) {
3488 pbd
->ip_id
= bswab16(ip_hdr(skb
)->id
);
3489 pbd
->tcp_pseudo_csum
=
3490 bswab16(~csum_tcpudp_magic(ip_hdr(skb
)->saddr
,
3492 0, IPPROTO_TCP
, 0));
3494 pbd
->tcp_pseudo_csum
=
3495 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb
)->saddr
,
3496 &ipv6_hdr(skb
)->daddr
,
3497 0, IPPROTO_TCP
, 0));
3501 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN
);
3505 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3507 * @bp: driver handle
3509 * @parsing_data: data to be updated
3510 * @xmit_type: xmit flags
3512 * 57712/578xx related, when skb has encapsulation
3514 static u8
bnx2x_set_pbd_csum_enc(struct bnx2x
*bp
, struct sk_buff
*skb
,
3515 u32
*parsing_data
, u32 xmit_type
)
3518 ((((u8
*)skb_inner_transport_header(skb
) - skb
->data
) >> 1) <<
3519 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT
) &
3520 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W
;
3522 if (xmit_type
& XMIT_CSUM_TCP
) {
3523 *parsing_data
|= ((inner_tcp_hdrlen(skb
) / 4) <<
3524 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT
) &
3525 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW
;
3527 return skb_inner_transport_header(skb
) +
3528 inner_tcp_hdrlen(skb
) - skb
->data
;
3531 /* We support checksum offload for TCP and UDP only.
3532 * No need to pass the UDP header length - it's a constant.
3534 return skb_inner_transport_header(skb
) +
3535 sizeof(struct udphdr
) - skb
->data
;
3539 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3541 * @bp: driver handle
3543 * @parsing_data: data to be updated
3544 * @xmit_type: xmit flags
3546 * 57712/578xx related
3548 static u8
bnx2x_set_pbd_csum_e2(struct bnx2x
*bp
, struct sk_buff
*skb
,
3549 u32
*parsing_data
, u32 xmit_type
)
3552 ((((u8
*)skb_transport_header(skb
) - skb
->data
) >> 1) <<
3553 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT
) &
3554 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W
;
3556 if (xmit_type
& XMIT_CSUM_TCP
) {
3557 *parsing_data
|= ((tcp_hdrlen(skb
) / 4) <<
3558 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT
) &
3559 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW
;
3561 return skb_transport_header(skb
) + tcp_hdrlen(skb
) - skb
->data
;
3563 /* We support checksum offload for TCP and UDP only.
3564 * No need to pass the UDP header length - it's a constant.
3566 return skb_transport_header(skb
) + sizeof(struct udphdr
) - skb
->data
;
3569 /* set FW indication according to inner or outer protocols if tunneled */
3570 static void bnx2x_set_sbd_csum(struct bnx2x
*bp
, struct sk_buff
*skb
,
3571 struct eth_tx_start_bd
*tx_start_bd
,
3574 tx_start_bd
->bd_flags
.as_bitfield
|= ETH_TX_BD_FLAGS_L4_CSUM
;
3576 if (xmit_type
& (XMIT_CSUM_ENC_V6
| XMIT_CSUM_V6
))
3577 tx_start_bd
->bd_flags
.as_bitfield
|= ETH_TX_BD_FLAGS_IPV6
;
3579 if (!(xmit_type
& XMIT_CSUM_TCP
))
3580 tx_start_bd
->bd_flags
.as_bitfield
|= ETH_TX_BD_FLAGS_IS_UDP
;
3584 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3586 * @bp: driver handle
3588 * @pbd: parse BD to be updated
3589 * @xmit_type: xmit flags
3591 static u8
bnx2x_set_pbd_csum(struct bnx2x
*bp
, struct sk_buff
*skb
,
3592 struct eth_tx_parse_bd_e1x
*pbd
,
3595 u8 hlen
= (skb_network_header(skb
) - skb
->data
) >> 1;
3597 /* for now NS flag is not used in Linux */
3600 ((skb
->protocol
== cpu_to_be16(ETH_P_8021Q
)) <<
3601 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT
));
3603 pbd
->ip_hlen_w
= (skb_transport_header(skb
) -
3604 skb_network_header(skb
)) >> 1;
3606 hlen
+= pbd
->ip_hlen_w
;
3608 /* We support checksum offload for TCP and UDP only */
3609 if (xmit_type
& XMIT_CSUM_TCP
)
3610 hlen
+= tcp_hdrlen(skb
) / 2;
3612 hlen
+= sizeof(struct udphdr
) / 2;
3614 pbd
->total_hlen_w
= cpu_to_le16(hlen
);
3617 if (xmit_type
& XMIT_CSUM_TCP
) {
3618 pbd
->tcp_pseudo_csum
= bswab16(tcp_hdr(skb
)->check
);
3621 s8 fix
= SKB_CS_OFF(skb
); /* signed! */
3623 DP(NETIF_MSG_TX_QUEUED
,
3624 "hlen %d fix %d csum before fix %x\n",
3625 le16_to_cpu(pbd
->total_hlen_w
), fix
, SKB_CS(skb
));
3627 /* HW bug: fixup the CSUM */
3628 pbd
->tcp_pseudo_csum
=
3629 bnx2x_csum_fix(skb_transport_header(skb
),
3632 DP(NETIF_MSG_TX_QUEUED
, "csum after fix %x\n",
3633 pbd
->tcp_pseudo_csum
);
3639 static void bnx2x_update_pbds_gso_enc(struct sk_buff
*skb
,
3640 struct eth_tx_parse_bd_e2
*pbd_e2
,
3641 struct eth_tx_parse_2nd_bd
*pbd2
,
3646 u8 outerip_off
, outerip_len
= 0;
3648 /* from outer IP to transport */
3649 hlen_w
= (skb_inner_transport_header(skb
) -
3650 skb_network_header(skb
)) >> 1;
3653 hlen_w
+= inner_tcp_hdrlen(skb
) >> 1;
3655 pbd2
->fw_ip_hdr_to_payload_w
= hlen_w
;
3657 /* outer IP header info */
3658 if (xmit_type
& XMIT_CSUM_V4
) {
3659 struct iphdr
*iph
= ip_hdr(skb
);
3660 u32 csum
= (__force u32
)(~iph
->check
) -
3661 (__force u32
)iph
->tot_len
-
3662 (__force u32
)iph
->frag_off
;
3664 outerip_len
= iph
->ihl
<< 1;
3666 pbd2
->fw_ip_csum_wo_len_flags_frag
=
3667 bswab16(csum_fold((__force __wsum
)csum
));
3669 pbd2
->fw_ip_hdr_to_payload_w
=
3670 hlen_w
- ((sizeof(struct ipv6hdr
)) >> 1);
3671 pbd_e2
->data
.tunnel_data
.flags
|=
3672 ETH_TUNNEL_DATA_IPV6_OUTER
;
3675 pbd2
->tcp_send_seq
= bswab32(inner_tcp_hdr(skb
)->seq
);
3677 pbd2
->tcp_flags
= pbd_tcp_flags(inner_tcp_hdr(skb
));
3679 /* inner IP header info */
3680 if (xmit_type
& XMIT_CSUM_ENC_V4
) {
3681 pbd2
->hw_ip_id
= bswab16(inner_ip_hdr(skb
)->id
);
3683 pbd_e2
->data
.tunnel_data
.pseudo_csum
=
3684 bswab16(~csum_tcpudp_magic(
3685 inner_ip_hdr(skb
)->saddr
,
3686 inner_ip_hdr(skb
)->daddr
,
3687 0, IPPROTO_TCP
, 0));
3689 pbd_e2
->data
.tunnel_data
.pseudo_csum
=
3690 bswab16(~csum_ipv6_magic(
3691 &inner_ipv6_hdr(skb
)->saddr
,
3692 &inner_ipv6_hdr(skb
)->daddr
,
3693 0, IPPROTO_TCP
, 0));
3696 outerip_off
= (skb_network_header(skb
) - skb
->data
) >> 1;
3701 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT
) |
3702 ((skb
->protocol
== cpu_to_be16(ETH_P_8021Q
)) <<
3703 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT
);
3705 if (ip_hdr(skb
)->protocol
== IPPROTO_UDP
) {
3706 SET_FLAG(*global_data
, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST
, 1);
3707 pbd2
->tunnel_udp_hdr_start_w
= skb_transport_offset(skb
) >> 1;
3711 static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff
*skb
, u32
*parsing_data
,
3714 struct ipv6hdr
*ipv6
;
3716 if (!(xmit_type
& (XMIT_GSO_ENC_V6
| XMIT_GSO_V6
)))
3719 if (xmit_type
& XMIT_GSO_ENC_V6
)
3720 ipv6
= inner_ipv6_hdr(skb
);
3721 else /* XMIT_GSO_V6 */
3722 ipv6
= ipv6_hdr(skb
);
3724 if (ipv6
->nexthdr
== NEXTHDR_IPV6
)
3725 *parsing_data
|= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR
;
3728 /* called with netif_tx_lock
3729 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3730 * netif_wake_queue()
3732 netdev_tx_t
bnx2x_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
3734 struct bnx2x
*bp
= netdev_priv(dev
);
3736 struct netdev_queue
*txq
;
3737 struct bnx2x_fp_txdata
*txdata
;
3738 struct sw_tx_bd
*tx_buf
;
3739 struct eth_tx_start_bd
*tx_start_bd
, *first_bd
;
3740 struct eth_tx_bd
*tx_data_bd
, *total_pkt_bd
= NULL
;
3741 struct eth_tx_parse_bd_e1x
*pbd_e1x
= NULL
;
3742 struct eth_tx_parse_bd_e2
*pbd_e2
= NULL
;
3743 struct eth_tx_parse_2nd_bd
*pbd2
= NULL
;
3744 u32 pbd_e2_parsing_data
= 0;
3745 u16 pkt_prod
, bd_prod
;
3748 u32 xmit_type
= bnx2x_xmit_type(bp
, skb
);
3751 __le16 pkt_size
= 0;
3753 u8 mac_type
= UNICAST_ADDRESS
;
3755 #ifdef BNX2X_STOP_ON_ERROR
3756 if (unlikely(bp
->panic
))
3757 return NETDEV_TX_BUSY
;
3760 txq_index
= skb_get_queue_mapping(skb
);
3761 txq
= netdev_get_tx_queue(dev
, txq_index
);
3763 BUG_ON(txq_index
>= MAX_ETH_TXQ_IDX(bp
) + (CNIC_LOADED(bp
) ? 1 : 0));
3765 txdata
= &bp
->bnx2x_txq
[txq_index
];
3767 /* enable this debug print to view the transmission queue being used
3768 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3769 txq_index, fp_index, txdata_index); */
3771 /* enable this debug print to view the transmission details
3772 DP(NETIF_MSG_TX_QUEUED,
3773 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3774 txdata->cid, fp_index, txdata_index, txdata, fp); */
3776 if (unlikely(bnx2x_tx_avail(bp
, txdata
) <
3777 skb_shinfo(skb
)->nr_frags
+
3779 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT
))) {
3780 /* Handle special storage cases separately */
3781 if (txdata
->tx_ring_size
== 0) {
3782 struct bnx2x_eth_q_stats
*q_stats
=
3783 bnx2x_fp_qstats(bp
, txdata
->parent_fp
);
3784 q_stats
->driver_filtered_tx_pkt
++;
3786 return NETDEV_TX_OK
;
3788 bnx2x_fp_qstats(bp
, txdata
->parent_fp
)->driver_xoff
++;
3789 netif_tx_stop_queue(txq
);
3790 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3792 return NETDEV_TX_BUSY
;
3795 DP(NETIF_MSG_TX_QUEUED
,
3796 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
3797 txq_index
, skb
->ip_summed
, skb
->protocol
, ipv6_hdr(skb
)->nexthdr
,
3798 ip_hdr(skb
)->protocol
, skb_shinfo(skb
)->gso_type
, xmit_type
,
3801 eth
= (struct ethhdr
*)skb
->data
;
3803 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3804 if (unlikely(is_multicast_ether_addr(eth
->h_dest
))) {
3805 if (is_broadcast_ether_addr(eth
->h_dest
))
3806 mac_type
= BROADCAST_ADDRESS
;
3808 mac_type
= MULTICAST_ADDRESS
;
3811 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3812 /* First, check if we need to linearize the skb (due to FW
3813 restrictions). No need to check fragmentation if page size > 8K
3814 (there will be no violation to FW restrictions) */
3815 if (bnx2x_pkt_req_lin(bp
, skb
, xmit_type
)) {
3816 /* Statistics of linearization */
3818 if (skb_linearize(skb
) != 0) {
3819 DP(NETIF_MSG_TX_QUEUED
,
3820 "SKB linearization failed - silently dropping this SKB\n");
3821 dev_kfree_skb_any(skb
);
3822 return NETDEV_TX_OK
;
3826 /* Map skb linear data for DMA */
3827 mapping
= dma_map_single(&bp
->pdev
->dev
, skb
->data
,
3828 skb_headlen(skb
), DMA_TO_DEVICE
);
3829 if (unlikely(dma_mapping_error(&bp
->pdev
->dev
, mapping
))) {
3830 DP(NETIF_MSG_TX_QUEUED
,
3831 "SKB mapping failed - silently dropping this SKB\n");
3832 dev_kfree_skb_any(skb
);
3833 return NETDEV_TX_OK
;
3836 Please read carefully. First we use one BD which we mark as start,
3837 then we have a parsing info BD (used for TSO or xsum),
3838 and only then we have the rest of the TSO BDs.
3839 (don't forget to mark the last one as last,
3840 and to unmap only AFTER you write to the BD ...)
3841 And above all, all pdb sizes are in words - NOT DWORDS!
3844 /* get current pkt produced now - advance it just before sending packet
3845 * since mapping of pages may fail and cause packet to be dropped
3847 pkt_prod
= txdata
->tx_pkt_prod
;
3848 bd_prod
= TX_BD(txdata
->tx_bd_prod
);
3850 /* get a tx_buf and first BD
3851 * tx_start_bd may be changed during SPLIT,
3852 * but first_bd will always stay first
3854 tx_buf
= &txdata
->tx_buf_ring
[TX_BD(pkt_prod
)];
3855 tx_start_bd
= &txdata
->tx_desc_ring
[bd_prod
].start_bd
;
3856 first_bd
= tx_start_bd
;
3858 tx_start_bd
->bd_flags
.as_bitfield
= ETH_TX_BD_FLAGS_START_BD
;
3860 if (unlikely(skb_shinfo(skb
)->tx_flags
& SKBTX_HW_TSTAMP
)) {
3861 if (!(bp
->flags
& TX_TIMESTAMPING_EN
)) {
3862 BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
3863 } else if (bp
->ptp_tx_skb
) {
3864 BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
3866 skb_shinfo(skb
)->tx_flags
|= SKBTX_IN_PROGRESS
;
3867 /* schedule check for Tx timestamp */
3868 bp
->ptp_tx_skb
= skb_get(skb
);
3869 bp
->ptp_tx_start
= jiffies
;
3870 schedule_work(&bp
->ptp_task
);
3874 /* header nbd: indirectly zero other flags! */
3875 tx_start_bd
->general_data
= 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT
;
3877 /* remember the first BD of the packet */
3878 tx_buf
->first_bd
= txdata
->tx_bd_prod
;
3882 DP(NETIF_MSG_TX_QUEUED
,
3883 "sending pkt %u @%p next_idx %u bd %u @%p\n",
3884 pkt_prod
, tx_buf
, txdata
->tx_pkt_prod
, bd_prod
, tx_start_bd
);
3886 if (skb_vlan_tag_present(skb
)) {
3887 tx_start_bd
->vlan_or_ethertype
=
3888 cpu_to_le16(skb_vlan_tag_get(skb
));
3889 tx_start_bd
->bd_flags
.as_bitfield
|=
3890 (X_ETH_OUTBAND_VLAN
<< ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT
);
3892 /* when transmitting in a vf, start bd must hold the ethertype
3893 * for fw to enforce it
3895 #ifndef BNX2X_STOP_ON_ERROR
3898 tx_start_bd
->vlan_or_ethertype
=
3899 cpu_to_le16(ntohs(eth
->h_proto
));
3900 #ifndef BNX2X_STOP_ON_ERROR
3902 /* used by FW for packet accounting */
3903 tx_start_bd
->vlan_or_ethertype
= cpu_to_le16(pkt_prod
);
3907 nbd
= 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3909 /* turn on parsing and get a BD */
3910 bd_prod
= TX_BD(NEXT_TX_IDX(bd_prod
));
3912 if (xmit_type
& XMIT_CSUM
)
3913 bnx2x_set_sbd_csum(bp
, skb
, tx_start_bd
, xmit_type
);
3915 if (!CHIP_IS_E1x(bp
)) {
3916 pbd_e2
= &txdata
->tx_desc_ring
[bd_prod
].parse_bd_e2
;
3917 memset(pbd_e2
, 0, sizeof(struct eth_tx_parse_bd_e2
));
3919 if (xmit_type
& XMIT_CSUM_ENC
) {
3920 u16 global_data
= 0;
3922 /* Set PBD in enc checksum offload case */
3923 hlen
= bnx2x_set_pbd_csum_enc(bp
, skb
,
3924 &pbd_e2_parsing_data
,
3927 /* turn on 2nd parsing and get a BD */
3928 bd_prod
= TX_BD(NEXT_TX_IDX(bd_prod
));
3930 pbd2
= &txdata
->tx_desc_ring
[bd_prod
].parse_2nd_bd
;
3932 memset(pbd2
, 0, sizeof(*pbd2
));
3934 pbd_e2
->data
.tunnel_data
.ip_hdr_start_inner_w
=
3935 (skb_inner_network_header(skb
) -
3938 if (xmit_type
& XMIT_GSO_ENC
)
3939 bnx2x_update_pbds_gso_enc(skb
, pbd_e2
, pbd2
,
3943 pbd2
->global_data
= cpu_to_le16(global_data
);
3945 /* add addition parse BD indication to start BD */
3946 SET_FLAG(tx_start_bd
->general_data
,
3947 ETH_TX_START_BD_PARSE_NBDS
, 1);
3948 /* set encapsulation flag in start BD */
3949 SET_FLAG(tx_start_bd
->general_data
,
3950 ETH_TX_START_BD_TUNNEL_EXIST
, 1);
3952 tx_buf
->flags
|= BNX2X_HAS_SECOND_PBD
;
3955 } else if (xmit_type
& XMIT_CSUM
) {
3956 /* Set PBD in checksum offload case w/o encapsulation */
3957 hlen
= bnx2x_set_pbd_csum_e2(bp
, skb
,
3958 &pbd_e2_parsing_data
,
3962 bnx2x_set_ipv6_ext_e2(skb
, &pbd_e2_parsing_data
, xmit_type
);
3963 /* Add the macs to the parsing BD if this is a vf or if
3964 * Tx Switching is enabled.
3967 /* override GRE parameters in BD */
3968 bnx2x_set_fw_mac_addr(&pbd_e2
->data
.mac_addr
.src_hi
,
3969 &pbd_e2
->data
.mac_addr
.src_mid
,
3970 &pbd_e2
->data
.mac_addr
.src_lo
,
3973 bnx2x_set_fw_mac_addr(&pbd_e2
->data
.mac_addr
.dst_hi
,
3974 &pbd_e2
->data
.mac_addr
.dst_mid
,
3975 &pbd_e2
->data
.mac_addr
.dst_lo
,
3978 if (bp
->flags
& TX_SWITCHING
)
3979 bnx2x_set_fw_mac_addr(
3980 &pbd_e2
->data
.mac_addr
.dst_hi
,
3981 &pbd_e2
->data
.mac_addr
.dst_mid
,
3982 &pbd_e2
->data
.mac_addr
.dst_lo
,
3984 #ifdef BNX2X_STOP_ON_ERROR
3985 /* Enforce security is always set in Stop on Error -
3986 * source mac should be present in the parsing BD
3988 bnx2x_set_fw_mac_addr(&pbd_e2
->data
.mac_addr
.src_hi
,
3989 &pbd_e2
->data
.mac_addr
.src_mid
,
3990 &pbd_e2
->data
.mac_addr
.src_lo
,
3995 SET_FLAG(pbd_e2_parsing_data
,
3996 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE
, mac_type
);
3998 u16 global_data
= 0;
3999 pbd_e1x
= &txdata
->tx_desc_ring
[bd_prod
].parse_bd_e1x
;
4000 memset(pbd_e1x
, 0, sizeof(struct eth_tx_parse_bd_e1x
));
4001 /* Set PBD in checksum offload case */
4002 if (xmit_type
& XMIT_CSUM
)
4003 hlen
= bnx2x_set_pbd_csum(bp
, skb
, pbd_e1x
, xmit_type
);
4005 SET_FLAG(global_data
,
4006 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE
, mac_type
);
4007 pbd_e1x
->global_data
|= cpu_to_le16(global_data
);
4010 /* Setup the data pointer of the first BD of the packet */
4011 tx_start_bd
->addr_hi
= cpu_to_le32(U64_HI(mapping
));
4012 tx_start_bd
->addr_lo
= cpu_to_le32(U64_LO(mapping
));
4013 tx_start_bd
->nbytes
= cpu_to_le16(skb_headlen(skb
));
4014 pkt_size
= tx_start_bd
->nbytes
;
4016 DP(NETIF_MSG_TX_QUEUED
,
4017 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
4018 tx_start_bd
, tx_start_bd
->addr_hi
, tx_start_bd
->addr_lo
,
4019 le16_to_cpu(tx_start_bd
->nbytes
),
4020 tx_start_bd
->bd_flags
.as_bitfield
,
4021 le16_to_cpu(tx_start_bd
->vlan_or_ethertype
));
4023 if (xmit_type
& XMIT_GSO
) {
4025 DP(NETIF_MSG_TX_QUEUED
,
4026 "TSO packet len %d hlen %d total len %d tso size %d\n",
4027 skb
->len
, hlen
, skb_headlen(skb
),
4028 skb_shinfo(skb
)->gso_size
);
4030 tx_start_bd
->bd_flags
.as_bitfield
|= ETH_TX_BD_FLAGS_SW_LSO
;
4032 if (unlikely(skb_headlen(skb
) > hlen
)) {
4034 bd_prod
= bnx2x_tx_split(bp
, txdata
, tx_buf
,
4038 if (!CHIP_IS_E1x(bp
))
4039 pbd_e2_parsing_data
|=
4040 (skb_shinfo(skb
)->gso_size
<<
4041 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT
) &
4042 ETH_TX_PARSE_BD_E2_LSO_MSS
;
4044 bnx2x_set_pbd_gso(skb
, pbd_e1x
, xmit_type
);
4047 /* Set the PBD's parsing_data field if not zero
4048 * (for the chips newer than 57711).
4050 if (pbd_e2_parsing_data
)
4051 pbd_e2
->parsing_data
= cpu_to_le32(pbd_e2_parsing_data
);
4053 tx_data_bd
= (struct eth_tx_bd
*)tx_start_bd
;
4055 /* Handle fragmented skb */
4056 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
4057 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
4059 mapping
= skb_frag_dma_map(&bp
->pdev
->dev
, frag
, 0,
4060 skb_frag_size(frag
), DMA_TO_DEVICE
);
4061 if (unlikely(dma_mapping_error(&bp
->pdev
->dev
, mapping
))) {
4062 unsigned int pkts_compl
= 0, bytes_compl
= 0;
4064 DP(NETIF_MSG_TX_QUEUED
,
4065 "Unable to map page - dropping packet...\n");
4067 /* we need unmap all buffers already mapped
4069 * first_bd->nbd need to be properly updated
4070 * before call to bnx2x_free_tx_pkt
4072 first_bd
->nbd
= cpu_to_le16(nbd
);
4073 bnx2x_free_tx_pkt(bp
, txdata
,
4074 TX_BD(txdata
->tx_pkt_prod
),
4075 &pkts_compl
, &bytes_compl
);
4076 return NETDEV_TX_OK
;
4079 bd_prod
= TX_BD(NEXT_TX_IDX(bd_prod
));
4080 tx_data_bd
= &txdata
->tx_desc_ring
[bd_prod
].reg_bd
;
4081 if (total_pkt_bd
== NULL
)
4082 total_pkt_bd
= &txdata
->tx_desc_ring
[bd_prod
].reg_bd
;
4084 tx_data_bd
->addr_hi
= cpu_to_le32(U64_HI(mapping
));
4085 tx_data_bd
->addr_lo
= cpu_to_le32(U64_LO(mapping
));
4086 tx_data_bd
->nbytes
= cpu_to_le16(skb_frag_size(frag
));
4087 le16_add_cpu(&pkt_size
, skb_frag_size(frag
));
4090 DP(NETIF_MSG_TX_QUEUED
,
4091 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
4092 i
, tx_data_bd
, tx_data_bd
->addr_hi
, tx_data_bd
->addr_lo
,
4093 le16_to_cpu(tx_data_bd
->nbytes
));
4096 DP(NETIF_MSG_TX_QUEUED
, "last bd @%p\n", tx_data_bd
);
4098 /* update with actual num BDs */
4099 first_bd
->nbd
= cpu_to_le16(nbd
);
4101 bd_prod
= TX_BD(NEXT_TX_IDX(bd_prod
));
4103 /* now send a tx doorbell, counting the next BD
4104 * if the packet contains or ends with it
4106 if (TX_BD_POFF(bd_prod
) < nbd
)
4109 /* total_pkt_bytes should be set on the first data BD if
4110 * it's not an LSO packet and there is more than one
4111 * data BD. In this case pkt_size is limited by an MTU value.
4112 * However we prefer to set it for an LSO packet (while we don't
4113 * have to) in order to save some CPU cycles in a none-LSO
4114 * case, when we much more care about them.
4116 if (total_pkt_bd
!= NULL
)
4117 total_pkt_bd
->total_pkt_bytes
= pkt_size
;
4120 DP(NETIF_MSG_TX_QUEUED
,
4121 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
4122 pbd_e1x
, pbd_e1x
->global_data
, pbd_e1x
->ip_hlen_w
,
4123 pbd_e1x
->ip_id
, pbd_e1x
->lso_mss
, pbd_e1x
->tcp_flags
,
4124 pbd_e1x
->tcp_pseudo_csum
, pbd_e1x
->tcp_send_seq
,
4125 le16_to_cpu(pbd_e1x
->total_hlen_w
));
4127 DP(NETIF_MSG_TX_QUEUED
,
4128 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
4130 pbd_e2
->data
.mac_addr
.dst_hi
,
4131 pbd_e2
->data
.mac_addr
.dst_mid
,
4132 pbd_e2
->data
.mac_addr
.dst_lo
,
4133 pbd_e2
->data
.mac_addr
.src_hi
,
4134 pbd_e2
->data
.mac_addr
.src_mid
,
4135 pbd_e2
->data
.mac_addr
.src_lo
,
4136 pbd_e2
->parsing_data
);
4137 DP(NETIF_MSG_TX_QUEUED
, "doorbell: nbd %d bd %u\n", nbd
, bd_prod
);
4139 netdev_tx_sent_queue(txq
, skb
->len
);
4141 skb_tx_timestamp(skb
);
4143 txdata
->tx_pkt_prod
++;
4145 * Make sure that the BD data is updated before updating the producer
4146 * since FW might read the BD right after the producer is updated.
4147 * This is only applicable for weak-ordered memory model archs such
4148 * as IA-64. The following barrier is also mandatory since FW will
4149 * assumes packets must have BDs.
4153 txdata
->tx_db
.data
.prod
+= nbd
;
4156 DOORBELL(bp
, txdata
->cid
, txdata
->tx_db
.raw
);
4160 txdata
->tx_bd_prod
+= nbd
;
4162 if (unlikely(bnx2x_tx_avail(bp
, txdata
) < MAX_DESC_PER_TX_PKT
)) {
4163 netif_tx_stop_queue(txq
);
4165 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
4166 * ordering of set_bit() in netif_tx_stop_queue() and read of
4170 bnx2x_fp_qstats(bp
, txdata
->parent_fp
)->driver_xoff
++;
4171 if (bnx2x_tx_avail(bp
, txdata
) >= MAX_DESC_PER_TX_PKT
)
4172 netif_tx_wake_queue(txq
);
4176 return NETDEV_TX_OK
;
4179 void bnx2x_get_c2s_mapping(struct bnx2x
*bp
, u8
*c2s_map
, u8
*c2s_default
)
4181 int mfw_vn
= BP_FW_MB_IDX(bp
);
4184 /* If the shmem shouldn't affect configuration, reflect */
4185 if (!IS_MF_BD(bp
)) {
4188 for (i
= 0; i
< BNX2X_MAX_PRIORITY
; i
++)
4195 tmp
= SHMEM2_RD(bp
, c2s_pcp_map_lower
[mfw_vn
]);
4196 tmp
= (__force u32
)be32_to_cpu((__force __be32
)tmp
);
4197 c2s_map
[0] = tmp
& 0xff;
4198 c2s_map
[1] = (tmp
>> 8) & 0xff;
4199 c2s_map
[2] = (tmp
>> 16) & 0xff;
4200 c2s_map
[3] = (tmp
>> 24) & 0xff;
4202 tmp
= SHMEM2_RD(bp
, c2s_pcp_map_upper
[mfw_vn
]);
4203 tmp
= (__force u32
)be32_to_cpu((__force __be32
)tmp
);
4204 c2s_map
[4] = tmp
& 0xff;
4205 c2s_map
[5] = (tmp
>> 8) & 0xff;
4206 c2s_map
[6] = (tmp
>> 16) & 0xff;
4207 c2s_map
[7] = (tmp
>> 24) & 0xff;
4209 tmp
= SHMEM2_RD(bp
, c2s_pcp_map_default
[mfw_vn
]);
4210 tmp
= (__force u32
)be32_to_cpu((__force __be32
)tmp
);
4211 *c2s_default
= (tmp
>> (8 * mfw_vn
)) & 0xff;
4215 * bnx2x_setup_tc - routine to configure net_device for multi tc
4217 * @netdev: net device to configure
4218 * @tc: number of traffic classes to enable
4220 * callback connected to the ndo_setup_tc function pointer
4222 int bnx2x_setup_tc(struct net_device
*dev
, u8 num_tc
)
4224 struct bnx2x
*bp
= netdev_priv(dev
);
4225 u8 c2s_map
[BNX2X_MAX_PRIORITY
], c2s_def
;
4226 int cos
, prio
, count
, offset
;
4228 /* setup tc must be called under rtnl lock */
4231 /* no traffic classes requested. Aborting */
4233 netdev_reset_tc(dev
);
4237 /* requested to support too many traffic classes */
4238 if (num_tc
> bp
->max_cos
) {
4239 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4240 num_tc
, bp
->max_cos
);
4244 /* declare amount of supported traffic classes */
4245 if (netdev_set_num_tc(dev
, num_tc
)) {
4246 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc
);
4250 bnx2x_get_c2s_mapping(bp
, c2s_map
, &c2s_def
);
4252 /* configure priority to traffic class mapping */
4253 for (prio
= 0; prio
< BNX2X_MAX_PRIORITY
; prio
++) {
4254 int outer_prio
= c2s_map
[prio
];
4256 netdev_set_prio_tc_map(dev
, prio
, bp
->prio_to_cos
[outer_prio
]);
4257 DP(BNX2X_MSG_SP
| NETIF_MSG_IFUP
,
4258 "mapping priority %d to tc %d\n",
4259 outer_prio
, bp
->prio_to_cos
[outer_prio
]);
4262 /* Use this configuration to differentiate tc0 from other COSes
4263 This can be used for ets or pfc, and save the effort of setting
4264 up a multio class queue disc or negotiating DCBX with a switch
4265 netdev_set_prio_tc_map(dev, 0, 0);
4266 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
4267 for (prio = 1; prio < 16; prio++) {
4268 netdev_set_prio_tc_map(dev, prio, 1);
4269 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
4272 /* configure traffic class to transmission queue mapping */
4273 for (cos
= 0; cos
< bp
->max_cos
; cos
++) {
4274 count
= BNX2X_NUM_ETH_QUEUES(bp
);
4275 offset
= cos
* BNX2X_NUM_NON_CNIC_QUEUES(bp
);
4276 netdev_set_tc_queue(dev
, cos
, count
, offset
);
4277 DP(BNX2X_MSG_SP
| NETIF_MSG_IFUP
,
4278 "mapping tc %d to offset %d count %d\n",
4279 cos
, offset
, count
);
/* called with rtnl_lock */
int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (!is_valid_ether_addr(addr->sa_data)) {
		BNX2X_ERR("Requested MAC address is not valid\n");
		return -EINVAL;
	}

	if (IS_MF_STORAGE_ONLY(bp)) {
		BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
		return -EINVAL;
	}

	if (netif_running(dev)) {
		rc = bnx2x_set_eth_mac(bp, false);
		if (rc)
			return rc;
	}

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (netif_running(dev))
		rc = bnx2x_set_eth_mac(bp, true);

	if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
		SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);

	return rc;
}
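
/* The two netif_running() legs above are deliberately split: the old MAC is
 * deleted from the device filters before dev_addr is overwritten, and the
 * new one is programmed only afterwards, so the filters never hold a stale
 * address while the interface is up.
 */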
static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
{
	union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
	struct bnx2x_fastpath *fp = &bp->fp[fp_index];
	u8 cos;

	/* Common */

	if (IS_FCOE_IDX(fp_index)) {
		memset(sb, 0, sizeof(union host_hc_status_block));
		fp->status_blk_mapping = 0;
	} else {
		/* status blocks */
		if (!CHIP_IS_E1x(bp))
			BNX2X_PCI_FREE(sb->e2_sb,
				       bnx2x_fp(bp, fp_index,
						status_blk_mapping),
				       sizeof(struct host_hc_status_block_e2));
		else
			BNX2X_PCI_FREE(sb->e1x_sb,
				       bnx2x_fp(bp, fp_index,
						status_blk_mapping),
				       sizeof(struct host_hc_status_block_e1x));
	}

	/* Rx */
	if (!skip_rx_queue(bp, fp_index)) {
		bnx2x_free_rx_bds(fp);

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
			       bnx2x_fp(bp, fp_index, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
			       bnx2x_fp(bp, fp_index, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
			       bnx2x_fp(bp, fp_index, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}

	/* Tx */
	if (!skip_tx_queue(bp, fp_index)) {
		/* fastpath tx rings: tx_buf tx_desc */
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];

			DP(NETIF_MSG_IFDOWN,
			   "freeing tx memory of fp %d cos %d cid %d\n",
			   fp_index, cos, txdata->cid);

			BNX2X_FREE(txdata->tx_buf_ring);
			BNX2X_PCI_FREE(txdata->tx_desc_ring,
				       txdata->tx_desc_mapping,
				       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
		}
	}
	/* end of fastpath */
}
static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
{
	int i;

	for_each_cnic_queue(bp, i)
		bnx2x_free_fp_mem_at(bp, i);
}

void bnx2x_free_fp_mem(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i)
		bnx2x_free_fp_mem_at(bp, i);
}
static void set_sb_shortcuts(struct bnx2x *bp, int index)
{
	union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
	if (!CHIP_IS_E1x(bp)) {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e2_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e2_sb->sb.running_index;
	} else {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e1x_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e1x_sb->sb.running_index;
	}
}
/* Returns the number of actually allocated BDs */
static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
			      int rx_ring_size)
{
	struct bnx2x *bp = fp->bp;
	u16 ring_prod, cqe_ring_prod;
	int i, failure_cnt = 0;

	fp->rx_comp_cons = 0;
	cqe_ring_prod = ring_prod = 0;

	/* This routine is called only during fp init, so
	 * fp->eth_q_stats.rx_skb_alloc_failed = 0
	 */
	for (i = 0; i < rx_ring_size; i++) {
		if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
			failure_cnt++;
			continue;
		}
		ring_prod = NEXT_RX_IDX(ring_prod);
		cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
		WARN_ON(ring_prod <= (i - failure_cnt));
	}

	if (failure_cnt)
		BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
			  i - failure_cnt, fp->index);

	fp->rx_bd_prod = ring_prod;
	/* Limit the CQE producer by the CQE ring size */
	fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
				 cqe_ring_prod);

	fp->rx_pkt = fp->rx_calls = 0;

	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;

	return i - failure_cnt;
}
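
/* Note that in bnx2x_alloc_rx_bds() the producer indices advance only on
 * successful allocations, so the i - failure_cnt value returned is exactly
 * the number of BDs posted and can be compared against the requested ring
 * size by the caller.
 */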
static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RCQ_RINGS; i++) {
		struct eth_rx_cqe_next_page *nextpg;

		nextpg = (struct eth_rx_cqe_next_page *)
			&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
		nextpg->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		nextpg->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
	}
}
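
/* Chaining sketch: the last descriptor of each RCQ page is reserved as a
 * next-page pointer, and the (i % NUM_RCQ_RINGS) wrap makes the final page
 * point back at page 0, so the completion queue forms a closed ring of
 * pages.
 */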
static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
{
	union host_hc_status_block *sb;
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int ring_size = 0;
	u8 cos;
	int rx_ring_size = 0;

	if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) {
		rx_ring_size = MIN_RX_SIZE_NONTPA;
		bp->rx_ring_size = rx_ring_size;
	} else if (!bp->rx_ring_size) {
		rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);

		if (CHIP_IS_E3(bp)) {
			u32 cfg = SHMEM_RD(bp,
					   dev_info.port_hw_config[BP_PORT(bp)].
					   default_cfg);

			/* Decrease ring size for 1G functions */
			if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
			    PORT_HW_CFG_NET_SERDES_IF_SGMII)
				rx_ring_size /= 10;
		}

		/* allocate at least the number of buffers required by FW */
		rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
				     MIN_RX_SIZE_TPA, rx_ring_size);

		bp->rx_ring_size = rx_ring_size;
	} else /* if rx_ring_size specified - use it */
		rx_ring_size = bp->rx_ring_size;

	DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);

	/* Common */
	sb = &bnx2x_fp(bp, index, status_blk);

	if (!IS_FCOE_IDX(index)) {
		/* status blocks */
		if (!CHIP_IS_E1x(bp)) {
			sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
						    sizeof(struct host_hc_status_block_e2));
			if (!sb->e2_sb)
				goto alloc_mem_err;
		} else {
			sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
						     sizeof(struct host_hc_status_block_e1x));
			if (!sb->e1x_sb)
				goto alloc_mem_err;
		}
	}

	/* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
	 * set shortcuts for it.
	 */
	if (!IS_FCOE_IDX(index))
		set_sb_shortcuts(bp, index);

	/* Tx */
	if (!skip_tx_queue(bp, index)) {
		/* fastpath tx rings: tx_buf tx_desc */
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];

			DP(NETIF_MSG_IFUP,
			   "allocating tx memory of fp %d cos %d\n",
			   index, cos);

			txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
						      sizeof(struct sw_tx_bd),
						      GFP_KERNEL);
			if (!txdata->tx_buf_ring)
				goto alloc_mem_err;
			txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
							       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
			if (!txdata->tx_desc_ring)
				goto alloc_mem_err;
		}
	}

	/* Rx */
	if (!skip_rx_queue(bp, index)) {
		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		bnx2x_fp(bp, index, rx_buf_ring) =
			kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
		if (!bnx2x_fp(bp, index, rx_buf_ring))
			goto alloc_mem_err;
		bnx2x_fp(bp, index, rx_desc_ring) =
			BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
					sizeof(struct eth_rx_bd) * NUM_RX_BD);
		if (!bnx2x_fp(bp, index, rx_desc_ring))
			goto alloc_mem_err;

		/* Seed all CQEs by 1s */
		bnx2x_fp(bp, index, rx_comp_ring) =
			BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
					 sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
		if (!bnx2x_fp(bp, index, rx_comp_ring))
			goto alloc_mem_err;

		/* SGE ring */
		bnx2x_fp(bp, index, rx_page_ring) =
			kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
				GFP_KERNEL);
		if (!bnx2x_fp(bp, index, rx_page_ring))
			goto alloc_mem_err;
		bnx2x_fp(bp, index, rx_sge_ring) =
			BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
					BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
		if (!bnx2x_fp(bp, index, rx_sge_ring))
			goto alloc_mem_err;
		/* RX BD ring */
		bnx2x_set_next_page_rx_bd(fp);

		/* CQ ring */
		bnx2x_set_next_page_rx_cq(fp);

		/* BDs */
		ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
		if (ring_size < rx_ring_size)
			goto alloc_mem_err;
	}

	return 0;

/* handles low memory cases */
alloc_mem_err:
	BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
		  index, ring_size);
	/* FW will drop all packets if the queue is not big enough;
	 * in these cases we disable the queue.
	 * Min size is different for OOO, TPA and non-TPA queues.
	 */
	if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
			 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
		/* release memory allocated for this queue */
		bnx2x_free_fp_mem_at(bp, index);
		return -ENOMEM;
	}
	return 0;
}
static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
{
	if (!NO_FCOE(bp))
		/* FCoE */
		if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
			/* we will fail load process instead of marking
			 * NO_FCOE_FLAG
			 */
			return -ENOMEM;

	return 0;
}
static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
{
	int i;

	/* 1. Allocate FP for leading - fatal if error
	 * 2. Allocate RSS - fix number of queues if error
	 */

	/* leading */
	if (bnx2x_alloc_fp_mem_at(bp, 0))
		return -ENOMEM;

	/* RSS */
	for_each_nondefault_eth_queue(bp, i)
		if (bnx2x_alloc_fp_mem_at(bp, i))
			break;

	/* handle memory failures */
	if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
		int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;

		WARN_ON(delta < 0);
		bnx2x_shrink_eth_fp(bp, delta);
		if (CNIC_SUPPORT(bp))
			/* move non eth FPs next to last eth FP
			 * must be done in that order
			 * FCOE_IDX < FWD_IDX < OOO_IDX
			 */

			/* move FCoE fp even if NO_FCOE_FLAG is on */
			bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
		bp->num_ethernet_queues -= delta;
		bp->num_queues = bp->num_ethernet_queues +
				 bp->num_cnic_queues;
		BNX2X_ERR("Adjusted num of queues from %d to %d\n",
			  bp->num_queues + delta, bp->num_queues);
	}

	return 0;
}
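
/* Shrink path example (hypothetical numbers): if 16 eth queues were
 * requested but allocation stopped at i == 12, delta == 4; the FCoE fp is
 * slid down next to the last surviving eth fp and num_queues drops by 4,
 * so the load proceeds with a smaller RSS set instead of failing outright.
 */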
void bnx2x_free_mem_bp(struct bnx2x *bp)
{
	int i;

	for (i = 0; i < bp->fp_array_size; i++)
		kfree(bp->fp[i].tpa_info);
	kfree(bp->fp);
	kfree(bp->sp_objs);
	kfree(bp->fp_stats);
	kfree(bp->bnx2x_txq);
	kfree(bp->msix_table);
	kfree(bp->ilt);
}
int bnx2x_alloc_mem_bp(struct bnx2x *bp)
{
	struct bnx2x_fastpath *fp;
	struct msix_entry *tbl;
	struct bnx2x_ilt *ilt;
	int msix_table_size = 0;
	int fp_array_size, txq_array_size;
	int i;

	/*
	 * The biggest MSI-X table we might need is the maximum number of fast
	 * path IGU SBs plus the default SB (for PF only).
	 */
	msix_table_size = bp->igu_sb_cnt;
	if (IS_PF(bp))
		msix_table_size++;
	BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);

	/* fp array: RSS plus CNIC related L2 queues */
	fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
	bp->fp_array_size = fp_array_size;
	BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);

	fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
	if (!fp)
		goto alloc_err;
	for (i = 0; i < bp->fp_array_size; i++) {
		fp[i].tpa_info =
			kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
				sizeof(struct bnx2x_agg_info), GFP_KERNEL);
		if (!(fp[i].tpa_info))
			goto alloc_err;
	}

	bp->fp = fp;

	/* allocate sp objs */
	bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
			      GFP_KERNEL);
	if (!bp->sp_objs)
		goto alloc_err;

	/* allocate fp_stats */
	bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
			       GFP_KERNEL);
	if (!bp->fp_stats)
		goto alloc_err;

	/* Allocate memory for the transmission queues array */
	txq_array_size =
		BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
	BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);

	bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
				GFP_KERNEL);
	if (!bp->bnx2x_txq)
		goto alloc_err;

	/* msix table */
	tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		goto alloc_err;
	bp->msix_table = tbl;

	/* ilt */
	ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
	if (!ilt)
		goto alloc_err;
	bp->ilt = ilt;

	return 0;
alloc_err:
	bnx2x_free_mem_bp(bp);
	return -ENOMEM;
}
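
/* On any failure above, bnx2x_free_mem_bp() releases whatever was already
 * allocated; kfree() tolerates NULL pointers, so the partially-initialized
 * bp can be torn down from a single error path.
 */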
int bnx2x_reload_if_running(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (unlikely(!netif_running(dev)))
		return 0;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
	return bnx2x_nic_load(bp, LOAD_NORMAL);
}
int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = 0;
	if (bp->link_params.num_phys <= 1)
		return INT_PHY;

	if (bp->link_vars.link_up) {
		sel_phy_idx = EXT_PHY1;
		/* In case link is SERDES, check if the EXT_PHY2 is the one */
		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
			sel_phy_idx = EXT_PHY2;
	} else {
		switch (bnx2x_phy_selection(&bp->link_params)) {
		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY1;
			break;
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY2;
			break;
		}
	}

	return sel_phy_idx;
}
int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
	/*
	 * The selected activated PHY is always after swapping (in case PHY
	 * swapping is enabled). So when swapping is enabled, we need to
	 * reverse the configuration.
	 */

	if (bp->link_params.multi_phy_config &
	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
		if (sel_phy_idx == EXT_PHY1)
			sel_phy_idx = EXT_PHY2;
		else if (sel_phy_idx == EXT_PHY2)
			sel_phy_idx = EXT_PHY1;
	}
	return LINK_CONFIG_IDX(sel_phy_idx);
}
#ifdef NETDEV_FCOE_WWNN
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	switch (type) {
	case NETDEV_FCOE_WWNN:
		*wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
				cp->fcoe_wwn_node_name_lo);
		break;
	case NETDEV_FCOE_WWPN:
		*wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
				cp->fcoe_wwn_port_name_lo);
		break;
	default:
		BNX2X_ERR("Wrong WWN type requested - %d\n", type);
		return -EINVAL;
	}

	return 0;
}
#endif
/* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (pci_num_vf(bp->pdev)) {
		DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
		return -EPERM;
	}

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Can't perform change MTU during parity recovery\n");
		return -EAGAIN;
	}

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
		BNX2X_ERR("Can't support requested MTU size\n");
		return -EINVAL;
	}

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
		SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);

	return bnx2x_reload_if_running(dev);
}
netdev_features_t bnx2x_fix_features(struct net_device *dev,
				     netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (pci_num_vf(bp->pdev)) {
		netdev_features_t changed = dev->features ^ features;

		/* Revert the requested changes in features if they
		 * would require internal reload of PF in bnx2x_set_features().
		 */
		if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
			features &= ~NETIF_F_RXCSUM;
			features |= dev->features & NETIF_F_RXCSUM;
		}

		if (changed & NETIF_F_LOOPBACK) {
			features &= ~NETIF_F_LOOPBACK;
			features |= dev->features & NETIF_F_LOOPBACK;
		}
	}

	/* TPA requires Rx CSUM offloading */
	if (!(features & NETIF_F_RXCSUM)) {
		features &= ~NETIF_F_LRO;
		features &= ~NETIF_F_GRO;
	}

	return features;
}
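
/* The dependency enforced above is structural: TPA/LRO/GRO only aggregate
 * frames whose checksums the HW has already validated, so dropping
 * NETIF_F_RXCSUM must drop both aggregation features with it rather than
 * leave them dangling.
 */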
int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);
	netdev_features_t changes = features ^ dev->features;
	bool bnx2x_reload = false;
	int rc;

	/* VFs or non SRIOV PFs should be able to change loopback feature */
	if (!pci_num_vf(bp->pdev)) {
		if (features & NETIF_F_LOOPBACK) {
			if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
				bp->link_params.loopback_mode = LOOPBACK_BMAC;
				bnx2x_reload = true;
			}
		} else {
			if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
				bp->link_params.loopback_mode = LOOPBACK_NONE;
				bnx2x_reload = true;
			}
		}
	}

	/* if GRO is changed while LRO is enabled, don't force a reload */
	if ((changes & NETIF_F_GRO) && (features & NETIF_F_LRO))
		changes &= ~NETIF_F_GRO;

	/* if GRO is changed while HW TPA is off, don't force a reload */
	if ((changes & NETIF_F_GRO) && bp->disable_tpa)
		changes &= ~NETIF_F_GRO;

	if (changes)
		bnx2x_reload = true;

	if (bnx2x_reload) {
		if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
			dev->features = features;
			rc = bnx2x_reload_if_running(dev);
			return rc;
		}
		/* else: bnx2x_nic_load() will be called at end of recovery */
	}

	return 0;
}
void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif

	/* This allows the netif to be shutdown gracefully before resetting */
	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
}
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}
int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}
void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
			      u32 cid)
{
	if (!cxt) {
		BNX2X_ERR("bad context pointer %p\n", cxt);
		return;
	}

	/* ustorm cxt validation */
	cxt->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
			CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
	/* xcontext validation */
	cxt->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
			CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
}
static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
				    u8 fw_sb_id, u8 sb_index,
				    u8 ticks)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d ticks %d\n",
	   port, fw_sb_id, sb_index, ticks);
}
static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
				    u16 fw_sb_id, u8 sb_index,
				    u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
	u8 flags = REG_RD8(bp, addr);

	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR8(bp, addr, flags);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d disable %d\n",
	   port, fw_sb_id, sb_index, disable);
}
void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
				    u8 sb_index, u8 disable, u16 usec)
{
	int port = BP_PORT(bp);
	u8 ticks = usec / BNX2X_BTR;

	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);

	disable = disable ? 1 : (usec ? 0 : 1);
	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
}
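
/* Conversion note: the timeout is programmed in BNX2X_BTR-microsecond
 * ticks, and a usec value of 0 force-disables coalescing for this index
 * even when the caller did not request an explicit disable.
 */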
void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
			    u32 verbose)
{
	smp_mb__before_atomic();
	set_bit(flag, &bp->sp_rtnl_state);
	smp_mb__after_atomic();
	DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
	   flag);
	schedule_delayed_work(&bp->sp_rtnl_task, 0);
}
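
/* The barriers around set_bit() order the flag write against the work
 * being scheduled; the sp_rtnl worker reads the same sp_rtnl_state word
 * with test_and_clear_bit(), which is presumably the pairing side.
 */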
EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl);