/* bnx2x_sriov.c: Broadcom Everest network driver.
 *
 * Copyright 2009-2013 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Shmulik Ravid <shmulikr@broadcom.com>
 *	       Ariel Elior <ariele@broadcom.com>
 */
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sp.h"
#include <linux/crc32.h>
#include <linux/if_vlan.h>
/* General service functions */
static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
				  u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}
static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
				 u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}
int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	int idx;

	for_each_vf(bp, idx)
		if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
			break;
	return idx;
}
struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
	return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
}
static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
				u8 igu_sb_id, u8 segment, u16 index, u8 op,
				u8 update)
{
	/* acking a VF sb through the PF - use the GRC */
	u32 ctl;
	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
	u32 func_encode = vf->abs_vfid;
	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
	struct igu_regular cmd_data = {0};

	cmd_data.sb_id_and_flags =
			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));

	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT		|
	      func_encode << IGU_CTRL_REG_FID_SHIFT		|
	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   cmd_data.sb_id_and_flags, igu_addr_data);
	REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
	mmiowb();
	barrier();

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   ctl, igu_addr_ctl);
	REG_WR(bp, igu_addr_ctl, ctl);
	mmiowb();
	barrier();
}
/* VFOP - VF slow-path operation support */

#define BNX2X_VFOP_FILTER_ADD_CNT_MAX		0x10000

/* VFOP operations states */
enum bnx2x_vfop_qctor_state {
	BNX2X_VFOP_QCTOR_INIT,
	BNX2X_VFOP_QCTOR_SETUP,
	BNX2X_VFOP_QCTOR_INT_EN
};

enum bnx2x_vfop_qdtor_state {
	BNX2X_VFOP_QDTOR_HALT,
	BNX2X_VFOP_QDTOR_TERMINATE,
	BNX2X_VFOP_QDTOR_CFCDEL,
	BNX2X_VFOP_QDTOR_DONE
};

enum bnx2x_vfop_vlan_mac_state {
	BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
	BNX2X_VFOP_VLAN_MAC_CLEAR,
	BNX2X_VFOP_VLAN_MAC_CHK_DONE,
	BNX2X_VFOP_MAC_CONFIG_LIST,
	BNX2X_VFOP_VLAN_CONFIG_LIST,
	BNX2X_VFOP_VLAN_CONFIG_LIST_0
};

enum bnx2x_vfop_qsetup_state {
	BNX2X_VFOP_QSETUP_CTOR,
	BNX2X_VFOP_QSETUP_VLAN0,
	BNX2X_VFOP_QSETUP_DONE
};

enum bnx2x_vfop_mcast_state {
	BNX2X_VFOP_MCAST_DEL,
	BNX2X_VFOP_MCAST_ADD,
	BNX2X_VFOP_MCAST_CHK_DONE
};

enum bnx2x_vfop_qflr_state {
	BNX2X_VFOP_QFLR_CLR_VLAN,
	BNX2X_VFOP_QFLR_CLR_MAC,
	BNX2X_VFOP_QFLR_TERMINATE,
	BNX2X_VFOP_QFLR_DONE
};

enum bnx2x_vfop_flr_state {
	BNX2X_VFOP_FLR_QUEUES,
	BNX2X_VFOP_FLR_HW
};

enum bnx2x_vfop_close_state {
	BNX2X_VFOP_CLOSE_QUEUES,
	BNX2X_VFOP_CLOSE_HW
};

enum bnx2x_vfop_rxmode_state {
	BNX2X_VFOP_RXMODE_CONFIG,
	BNX2X_VFOP_RXMODE_DONE
};

enum bnx2x_vfop_qteardown_state {
	BNX2X_VFOP_QTEARDOWN_RXMODE,
	BNX2X_VFOP_QTEARDOWN_CLR_VLAN,
	BNX2X_VFOP_QTEARDOWN_CLR_MAC,
	BNX2X_VFOP_QTEARDOWN_QDTOR,
	BNX2X_VFOP_QTEARDOWN_DONE
};

enum bnx2x_vfop_rss_state {
	BNX2X_VFOP_RSS_CONFIG,
	BNX2X_VFOP_RSS_DONE
};

#define bnx2x_vfop_reset_wq(vf)	atomic_set(&vf->op_in_progress, 0)
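/* Note on the VFOP state machines below: each handler re-enters itself as the
 * completion callback of the command it issues, with vfop->state recording
 * where to resume. bnx2x_vfop_finalize() (see bnx2x_sriov.h) then decides,
 * from the return code, whether to bail out to op_err, leave the operation
 * pending, or carry on to the next state.
 */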
void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
{
	DP(BNX2X_MSG_IOV,
	   "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->tx.sb_cq_index,
	   init_params->tx.hc_rate,
	   setup_params->flags,
	   setup_params->txq_params.traffic_type);
}
void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
{
	struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params;

	DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n"
	   "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->rx.sb_cq_index,
	   init_params->rx.hc_rate,
	   setup_params->gen_params.mtu,
	   rxq_params->buf_sz,
	   rxq_params->sge_buf_sz,
	   rxq_params->max_sges_pkt,
	   rxq_params->tpa_agg_sz,
	   setup_params->flags,
	   rxq_params->drop_flags,
	   rxq_params->cache_line_log);
}
void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
			   struct bnx2x_virtf *vf,
			   struct bnx2x_vf_queue *q,
			   struct bnx2x_vfop_qctor_params *p,
			   unsigned long q_type)
{
	struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
	struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;

	/* INIT */

	/* Enable host coalescing in the transition to INIT state */
	if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags);

	if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags);

	/* FW SB ID */
	init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);

	/* context */
	init_p->cxts[0] = q->cxt;

	/* SETUP */

	/* Setup-op general parameters */
	setup_p->gen_params.spcl_id = vf->sp_cl_id;
	setup_p->gen_params.stat_id = vfq_stat_id(vf, q);

	/* Setup-op pause params:
	 * Nothing to do, the pause thresholds are set by default to 0 which
	 * effectively turns off the feature for this queue. We don't want
	 * one queue (VF) to interfere with another queue (another VF)
	 */
	if (vf->cfg_flags & VF_CFG_FW_FC)
		BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n",
			  vf->abs_vfid);
	/* Setup-op flags:
	 * collect statistics, zero statistics, local-switching, security,
	 * OV for Flex10, RSS and MCAST for leading
	 */
	if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags))
		__set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags);

	/* for VFs, enable tx switching, bd coherency, and mac address
	 * anti-spoofing
	 */
	__set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
	__set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
	__set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);

	/* Setup-op rx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
		struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;

		rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
		rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
		rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid);

		if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags))
			rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES;
	}

	/* Setup-op tx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) {
		setup_p->txq_params.tss_leading_cl_id = vf->leading_rss;
		setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	}
}
/* VFOP queue construction */
static void bnx2x_vfop_qctor(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qctor *args = &vfop->args.qctor;
	struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
	enum bnx2x_vfop_qctor_state state = vfop->state;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_QCTOR_INIT:

		/* has this queue already been opened? */
		if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
		    BNX2X_Q_LOGICAL_STATE_ACTIVE) {
			DP(BNX2X_MSG_IOV,
			   "Entered qctor but queue was already up. Aborting gracefully\n");
			goto op_done;
		}

		/* next state */
		vfop->state = BNX2X_VFOP_QCTOR_SETUP;

		q_params->cmd = BNX2X_Q_CMD_INIT;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QCTOR_SETUP:
		/* next state */
		vfop->state = BNX2X_VFOP_QCTOR_INT_EN;

		/* copy pre-prepared setup params to the queue-state params */
		vfop->op_p->qctor.qstate.params.setup =
			vfop->op_p->qctor.prep_qsetup;

		q_params->cmd = BNX2X_Q_CMD_SETUP;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QCTOR_INT_EN:

		/* enable interrupts */
		bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, args->sb_idx),
				    USTORM_ID, 0, IGU_INT_ENABLE, 0);
		goto op_done;
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("QCTOR[%d:%d] error: cmd %d, rc %d\n",
		  vf->abs_vfid, args->qid, q_params->cmd, vfop->rc);
op_done:
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}
static int bnx2x_vfop_qctor_cmd(struct bnx2x *bp,
				struct bnx2x_virtf *vf,
				struct bnx2x_vfop_cmd *cmd,
				int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vf->op_params.qctor.qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);

		vfop->args.qctor.qid = qid;
		vfop->args.qctor.sb_idx = bnx2x_vfq(vf, qid, sb_idx);

		bnx2x_vfop_opset(BNX2X_VFOP_QCTOR_INIT,
				 bnx2x_vfop_qctor, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qctor,
					     cmd->block);
	}
	return -ENOMEM;
}
/* VFOP queue destruction */
static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qdtor *qdtor = &vfop->args.qdtor;
	struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
	enum bnx2x_vfop_qdtor_state state = vfop->state;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_QDTOR_HALT:

		/* has this queue already been stopped? */
		if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
		    BNX2X_Q_LOGICAL_STATE_STOPPED) {
			DP(BNX2X_MSG_IOV,
			   "Entered qdtor but queue was already stopped. Aborting gracefully\n");

			/* next state */
			vfop->state = BNX2X_VFOP_QDTOR_DONE;

			bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
		}

		/* next state */
		vfop->state = BNX2X_VFOP_QDTOR_TERMINATE;

		q_params->cmd = BNX2X_Q_CMD_HALT;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QDTOR_TERMINATE:
		/* next state */
		vfop->state = BNX2X_VFOP_QDTOR_CFCDEL;

		q_params->cmd = BNX2X_Q_CMD_TERMINATE;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QDTOR_CFCDEL:
		/* next state */
		vfop->state = BNX2X_VFOP_QDTOR_DONE;

		q_params->cmd = BNX2X_Q_CMD_CFC_DEL;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
	BNX2X_ERR("QDTOR[%d:%d] error: cmd %d, rc %d\n",
		  vf->abs_vfid, qdtor->qid, q_params->cmd, vfop->rc);
op_done:
	case BNX2X_VFOP_QDTOR_DONE:
		/* invalidate the context */
		if (qdtor->cxt) {
			qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
			qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
		}
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}
static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
				struct bnx2x_virtf *vf,
				struct bnx2x_vfop_cmd *cmd,
				int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_queue_state_params *qstate =
			&vf->op_params.qctor.qstate;

		memset(qstate, 0, sizeof(*qstate));
		qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);

		vfop->args.qdtor.qid = qid;
		vfop->args.qdtor.cxt = bnx2x_vfq(vf, qid, cxt);

		bnx2x_vfop_opset(BNX2X_VFOP_QDTOR_HALT,
				 bnx2x_vfop_qdtor, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
					     cmd->block);
	}
	/* vfop is NULL on this path, so don't dereference it in the message */
	DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop\n", vf->abs_vfid);
	return -ENOMEM;
}
static void
bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
{
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
	if (vf) {
		/* the first igu entry belonging to VFs of this PF */
		if (!BP_VFDB(bp)->first_vf_igu_entry)
			BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id;

		/* the first igu entry belonging to this VF */
		if (!vf_sb_count(vf))
			vf->igu_base_id = igu_sb_id;

		++vf_sb_count(vf);
	}
	BP_VFDB(bp)->vf_sbs_pool++;
}
/* VFOP MAC/VLAN helpers */
static inline void bnx2x_vfop_credit(struct bnx2x *bp,
				     struct bnx2x_vfop *vfop,
				     struct bnx2x_vlan_mac_obj *obj)
{
	struct bnx2x_vfop_args_filters *args = &vfop->args.filters;

	/* update credit only if there is no error
	 * and a valid credit counter
	 */
	if (!vfop->rc && args->credit) {
		struct list_head *pos;
		int read_lock;
		int cnt = 0;

		read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
		if (read_lock)
			DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n");

		list_for_each(pos, &obj->head)
			cnt++;

		if (!read_lock)
			bnx2x_vlan_mac_h_read_unlock(bp, obj);

		atomic_set(args->credit, cnt);
	}
}
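/* Note: bnx2x_vfop_credit() re-derives the credit value by walking the
 * vlan_mac object's rule list rather than tracking increments, so the
 * per-queue counter always reflects the rules actually installed.
 */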
static int bnx2x_vfop_set_user_req(struct bnx2x *bp,
				   struct bnx2x_vfop_filter *pos,
				   struct bnx2x_vlan_mac_data *user_req)
{
	user_req->cmd = pos->add ? BNX2X_VLAN_MAC_ADD :
		BNX2X_VLAN_MAC_DEL;

	switch (pos->type) {
	case BNX2X_VFOP_FILTER_MAC:
		memcpy(user_req->u.mac.mac, pos->mac, ETH_ALEN);
		break;
	case BNX2X_VFOP_FILTER_VLAN:
		user_req->u.vlan.vlan = pos->vid;
		break;
	default:
		BNX2X_ERR("Invalid filter type, skipping\n");
		return 1;
	}
	return 0;
}
static int bnx2x_vfop_config_list(struct bnx2x *bp,
				  struct bnx2x_vfop_filters *filters,
				  struct bnx2x_vlan_mac_ramrod_params *vlan_mac)
{
	struct bnx2x_vfop_filter *pos, *tmp;
	struct list_head rollback_list, *filters_list = &filters->head;
	struct bnx2x_vlan_mac_data *user_req = &vlan_mac->user_req;
	int rc = 0, cnt = 0;

	INIT_LIST_HEAD(&rollback_list);

	list_for_each_entry_safe(pos, tmp, filters_list, link) {
		if (bnx2x_vfop_set_user_req(bp, pos, user_req))
			continue;

		rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		if (rc >= 0) {
			cnt += pos->add ? 1 : -1;
			list_move(&pos->link, &rollback_list);
			rc = 0;
		} else if (rc == -EEXIST) {
			rc = 0;
		} else {
			BNX2X_ERR("Failed to add a new vlan_mac command\n");
			break;
		}
	}

	/* rollback if error or too many rules added */
	if (rc || cnt > filters->add_cnt) {
		BNX2X_ERR("error or too many rules added. Performing rollback\n");
		list_for_each_entry_safe(pos, tmp, &rollback_list, link) {
			pos->add = !pos->add;	/* reverse op */
			bnx2x_vfop_set_user_req(bp, pos, user_req);
			bnx2x_config_vlan_mac(bp, vlan_mac);
			list_del(&pos->link);
		}
		cnt = 0;
		if (!rc)
			rc = -EINVAL;
	}
	filters->add_cnt = cnt;
	return rc;
}
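/* Note: filters->add_cnt is both an input and an output of
 * bnx2x_vfop_config_list() - callers set it to the maximum number of
 * additions allowed, and on return it holds the net number of rules
 * actually added (zero after a rollback).
 */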
/* VFOP set VLAN/MAC */
static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vlan_mac_ramrod_params *vlan_mac = &vfop->op_p->vlan_mac;
	struct bnx2x_vlan_mac_obj *obj = vlan_mac->vlan_mac_obj;
	struct bnx2x_vfop_filters *filters = vfop->args.filters.multi_filter;

	enum bnx2x_vfop_vlan_mac_state state = vfop->state;

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	bnx2x_vfop_reset_wq(vf);

	switch (state) {
	case BNX2X_VFOP_VLAN_MAC_CLEAR:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do delete */
		vfop->rc = obj->delete_all(bp, obj,
					   &vlan_mac->user_req.vlan_mac_flags,
					   &vlan_mac->ramrod_flags);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do config */
		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		if (vfop->rc == -EEXIST)
			vfop->rc = 0;

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_VLAN_MAC_CHK_DONE:
		vfop->rc = !!obj->raw.check_pending(&obj->raw);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_MAC_CONFIG_LIST:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do list config */
		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
		if (vfop->rc)
			goto op_err;

		set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_VLAN_CONFIG_LIST:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do list config */
		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
		if (!vfop->rc) {
			set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
			vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		}
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("VLAN-MAC error: rc %d\n", vfop->rc);
op_done:
	kfree(filters);
	bnx2x_vfop_credit(bp, vfop, obj);
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}
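/* The command wrappers below enter bnx2x_vfop_vlan_mac() at its various
 * initial states: single add/del, clear-all and list configuration. All of
 * them funnel into BNX2X_VFOP_VLAN_MAC_CHK_DONE, which simply polls the
 * object for pending work before completing.
 */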
struct bnx2x_vfop_vlan_mac_flags {
	bool drv_only;
	bool dont_consume;
	bool single_cmd;
	bool add;
};
static void
bnx2x_vfop_vlan_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
				struct bnx2x_vfop_vlan_mac_flags *flags)
{
	struct bnx2x_vlan_mac_data *ureq = &ramrod->user_req;

	memset(ramrod, 0, sizeof(*ramrod));

	/* ramrod flags */
	if (flags->drv_only)
		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod->ramrod_flags);
	if (flags->single_cmd)
		set_bit(RAMROD_EXEC, &ramrod->ramrod_flags);

	/* mac_vlan flags */
	if (flags->dont_consume)
		set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, &ureq->vlan_mac_flags);

	/* cmd */
	ureq->cmd = flags->add ? BNX2X_VLAN_MAC_ADD : BNX2X_VLAN_MAC_DEL;
}
static inline void
bnx2x_vfop_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
			   struct bnx2x_vfop_vlan_mac_flags *flags)
{
	bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, flags);
	set_bit(BNX2X_ETH_MAC, &ramrod->user_req.vlan_mac_flags);
}
static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
				     struct bnx2x_virtf *vf,
				     struct bnx2x_vfop_cmd *cmd,
				     int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	int rc;

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = NULL,	/* single */
			.credit = NULL,		/* consume credit */
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = true,
			.add = false /* don't care */,
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
		if (rc)
			return rc;
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);

		/* set extra args */
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}
int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
			    struct bnx2x_virtf *vf,
			    struct bnx2x_vfop_cmd *cmd,
			    struct bnx2x_vfop_filters *macs,
			    int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	int rc;

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = macs,
			.credit = NULL,		/* consume credit */
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = false,
			.add = false, /* don't care since only the items in the
				       * filters list affect the sp operation,
				       * not the list itself
				       */
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
		if (rc)
			return rc;
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);

		/* set extra args */
		filters.multi_filter->add_cnt = BNX2X_VFOP_FILTER_ADD_CNT_MAX;
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_MAC_CONFIG_LIST,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}
int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
			    struct bnx2x_virtf *vf,
			    struct bnx2x_vfop_cmd *cmd,
			    int qid, u16 vid, bool add)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	int rc;

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = NULL, /* single command */
			.credit = &bnx2x_vfq(vf, qid, vlan_count),
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = false,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = true,
			.add = add,
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
		ramrod->user_req.u.vlan.vlan = vid;

		/* set object */
		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
		if (rc)
			return rc;
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

		/* set extra args */
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}
static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
				      struct bnx2x_virtf *vf,
				      struct bnx2x_vfop_cmd *cmd,
				      int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	int rc;

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = NULL, /* single command */
			.credit = &bnx2x_vfq(vf, qid, vlan_count),
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = true,
			.add = false, /* don't care */
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
		if (rc)
			return rc;
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

		/* set extra args */
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}
int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
			     struct bnx2x_virtf *vf,
			     struct bnx2x_vfop_cmd *cmd,
			     struct bnx2x_vfop_filters *vlans,
			     int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	int rc;

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = vlans,
			.credit = &bnx2x_vfq(vf, qid, vlan_count),
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = false,
			.add = false, /* don't care */
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
		if (rc)
			return rc;
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

		/* set extra args */
		filters.multi_filter->add_cnt = vf_vlan_rules_cnt(vf) -
			atomic_read(filters.credit);

		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_CONFIG_LIST,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}
/* VFOP queue setup (queue constructor + set vlan 0) */
static void bnx2x_vfop_qsetup(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	int qid = vfop->args.qctor.qid;
	enum bnx2x_vfop_qsetup_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_qsetup,
		.block = false,
	};

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_QSETUP_CTOR:
		/* init the queue ctor command */
		vfop->state = BNX2X_VFOP_QSETUP_VLAN0;
		vfop->rc = bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QSETUP_VLAN0:
		/* skip if non-leading or FPGA/EMU*/
		if (qid)
			goto op_done;

		/* init the queue set-vlan command (for vlan 0) */
		vfop->state = BNX2X_VFOP_QSETUP_DONE;
		vfop->rc = bnx2x_vfop_vlan_set_cmd(bp, vf, &cmd, qid, 0, true);
		if (vfop->rc)
			goto op_err;
		return;
op_err:
	BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc);
op_done:
	case BNX2X_VFOP_QSETUP_DONE:
		vf->cfg_flags |= VF_CFG_VLAN;
		smp_mb__before_clear_bit();
		set_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
			&bp->sp_rtnl_state);
		smp_mb__after_clear_bit();
		schedule_delayed_work(&bp->sp_rtnl_task, 0);
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
}
int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
			  struct bnx2x_virtf *vf,
			  struct bnx2x_vfop_cmd *cmd,
			  int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vfop->args.qctor.qid = qid;

		bnx2x_vfop_opset(BNX2X_VFOP_QSETUP_CTOR,
				 bnx2x_vfop_qsetup, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qsetup,
					     cmd->block);
	}
	return -ENOMEM;
}
/* VFOP queue FLR handling (clear vlans, clear macs, queue destructor) */
static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	int qid = vfop->args.qx.qid;
	enum bnx2x_vfop_qflr_state state = vfop->state;
	struct bnx2x_queue_state_params *qstate;
	struct bnx2x_vfop_cmd cmd;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "VF[%d] STATE: %d\n", vf->abs_vfid, state);

	cmd.done = bnx2x_vfop_qflr;
	cmd.block = false;

	switch (state) {
	case BNX2X_VFOP_QFLR_CLR_VLAN:
		/* vlan-clear-all: driver-only, don't consume credit */
		vfop->state = BNX2X_VFOP_QFLR_CLR_MAC;
		if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj)))
			vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid,
							      true);
		if (vfop->rc)
			goto op_err;
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QFLR_CLR_MAC:
		/* mac-clear-all: driver only consume credit */
		vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
		if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj)))
			vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid,
							     true);
		DP(BNX2X_MSG_IOV,
		   "VF[%d] vfop->rc after bnx2x_vfop_mac_delall_cmd was %d",
		   vf->abs_vfid, vfop->rc);
		if (vfop->rc)
			goto op_err;
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QFLR_TERMINATE:
		qstate = &vfop->op_p->qctor.qstate;
		memset(qstate, 0, sizeof(*qstate));
		qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
		vfop->state = BNX2X_VFOP_QFLR_DONE;

		DP(BNX2X_MSG_IOV, "VF[%d] qstate during flr was %d\n",
		   vf->abs_vfid, qstate->q_obj->state);

		if (qstate->q_obj->state != BNX2X_Q_STATE_RESET) {
			qstate->q_obj->state = BNX2X_Q_STATE_STOPPED;
			qstate->cmd = BNX2X_Q_CMD_TERMINATE;
			vfop->rc = bnx2x_queue_state_change(bp, qstate);
			bnx2x_vfop_finalize(vf, vfop->rc, VFOP_VERIFY_PEND);
		} else {
			goto op_done;
		}

op_err:
	BNX2X_ERR("QFLR[%d:%d] error: rc %d\n",
		  vf->abs_vfid, qid, vfop->rc);
op_done:
	case BNX2X_VFOP_QFLR_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}
static int bnx2x_vfop_qflr_cmd(struct bnx2x *bp,
			       struct bnx2x_virtf *vf,
			       struct bnx2x_vfop_cmd *cmd,
			       int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vfop->args.qx.qid = qid;
		bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN,
				 bnx2x_vfop_qflr, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qflr,
					     cmd->block);
	}
	return -ENOMEM;
}
/* VFOP multi-casts */
static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_mcast_ramrod_params *mcast = &vfop->op_p->mcast;
	struct bnx2x_raw_obj *raw = &mcast->mcast_obj->raw;
	struct bnx2x_vfop_args_mcast *args = &vfop->args.mc_list;
	enum bnx2x_vfop_mcast_state state = vfop->state;
	int i;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_MCAST_DEL:
		/* clear existing mcasts */
		vfop->state = BNX2X_VFOP_MCAST_ADD;
		vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_MCAST_ADD:
		if (raw->check_pending(raw))
			goto op_pending;

		if (args->mc_num) {
			/* update mcast list on the ramrod params */
			INIT_LIST_HEAD(&mcast->mcast_list);
			for (i = 0; i < args->mc_num; i++)
				list_add_tail(&(args->mc[i].link),
					      &mcast->mcast_list);
			/* add new mcasts */
			vfop->state = BNX2X_VFOP_MCAST_CHK_DONE;
			vfop->rc = bnx2x_config_mcast(bp, mcast,
						      BNX2X_MCAST_CMD_ADD);
		}
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_MCAST_CHK_DONE:
		vfop->rc = raw->check_pending(raw) ? 1 : 0;
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("MCAST CONFIG error: rc %d\n", vfop->rc);
op_done:
	kfree(args->mc);
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}
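/* Note: the DEL state always runs before ADD, so a VF's multicast list is
 * replaced wholesale rather than merged with the previous configuration.
 */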
int bnx2x_vfop_mcast_cmd(struct bnx2x *bp,
			 struct bnx2x_virtf *vf,
			 struct bnx2x_vfop_cmd *cmd,
			 bnx2x_mac_addr_t *mcasts,
			 int mcast_num, bool drv_only)
{
	struct bnx2x_vfop *vfop = NULL;
	size_t mc_sz = mcast_num * sizeof(struct bnx2x_mcast_list_elem);
	struct bnx2x_mcast_list_elem *mc = mc_sz ? kzalloc(mc_sz, GFP_KERNEL) :
					   NULL;

	if (!mc_sz || mc) {
		vfop = bnx2x_vfop_add(bp, vf);
		if (vfop) {
			int i;
			struct bnx2x_mcast_ramrod_params *ramrod =
				&vf->op_params.mcast;

			/* set ramrod params */
			memset(ramrod, 0, sizeof(*ramrod));
			ramrod->mcast_obj = &vf->mcast_obj;
			if (drv_only)
				set_bit(RAMROD_DRV_CLR_ONLY,
					&ramrod->ramrod_flags);

			/* copy mcasts pointers */
			vfop->args.mc_list.mc_num = mcast_num;
			vfop->args.mc_list.mc = mc;
			for (i = 0; i < mcast_num; i++)
				mc[i].mac = mcasts[i];

			bnx2x_vfop_opset(BNX2X_VFOP_MCAST_DEL,
					 bnx2x_vfop_mcast, cmd->done);
			return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mcast,
						     cmd->block);
		} else {
			kfree(mc);
		}
	}
	return -ENOMEM;
}
/* VFOP rx-mode */
static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_rx_mode_ramrod_params *ramrod = &vfop->op_p->rx_mode;
	enum bnx2x_vfop_rxmode_state state = vfop->state;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_RXMODE_CONFIG:
		/* next state */
		vfop->state = BNX2X_VFOP_RXMODE_DONE;

		vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
		BNX2X_ERR("RXMODE error: rc %d\n", vfop->rc);
op_done:
	case BNX2X_VFOP_RXMODE_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}
int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
			  struct bnx2x_virtf *vf,
			  struct bnx2x_vfop_cmd *cmd,
			  int qid, unsigned long accept_flags)
{
	struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_rx_mode_ramrod_params *ramrod =
			&vf->op_params.rx_mode;

		memset(ramrod, 0, sizeof(*ramrod));

		/* Prepare ramrod parameters */
		ramrod->cid = vfq->cid;
		ramrod->cl_id = vfq_cl_id(vf, vfq);
		ramrod->rx_mode_obj = &bp->rx_mode_obj;
		ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);

		ramrod->rx_accept_flags = accept_flags;
		ramrod->tx_accept_flags = accept_flags;
		ramrod->pstate = &vf->filter_state;
		ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;

		set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
		set_bit(RAMROD_RX, &ramrod->ramrod_flags);
		set_bit(RAMROD_TX, &ramrod->ramrod_flags);

		ramrod->rdata =
			bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
		ramrod->rdata_mapping =
			bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);

		bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
				 bnx2x_vfop_rxmode, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rxmode,
					     cmd->block);
	}
	return -ENOMEM;
}
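/* Note: the same accept_flags are applied to both the rx and tx paths
 * (RAMROD_RX and RAMROD_TX are both set), so a VF's rx-mode ramrod always
 * configures the two directions symmetrically.
 */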
/* VFOP queue tear-down ('drop all' rx-mode, clear vlans, clear macs,
 * queue destructor)
 */
static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	int qid = vfop->args.qx.qid;
	enum bnx2x_vfop_qteardown_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd;

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	cmd.done = bnx2x_vfop_qdown;
	cmd.block = false;

	switch (state) {
	case BNX2X_VFOP_QTEARDOWN_RXMODE:
		/* Drop all */
		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN;
		vfop->rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, qid, 0);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_CLR_VLAN:
		/* vlan-clear-all: don't consume credit */
		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MAC;
		vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, false);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_CLR_MAC:
		/* mac-clear-all: consume credit */
		vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
		vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, false);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_QDTOR:
		/* run the queue destruction flow */
		DP(BNX2X_MSG_IOV, "case: BNX2X_VFOP_QTEARDOWN_QDTOR\n");
		vfop->state = BNX2X_VFOP_QTEARDOWN_DONE;
		DP(BNX2X_MSG_IOV, "new state: BNX2X_VFOP_QTEARDOWN_DONE\n");
		vfop->rc = bnx2x_vfop_qdtor_cmd(bp, vf, &cmd, qid);
		DP(BNX2X_MSG_IOV, "returned from cmd\n");
		if (vfop->rc)
			goto op_err;
		return;
op_err:
	BNX2X_ERR("QTEARDOWN[%d:%d] error: rc %d\n",
		  vf->abs_vfid, qid, vfop->rc);

	case BNX2X_VFOP_QTEARDOWN_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
}
int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
			 struct bnx2x_virtf *vf,
			 struct bnx2x_vfop_cmd *cmd,
			 int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	/* for non-leading queues skip directly to the qdtor state */
	if (vfop) {
		vfop->args.qx.qid = qid;
		bnx2x_vfop_opset(qid == LEADING_IDX ?
				 BNX2X_VFOP_QTEARDOWN_RXMODE :
				 BNX2X_VFOP_QTEARDOWN_QDTOR, bnx2x_vfop_qdown,
				 cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown,
					     cmd->block);
	}

	return -ENOMEM;
}
/* VF enable primitives
 * when pretend is required the caller is responsible
 * for calling pretend prior to calling these routines
 */

/* internal vf enable - until vf is enabled internally all transactions
 * are blocked. This routine should always be called last with pretend.
 */
static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
{
	REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
}

/* clears vf error in all semi blocks */
static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
}
static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
	u32 was_err_reg = 0;

	switch (was_err_group) {
	case 0:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
		break;
	case 1:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
		break;
	case 2:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
		break;
	case 3:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
		break;
	}
	REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
}
static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i;
	u32 val;

	/* Set VF masks and configuration - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));

	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);

	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
	val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
	if (vf->cfg_flags & VF_CFG_INT_SIMD)
		val |= IGU_VF_CONF_SINGLE_ISR_EN;
	val &= ~IGU_VF_CONF_PARENT_MASK;
	val |= BP_FUNC(bp) << IGU_VF_CONF_PARENT_SHIFT;	/* parent PF */
	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);

	DP(BNX2X_MSG_IOV,
	   "value in IGU_REG_VF_CONFIGURATION of vf %d after write %x\n",
	   vf->abs_vfid, REG_RD(bp, IGU_REG_VF_CONFIGURATION));

	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf_sb_count(vf); i++) {
		u8 igu_sb_id = vf_igu_sb(vf, i);

		/* zero prod memory */
		REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);

		/* clear sb state machine */
		bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
				       false /* VF */);

		/* disable + update */
		bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
				    IGU_INT_DISABLE, 1);
	}
}
void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
{
	/* set the VF-PF association in the FW */
	storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
	storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);

	/* clear vf errors*/
	bnx2x_vf_semi_clear_err(bp, abs_vfid);
	bnx2x_vf_pglue_clear_err(bp, abs_vfid);

	/* internal vf-enable - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
	DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
	bnx2x_vf_enable_internal(bp, true);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}
static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	/* Reset vf in IGU - interrupts are still disabled */
	bnx2x_vf_igu_reset(bp, vf);

	/* pretend to enable the vf with the PBF */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	REG_WR(bp, PBF_REG_DISABLE_VF, 0);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}
static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
{
	struct pci_dev *dev;
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf)
		return false;

	dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
	if (dev)
		return bnx2x_is_pcie_pending(dev);
	return false;
}
int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
{
	/* Verify no pending pci transactions */
	if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
		BNX2X_ERR("PCIE Transactions still pending\n");

	return 0;
}
/* must be called after the number of PF queues and the number of VFs are
 * both known
 */
static void
bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct vf_pf_resc_request *resc = &vf->alloc_resc;
	u16 vlan_count = 0;

	/* will be set only during VF-ACQUIRE */
	resc->num_rxqs = 0;
	resc->num_txqs = 0;

	/* no credit calculations for macs (just yet) */
	resc->num_mac_filters = 1;

	/* divvy up vlan rules */
	vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
	vlan_count = 1 << ilog2(vlan_count);
	resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp);

	/* no real limitation */
	resc->num_mc_filters = 0;

	/* num_sbs already set */
	resc->num_sbs = vf->sb_count;
}
/* FLR routines: */
static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	/* reset the state variables */
	bnx2x_iov_static_resc(bp, vf);
	vf->state = VF_FREE;
}
static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);

	/* DQ usage counter */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT,
					"DQ VF usage counter timed out",
					poll_cnt);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* FW cleanup command - poll for the results */
	if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid),
				   poll_cnt))
		BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid);

	/* verify TX hw is flushed */
	bnx2x_tx_hw_flushed(bp, poll_cnt);
}
static void bnx2x_vfop_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
	enum bnx2x_vfop_flr_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_flr,
		.block = false,
	};

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_FLR_QUEUES:
		/* the cleanup operations are valid if and only if the VF
		 * was first acquired.
		 */
		if (++(qx->qid) < vf_rxq_count(vf)) {
			vfop->rc = bnx2x_vfop_qflr_cmd(bp, vf, &cmd,
						       qx->qid);
			if (vfop->rc)
				goto op_err;
			return;
		}
		/* remove multicasts */
		vfop->state = BNX2X_VFOP_FLR_HW;
		vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL,
						0, true);
		if (vfop->rc)
			goto op_err;
		return;
	case BNX2X_VFOP_FLR_HW:

		/* dispatch final cleanup and wait for HW queues to flush */
		bnx2x_vf_flr_clnup_hw(bp, vf);

		/* release VF resources */
		bnx2x_vf_free_resc(bp, vf);

		/* re-open the mailbox */
		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);

		goto op_done;
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("VF[%d] FLR error: rc %d\n", vf->abs_vfid, vfop->rc);
op_done:
	vf->flr_clnup_stage = VF_FLR_ACK;
	bnx2x_vfop_end(bp, vf, vfop);
	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
}
static int bnx2x_vfop_flr_cmd(struct bnx2x *bp,
			      struct bnx2x_virtf *vf,
			      vfop_handler_t done)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	if (vfop) {
		vfop->args.qx.qid = -1; /* loop */
		bnx2x_vfop_opset(BNX2X_VFOP_FLR_QUEUES,
				 bnx2x_vfop_flr, done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_flr, false);
	}
	return -ENOMEM;
}
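/* qx.qid starts at -1 so that the pre-increment in BNX2X_VFOP_FLR_QUEUES
 * walks queues 0..vf_rxq_count()-1, one queue FLR cleanup per re-entry,
 * before the state machine moves on to the HW cleanup stage.
 */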
static void bnx2x_vf_flr_clnup(struct bnx2x *bp, struct bnx2x_virtf *prev_vf)
{
	int i = prev_vf ? prev_vf->index + 1 : 0;
	struct bnx2x_virtf *vf;

	/* find next VF to cleanup */
next_vf_to_clean:
	for (;
	     i < BNX2X_NR_VIRTFN(bp) &&
	     (bnx2x_vf(bp, i, state) != VF_RESET ||
	      bnx2x_vf(bp, i, flr_clnup_stage) != VF_FLR_CLN);
	     i++)
		;

	DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n", i,
	   BNX2X_NR_VIRTFN(bp));

	if (i < BNX2X_NR_VIRTFN(bp)) {
		vf = BP_VF(bp, i);

		/* lock the vf pf channel */
		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);

		/* invoke the VF FLR SM */
		if (bnx2x_vfop_flr_cmd(bp, vf, bnx2x_vf_flr_clnup)) {
			BNX2X_ERR("VF[%d]: FLR cleanup failed -ENOMEM\n",
				  vf->abs_vfid);

			/* mark the VF to be ACKED and continue */
			vf->flr_clnup_stage = VF_FLR_ACK;
			goto next_vf_to_clean;
		}
		return;
	}

	/* we are done, update vf records */
	for_each_vf(bp, i) {
		vf = BP_VF(bp, i);

		if (vf->flr_clnup_stage != VF_FLR_ACK)
			continue;

		vf->flr_clnup_stage = VF_FLR_EPILOG;
	}

	/* Acknowledge the handled VFs.
	 * We acknowledge all the VFs for which an FLR was requested, even
	 * those we never opened, since the MCP will interrupt us immediately
	 * again if we only ack some of the bits, resulting in an endless
	 * loop. This can happen for example in KVM where an 'all ones' flr
	 * request is sometimes given by the hypervisor.
	 */
	DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i],
			  bp->vfdb->flrd_vfs[i]);

	bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0);

	/* clear the acked bits - better yet if the MCP implemented
	 * write to clear semantics
	 */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0);
}
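/* Because bnx2x_vfop_flr_cmd() is invoked with bnx2x_vf_flr_clnup() itself
 * as the completion callback, the marked VFs are cleaned up one at a time;
 * the MCP is only acked once no VF in VF_RESET/VF_FLR_CLN remains.
 */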
void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
{
	int i;

	/* Read FLR'd VFs */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]);

	DP(BNX2X_MSG_MCP,
	   "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);

	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);
		u32 reset = 0;

		if (vf->abs_vfid < 32)
			reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid);
		else
			reset = bp->vfdb->flrd_vfs[1] &
				(1 << (vf->abs_vfid - 32));

		if (reset) {
			/* set as reset and ready for cleanup */
			vf->state = VF_RESET;
			vf->flr_clnup_stage = VF_FLR_CLN;

			DP(BNX2X_MSG_IOV,
			   "Initiating Final cleanup for VF %d\n",
			   vf->abs_vfid);
		}
	}

	/* do the FLR cleanup for all marked VFs*/
	bnx2x_vf_flr_clnup(bp, NULL);
}
/* IOV global initialization routines  */
void bnx2x_iov_init_dq(struct bnx2x *bp)
{
	if (!IS_SRIOV(bp))
		return;

	/* Set the DQ such that the CID reflect the abs_vfid */
	REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
	REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));

	/* Set VFs starting CID. If it's > 0, the preceding CIDs belong to
	 * the PF L2 queues
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);

	/* The VF window size is the log2 of the max number of CIDs per VF */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);

	/* The VF doorbell size  0 - *B, 4 - 128B. We set it here to match
	 * the Pf doorbell size although the 2 are independent.
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3);

	/* No security checks for now -
	 * configure single rule (out of 16) mask = 0x1, value = 0x0,
	 * CID range 0 - 0x1ffff
	 */
	REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
	REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);

	/* set the VF doorbell threshold */
	REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
}
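/* Roughly, the DORQ settings above carve the normal doorbell CID space so
 * that VF CIDs start at BNX2X_FIRST_VF_CID (above the PF's own CIDs) and
 * each VF owns a contiguous window of 1 << BNX2X_VF_CID_WND CIDs.
 */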
void bnx2x_iov_init_dmae(struct bnx2x *bp)
{
	if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
		REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
}
static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return dev->bus->number + ((dev->devfn + iov->offset +
				    iov->stride * vfid) >> 8);
}

static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
}

static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i, n;
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
		u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
		u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);

		size /= iov->total;
		vf->bars[n].bar = start + size * vf->abs_vfid;
		vf->bars[n].size = size;
	}
}

static int bnx2x_ari_enabled(struct pci_dev *dev)
{
	return dev->bus->self && dev->bus->self->ari_enabled;
}
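/* Note: in bnx2x_vf_set_bars() above, pci_resource_len() on an SR-IOV BAR
 * covers all VFs, so the per-VF slice is the total length divided by
 * iov->total, offset by abs_vfid slices from the BAR start.
 */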
static void
bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
{
	int sb_id;
	u32 val;
	u8 fid, current_pf = 0;

	/* IGU in normal mode - read CAM */
	for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
			continue;
		fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
		if (fid & IGU_FID_ENCODE_IS_PF)
			current_pf = fid & IGU_FID_PF_NUM_MASK;
		else if (current_pf == BP_FUNC(bp))
			bnx2x_vf_set_igu_info(bp, sb_id,
					      (fid & IGU_FID_VF_NUM_MASK));
		DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
		   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
		   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
		    (fid & IGU_FID_VF_NUM_MASK)), sb_id,
		   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
	}
	DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool);
}
static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
{
	if (bp->vfdb) {
		kfree(bp->vfdb->vfqs);
		kfree(bp->vfdb->vfs);
		kfree(bp->vfdb);
		bp->vfdb = NULL;
	}
}
static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	int pos;
	struct pci_dev *dev = bp->pdev;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		BNX2X_ERR("failed to find SRIOV capability in device\n");
		return -ENODEV;
	}

	iov->pos = pos;
	DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
	pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	return 0;
}
static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	u32 val;

	/* read the SRIOV capability structure
	 * The fields can be read via configuration read or
	 * directly from the device (starting at offset PCICFG_OFFSET)
	 */
	if (bnx2x_sriov_pci_cfg_info(bp, iov))
		return -ENODEV;

	/* get the number of SRIOV bars */
	iov->nres = 0;

	/* read the first_vfid */
	val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
	iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
			       * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));

	DP(BNX2X_MSG_IOV,
	   "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
	   BP_FUNC(bp),
	   iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
	   iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	return 0;
}
/* must be called after PF bars are mapped */
int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
		       int num_vfs_param)
{
	int err, i;
	struct bnx2x_sriov *iov;
	struct pci_dev *dev = bp->pdev;

	bp->vfdb = NULL;

	/* verify is pf */
	if (IS_VF(bp))
		return 0;

	/* verify sriov capability is present in configuration space */
	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV))
		return 0;

	/* verify chip revision */
	if (CHIP_IS_E1x(bp))
		return 0;

	/* check if SRIOV support is turned off */
	if (!num_vfs_param)
		return 0;

	/* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
	if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
		BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
			  BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
		return 0;
	}

	/* SRIOV can be enabled only with MSIX */
	if (int_mode_param == BNX2X_INT_MODE_MSI ||
	    int_mode_param == BNX2X_INT_MODE_INTX) {
		BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
		return 0;
	}

	err = -EIO;
	/* verify ari is enabled */
	if (!bnx2x_ari_enabled(bp->pdev)) {
		BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV can not be enabled\n");
		return 0;
	}

	/* verify igu is in normal mode */
	if (CHIP_INT_MODE_IS_BC(bp)) {
		BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n");
		return 0;
	}

	/* allocate the vfs database */
	bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
	if (!bp->vfdb) {
		BNX2X_ERR("failed to allocate vf database\n");
		err = -ENOMEM;
		goto failed;
	}

	/* get the sriov info - Linux already collected all the pertinent
	 * information, however the sriov structure is for the private use
	 * of the pci module. Also we want this information regardless
	 * of the hypervisor.
	 */
	iov = &(bp->vfdb->sriov);
	err = bnx2x_sriov_info(bp, iov);
	if (err)
		goto failed;

	/* SR-IOV capability was enabled but there are no VFs*/
	if (iov->total == 0)
		goto failed;

	iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param);

	DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n",
	   num_vfs_param, iov->nr_virtfn);

	/* allocate the vf array */
	bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
				BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
	if (!bp->vfdb->vfs) {
		BNX2X_ERR("failed to allocate vf array\n");
		err = -ENOMEM;
		goto failed;
	}

	/* Initial VF init - index and abs_vfid - nr_virtfn must be set */
	for_each_vf(bp, i) {
		bnx2x_vf(bp, i, index) = i;
		bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
		bnx2x_vf(bp, i, state) = VF_FREE;
		INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head));
		mutex_init(&bnx2x_vf(bp, i, op_mutex));
		bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
	}

	/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
	bnx2x_get_vf_igu_cam_info(bp);

	/* allocate the queue arrays for all VFs */
	bp->vfdb->vfqs = kzalloc(
		BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue),
		GFP_KERNEL);

	DP(BNX2X_MSG_IOV, "bp->vfdb->vfqs was %p\n", bp->vfdb->vfqs);

	if (!bp->vfdb->vfqs) {
		BNX2X_ERR("failed to allocate vf queue array\n");
		err = -ENOMEM;
		goto failed;
	}

	return 0;
failed:
	DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
	__bnx2x_iov_free_vfdb(bp);
	return err;
}
void bnx2x_iov_remove_one(struct bnx2x *bp)
{
	/* if SRIOV is not enabled there's nothing to do */
	if (!IS_SRIOV(bp))
		return;

	DP(BNX2X_MSG_IOV, "about to call disable sriov\n");
	pci_disable_sriov(bp->pdev);
	DP(BNX2X_MSG_IOV, "sriov disabled\n");

	/* free vf database */
	__bnx2x_iov_free_vfdb(bp);
}
void bnx2x_iov_free_mem(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return;

	/* free vfs hw contexts */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = &bp->vfdb->context[i];
		BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
	}

	BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
		       BP_VFDB(bp)->sp_dma.mapping,
		       BP_VFDB(bp)->sp_dma.size);

	BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
		       BP_VF_MBX_DMA(bp)->mapping,
		       BP_VF_MBX_DMA(bp)->size);

	BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr,
		       BP_VF_BULLETIN_DMA(bp)->mapping,
		       BP_VF_BULLETIN_DMA(bp)->size);
}
int bnx2x_iov_alloc_mem(struct bnx2x *bp)
{
	size_t tot_size;
	int i, rc = 0;

	if (!IS_SRIOV(bp))
		return rc;

	/* allocate vfs hw contexts */
	tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
		BNX2X_CIDS_PER_VF * sizeof(union cdu_context);

	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
		cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);

		if (cxt->size)
			BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size);
		else {
			cxt->addr = NULL;
			cxt->mapping = 0;
		}
		tot_size -= cxt->size;
	}

	/* allocate vfs ramrods dma memory - client_init and set_mac */
	tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
	BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping,
			tot_size);
	BP_VFDB(bp)->sp_dma.size = tot_size;

	/* allocate mailboxes */
	tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
	BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping,
			tot_size);
	BP_VF_MBX_DMA(bp)->size = tot_size;

	/* allocate local bulletin boards */
	tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE;
	BNX2X_PCI_ALLOC(BP_VF_BULLETIN_DMA(bp)->addr,
			&BP_VF_BULLETIN_DMA(bp)->mapping, tot_size);
	BP_VF_BULLETIN_DMA(bp)->size = tot_size;

	return 0;

alloc_mem_err:
	return -ENOMEM;
}
static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
			   struct bnx2x_vf_queue *q)
{
	u8 cl_id = vfq_cl_id(vf, q);
	u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
	unsigned long q_type = 0;

	set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
	set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);

	/* Queue State object */
	bnx2x_init_queue_obj(bp, &q->sp_obj,
			     cl_id, &q->cid, 1, func_id,
			     bnx2x_vf_sp(bp, vf, q_data),
			     bnx2x_vf_sp_map(bp, vf, q_data),
			     q_type);

	DP(BNX2X_MSG_IOV,
	   "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n",
	   vf->abs_vfid, q->sp_obj.func_id, q->cid);
}
/* called by bnx2x_nic_load */
int bnx2x_iov_nic_init(struct bnx2x *bp)
{
	int vfid;

	if (!IS_SRIOV(bp)) {
		DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
		return 0;
	}

	DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);

	/* let FLR complete ... */
	msleep(100);

	/* initialize vf database */
	for_each_vf(bp, vfid) {
		struct bnx2x_virtf *vf = BP_VF(bp, vfid);

		int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
			BNX2X_CIDS_PER_VF;

		union cdu_context *base_cxt = (union cdu_context *)
			BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
			(base_vf_cid & (ILT_PAGE_CIDS-1));

		DP(BNX2X_MSG_IOV,
		   "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
		   vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
		   BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);

		/* init statically provisioned resources */
		bnx2x_iov_static_resc(bp, vf);

		/* queues are initialized during VF-ACQUIRE */

		/* reserve the vf vlan credit */
		bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf));

		vf->filter_state = 0;
		vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);

		/* init mcast object - This object will be re-initialized
		 * during VF-ACQUIRE with the proper cl_id and cid.
		 * It needs to be initialized here so that it can be safely
		 * handled by a subsequent FLR flow.
		 */
		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
				     0xFF, 0xFF, 0xFF,
				     bnx2x_vf_sp(bp, vf, mcast_rdata),
				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
				     BNX2X_FILTER_MCAST_PENDING,
				     &vf->filter_state,
				     BNX2X_OBJ_TYPE_RX_TX);

		/* set the mailbox message addresses */
		BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
			(((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
			 MBX_MSG_ALIGNED_SIZE);

		BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
			vfid * MBX_MSG_ALIGNED_SIZE;

		/* Enable vf mailbox */
		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
	}

	/* Final VF init */
	for_each_vf(bp, vfid) {
		struct bnx2x_virtf *vf = BP_VF(bp, vfid);

		/* fill in the BDF and bars */
		vf->bus = bnx2x_vf_bus(bp, vfid);
		vf->devfn = bnx2x_vf_devfn(bp, vfid);
		bnx2x_vf_set_bars(bp, vf);

		DP(BNX2X_MSG_IOV,
		   "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
		   vf->abs_vfid, vf->bus, vf->devfn,
		   (unsigned)vf->bars[0].bar, vf->bars[0].size,
		   (unsigned)vf->bars[1].bar, vf->bars[1].size,
		   (unsigned)vf->bars[2].bar, vf->bars[2].size);
	}

	return 0;
}
/* called by bnx2x_chip_cleanup */
int bnx2x_iov_chip_cleanup(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return 0;

	/* release all the VFs */
	for_each_vf(bp, i)
		bnx2x_vf_release(bp, BP_VF(bp, i), true); /* blocking */

	return 0;
}
/* called by bnx2x_init_hw_func, returns the next ilt line */
int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
{
	int i;
	struct bnx2x_ilt *ilt = BP_ILT(bp);

	if (!IS_SRIOV(bp))
		return line;

	/* set vfs ilt lines */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);

		ilt->lines[line+i].page = hw_cxt->addr;
		ilt->lines[line+i].page_mapping = hw_cxt->mapping;
		ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
	}
	return line + i;
}
static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid)
{
	return ((cid >= BNX2X_FIRST_VF_CID) &&
		((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS));
}
void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
					struct bnx2x_vf_queue *vfq,
					union event_ring_elem *elem)
{
	unsigned long ramrod_flags = 0;
	int rc = 0;

	/* Always push next commands out, don't wait here */
	set_bit(RAMROD_CONT, &ramrod_flags);

	switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
	case BNX2X_FILTER_MAC_PENDING:
		rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
					   &ramrod_flags);
		break;
	case BNX2X_FILTER_VLAN_PENDING:
		rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
					    &ramrod_flags);
		break;
	default:
		BNX2X_ERR("Unsupported classification command: %d\n",
			  elem->message.data.eth_event.echo);
		return;
	}
	if (rc < 0)
		BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
	else if (rc > 0)
		DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
}
void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
			       struct bnx2x_virtf *vf)
{
	struct bnx2x_mcast_ramrod_params rparam = {NULL};
	int rc;

	rparam.mcast_obj = &vf->mcast_obj;
	vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);

	/* If there are pending mcast commands - send them */
	if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
		if (rc < 0)
			BNX2X_ERR("Failed to send pending mcast commands: %d\n",
				  rc);
	}
}
void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
				 struct bnx2x_virtf *vf)
{
	smp_mb__before_clear_bit();
	clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
	smp_mb__after_clear_bit();
}
int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
{
	struct bnx2x_virtf *vf;
	int qidx = 0, abs_vfid;
	u8 opcode;
	u16 cid = 0xffff;

	if (!IS_SRIOV(bp))
		return 1;

	/* first get the cid - the only events we handle here are cfc-delete
	 * and set-mac completion
	 */
	opcode = elem->message.opcode;

	switch (opcode) {
	case EVENT_RING_OPCODE_CFC_DEL:
		cid = SW_CID((__force __le32)
			     elem->message.data.cfc_del_event.cid);
		DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
		break;
	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
	case EVENT_RING_OPCODE_MULTICAST_RULES:
	case EVENT_RING_OPCODE_FILTERS_RULES:
		cid = (elem->message.data.eth_event.echo &
		       BNX2X_SWCID_MASK);
		DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
		break;
	case EVENT_RING_OPCODE_VF_FLR:
		abs_vfid = elem->message.data.vf_flr_event.vf_id;
		DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
		   abs_vfid);
		goto get_vf;
	case EVENT_RING_OPCODE_MALICIOUS_VF:
		abs_vfid = elem->message.data.malicious_vf_event.vf_id;
		DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
		   abs_vfid, elem->message.data.malicious_vf_event.err_id);
		goto get_vf;
	default:
		return 1;
	}

	/* check if the cid is the VF range */
	if (!bnx2x_iov_is_vf_cid(bp, cid)) {
		DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);
		return 1;
	}

	/* extract vf and rxq index from vf_cid - relies on the following:
	 * 1. vfid on cid reflects the true abs_vfid
	 * 2. The max number of VFs (per path) is 64
	 */
	qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
	abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
get_vf:
	vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf) {
		BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n",
			  cid, abs_vfid);
		return 0;
	}

	switch (opcode) {
	case EVENT_RING_OPCODE_CFC_DEL:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n",
		   vf->abs_vfid, qidx);
		vfq_get(vf, qidx)->sp_obj.complete_cmd(bp,
						       &vfq_get(vf,
								qidx)->sp_obj,
						       BNX2X_Q_CMD_CFC_DEL);
		break;
	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem);
		break;
	case EVENT_RING_OPCODE_MULTICAST_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_mcast_eqe(bp, vf);
		break;
	case EVENT_RING_OPCODE_FILTERS_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_filters_eqe(bp, vf);
		break;
	case EVENT_RING_OPCODE_VF_FLR:
		DP(BNX2X_MSG_IOV, "got VF [%d] FLR notification\n",
		   vf->abs_vfid);
		/* Do nothing for now */
		break;
	case EVENT_RING_OPCODE_MALICIOUS_VF:
		DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d error id %x\n",
		   abs_vfid, elem->message.data.malicious_vf_event.err_id);
		/* Do nothing for now */
		break;
	}
	/* SRIOV: reschedule any 'in_progress' operations */
	bnx2x_iov_sp_event(bp, cid, false);

	return 0;
}
static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
{
	/* extract the vf from vf_cid - relies on the following:
	 * 1. vfid on cid reflects the true abs_vfid
	 * 2. The max number of VFs (per path) is 64
	 */
	int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
	return bnx2x_vf_by_abs_fid(bp, abs_vfid);
}
void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
				struct bnx2x_queue_sp_obj **q_obj)
{
	struct bnx2x_virtf *vf;

	if (!IS_SRIOV(bp))
		return;

	vf = bnx2x_vf_by_cid(bp, vf_cid);

	if (vf) {
		/* extract queue index from vf_cid - relies on the following:
		 * 1. vfid on cid reflects the true abs_vfid
		 * 2. The max number of VFs (per path) is 64
		 */
		int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
		*q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
	} else {
		BNX2X_ERR("No vf matching cid %d\n", vf_cid);
	}
}
void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work)
{
	struct bnx2x_virtf *vf;

	/* check if the cid is the VF range */
	if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid))
		return;

	vf = bnx2x_vf_by_cid(bp, vf_cid);
	if (vf) {
		/* set in_progress flag */
		atomic_set(&vf->op_in_progress, 1);
		if (queue_work)
			queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
	}
}
void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
{
	int i;
	int first_queue_query_index, num_queues_req;
	dma_addr_t cur_data_offset;
	struct stats_query_entry *cur_query_entry;
	u8 stats_count = 0;
	bool is_fcoe = false;

	if (!IS_SRIOV(bp))
		return;

	if (!NO_FCOE(bp))
		is_fcoe = true;

	/* fcoe adds one global request and one queue request */
	num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe;
	first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
		(is_fcoe ? 0 : 1);

	DP(BNX2X_MSG_IOV,
	   "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
	   BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
	   first_queue_query_index + num_queues_req);

	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, queue_stats) +
		num_queues_req * sizeof(struct per_queue_stats);

	cur_query_entry = &bp->fw_stats_req->
		query[first_queue_query_index + num_queues_req];

	for_each_vf(bp, i) {
		int j;
		struct bnx2x_virtf *vf = BP_VF(bp, i);

		if (vf->state != VF_ENABLED) {
			DP(BNX2X_MSG_IOV,
			   "vf %d not enabled so no stats for it\n",
			   vf->abs_vfid);
			continue;
		}

		DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid);
		for_each_vfq(vf, j) {
			struct bnx2x_vf_queue *rxq = vfq_get(vf, j);

			dma_addr_t q_stats_addr =
				vf->fw_stat_map + j * vf->stats_stride;

			/* collect stats from active queues only */
			if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
			    BNX2X_Q_LOGICAL_STATE_STOPPED)
				continue;

			/* create stats query entry for this queue */
			cur_query_entry->kind = STATS_TYPE_QUEUE;
			cur_query_entry->index = vfq_stat_id(vf, rxq);
			cur_query_entry->funcID =
				cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
			cur_query_entry->address.hi =
				cpu_to_le32(U64_HI(q_stats_addr));
			cur_query_entry->address.lo =
				cpu_to_le32(U64_LO(q_stats_addr));
			DP(BNX2X_MSG_IOV,
			   "added address %x %x for vf %d queue %d client %d\n",
			   cur_query_entry->address.hi,
			   cur_query_entry->address.lo, cur_query_entry->funcID,
			   j, cur_query_entry->index);
			cur_query_entry++;
			cur_data_offset += sizeof(struct per_queue_stats);
			stats_count++;

			/* all stats are coalesced to the leading queue */
			if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
				break;
		}
	}
	bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
}
void bnx2x_iov_sp_task(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return;
	/* Iterate over all VFs and invoke state transition for VFs with
	 * 'in-progress' slow-path operations
	 */
	DP(BNX2X_MSG_IOV, "searching for pending vf operations\n");
	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);

		if (!vf) {
			BNX2X_ERR("VF was null! skipping...\n");
			continue;
		}

		if (!list_empty(&vf->op_list_head) &&
		    atomic_read(&vf->op_in_progress)) {
			DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
			bnx2x_vfop_cur(bp, vf)->transition(bp, vf);
		}
	}
}
struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id)
{
	int i;
	struct bnx2x_virtf *vf = NULL;

	for_each_vf(bp, i) {
		vf = BP_VF(bp, i);
		if (stat_id >= vf->igu_base_id &&
		    stat_id < vf->igu_base_id + vf_sb_count(vf))
			break;
	}
	return vf;
}
/* VF API helpers */
static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
				u8 enable)
{
	u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4;
	u32 val = enable ? (abs_vfid | (1 << 6)) : 0;

	REG_WR(bp, reg, val);
}
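
/* Editor's note: each queue-zone slot in the PXP permission table is a
 * 32-bit register; the value written above is the owning VF's absolute
 * id with bit 6 acting as the entry's valid/enable flag, while writing
 * 0 revokes the mapping.
 */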
static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i;

	for_each_vfq(vf, i)
		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
				    vfq_qzone_id(vf, vfq_get(vf, i)), false);
}
static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	u32 val;

	/* clear the VF configuration - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
	val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN |
		 IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK);
	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}
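
/* Editor's note: bnx2x_pretend_func() retargets subsequent GRC register
 * accesses at another function's context, here the VF identified by
 * HW_VF_HANDLE(). As done above, every pretend must be undone by a
 * second call restoring BP_ABS_FUNC(bp), or later PF register accesses
 * would hit the wrong function.
 */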
u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
		     BNX2X_VF_MAX_QUEUES);
}
int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
			    struct vf_pf_resc_request *req_resc)
{
	u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
	u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);

	return ((req_resc->num_rxqs <= rxq_cnt) &&
		(req_resc->num_txqs <= txq_cnt) &&
		(req_resc->num_sbs <= vf_sb_count(vf)) &&
		(req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
		(req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
}
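
/* Editor's sketch: with a VF provisioned for 4 SBs, 4 rx/tx queues,
 * 8 MAC filters and 8 VLAN filters, a request of {num_rxqs = 2,
 * num_txqs = 2, num_sbs = 4, num_mac_filters = 4, num_vlan_filters = 4}
 * passes the check above, while any single field exceeding its
 * provisioned counterpart fails the whole request.
 */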
int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
		     struct vf_pf_resc_request *resc)
{
	int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
		BNX2X_CIDS_PER_VF;

	union cdu_context *base_cxt = (union cdu_context *)
		BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
		(base_vf_cid & (ILT_PAGE_CIDS-1));
	int i;

	/* if state is 'acquired' the VF was not released or FLR'd, in
	 * this case the returned resources match the already acquired
	 * resources. Verify that the requested numbers do not exceed the
	 * already acquired numbers.
	 */
	if (vf->state == VF_ACQUIRED) {
		DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
		   vf->abs_vfid);

		if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
			BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= then previously acquired numbers\n",
				  vf->abs_vfid);
			return -EINVAL;
		}
		return 0;
	}

	/* Otherwise vf state must be 'free' or 'reset' */
	if (vf->state != VF_FREE && vf->state != VF_RESET) {
		BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n",
			  vf->abs_vfid, vf->state);
		return -EINVAL;
	}

	/* static allocation:
	 * the global maximum number are fixed per VF. Fail the request if
	 * requested number exceed these globals
	 */
	if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
		DP(BNX2X_MSG_IOV,
		   "cannot fulfill vf resource request. Placing maximal available values in response\n");
		/* set the max resource in the vf */
		return -ENOMEM;
	}

	/* Set resources counters - 0 request means max available */
	vf_sb_count(vf) = resc->num_sbs;
	vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
	vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
	if (resc->num_mac_filters)
		vf_mac_rules_cnt(vf) = resc->num_mac_filters;
	if (resc->num_vlan_filters)
		vf_vlan_rules_cnt(vf) = resc->num_vlan_filters;

	DP(BNX2X_MSG_IOV,
	   "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
	   vf_sb_count(vf), vf_rxq_count(vf),
	   vf_txq_count(vf), vf_mac_rules_cnt(vf),
	   vf_vlan_rules_cnt(vf));

	/* Initialize the queues */
	if (!vf->vfqs) {
		DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n");
		return -EINVAL;
	}

	for_each_vfq(vf, i) {
		struct bnx2x_vf_queue *q = vfq_get(vf, i);

		if (!q) {
			BNX2X_ERR("q number %d was not allocated\n", i);
			return -EINVAL;
		}

		q->index = i;
		q->cxt = &((base_cxt + i)->eth);
		q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;

		DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n",
		   vf->abs_vfid, i, q->index, q->cid, q->cxt);

		/* init SP objects */
		bnx2x_vfq_init(bp, vf, q);
	}
	vf->state = VF_ACQUIRED;
	return 0;
}
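
/* Editor's note on the VF state machine as implemented in this file:
 * a VF moves VF_FREE/VF_RESET -> VF_ACQUIRED (bnx2x_vf_acquire) ->
 * VF_ENABLED (bnx2x_vf_init), drops back to VF_ACQUIRED on close, and
 * release frees its resources on the way back towards VF_FREE.
 */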
int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
{
	struct bnx2x_func_init_params func_init = {0};
	u16 flags = 0;
	int i;

	/* the sb resources are initialized at this point, do the
	 * FW/HW initializations
	 */
	for_each_vf_sb(vf, i)
		bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true,
			      vf_igu_sb(vf, i), vf_igu_sb(vf, i));

	/* Sanity checks */
	if (vf->state != VF_ACQUIRED) {
		DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n",
		   vf->abs_vfid, vf->state);
		return -EINVAL;
	}

	/* let FLR complete ... */
	msleep(100);

	/* FLR cleanup epilogue */
	if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid))
		return -EBUSY;

	/* reset IGU VF statistics: MSIX */
	REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4, 0);

	/* vf init */
	if (vf->cfg_flags & VF_CFG_STATS)
		flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ);

	if (vf->cfg_flags & VF_CFG_TPA)
		flags |= FUNC_FLG_TPA;

	if (is_vf_multi(vf))
		flags |= FUNC_FLG_RSS;

	/* function setup */
	func_init.func_flgs = flags;
	func_init.pf_id = BP_FUNC(bp);
	func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
	func_init.fw_stat_map = vf->fw_stat_map;
	func_init.spq_map = vf->spq_map;
	func_init.spq_prod = 0;
	bnx2x_func_init(bp, &func_init);

	/* Enable the vf */
	bnx2x_vf_enable_access(bp, vf->abs_vfid);
	bnx2x_vf_enable_traffic(bp, vf);

	/* queue protection table */
	for_each_vfq(vf, i)
		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
				    vfq_qzone_id(vf, vfq_get(vf, i)), true);

	vf->state = VF_ENABLED;

	/* update vf bulletin board */
	bnx2x_post_vf_bulletin(bp, vf->index);

	return 0;
}
struct set_vf_state_cookie {
	struct bnx2x_virtf *vf;
	u8 state;
};

void bnx2x_set_vf_state(void *cookie)
{
	struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie;

	p->vf->state = p->state;
}
/* VFOP close (teardown the queues, delete mcasts and close HW) */
static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
	enum bnx2x_vfop_close_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_close,
		.block = false,
	};

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_CLOSE_QUEUES:

		if (++(qx->qid) < vf_rxq_count(vf)) {
			vfop->rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qx->qid);
			if (vfop->rc)
				goto op_err;
			return;
		}

		/* remove multicasts */
		vfop->state = BNX2X_VFOP_CLOSE_HW;
		vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_CLOSE_HW:

		/* disable the interrupts */
		DP(BNX2X_MSG_IOV, "disabling igu\n");
		bnx2x_vf_igu_disable(bp, vf);

		/* disable the VF */
		DP(BNX2X_MSG_IOV, "clearing qtbl\n");
		bnx2x_vf_clr_qtbl(bp, vf);

		goto op_done;
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc);
op_done:

	/* need to make sure there are no outstanding stats ramrods which may
	 * cause the device to access the VF's stats buffer which it will free
	 * as soon as we return from the close flow.
	 */
	{
		struct set_vf_state_cookie cookie;

		cookie.vf = vf;
		cookie.state = VF_ACQUIRED;
		bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
	}

	DP(BNX2X_MSG_IOV, "set state to acquired\n");
	bnx2x_vfop_end(bp, vf, vfop);
}
int bnx2x_vfop_close_cmd(struct bnx2x *bp,
			 struct bnx2x_virtf *vf,
			 struct bnx2x_vfop_cmd *cmd)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	if (vfop) {
		vfop->args.qx.qid = -1; /* loop */
		bnx2x_vfop_opset(BNX2X_VFOP_CLOSE_QUEUES,
				 bnx2x_vfop_close, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_close,
					     cmd->block);
	}
	return -ENOMEM;
}
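
/* Usage sketch (editor's addition, not part of the driver): callers drive
 * a vfop either asynchronously or blocking through the cmd descriptor,
 * e.g.
 *
 *	struct bnx2x_vfop_cmd cmd = {
 *		.done	= NULL,		(no completion callback)
 *		.block	= true,		(wait until the op finishes)
 *	};
 *	rc = bnx2x_vfop_close_cmd(bp, vf, &cmd);
 *
 * mirroring how bnx2x_vf_release() queues the release op further down.
 */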
/* VF release can be called either: 1. The VF was acquired but
 * not enabled 2. the vf was enabled or in the process of being
 * enabled
 */
static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_release,
		.block = false,
	};

	DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid,
	   vf->state == VF_FREE ? "Free" :
	   vf->state == VF_ACQUIRED ? "Acquired" :
	   vf->state == VF_ENABLED ? "Enabled" :
	   vf->state == VF_RESET ? "Reset" :
	   "Unknown");

	switch (vf->state) {
	case VF_ENABLED:
		vfop->rc = bnx2x_vfop_close_cmd(bp, vf, &cmd);
		if (vfop->rc)
			goto op_err;
		return;

	case VF_ACQUIRED:
		DP(BNX2X_MSG_IOV, "about to free resources\n");
		bnx2x_vf_free_resc(bp, vf);
		DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);
		goto op_done;

	case VF_FREE:
	case VF_RESET:
		/* do nothing */
		goto op_done;
	default:
		bnx2x_vfop_default(vf->state);
	}
op_err:
	BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, vfop->rc);
op_done:
	bnx2x_vfop_end(bp, vf, vfop);
}
static void bnx2x_vfop_rss(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	enum bnx2x_vfop_rss_state state;

	if (!vfop) {
		BNX2X_ERR("vfop was null\n");
		return;
	}

	state = vfop->state;
	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_RSS_CONFIG:
		/* next state */
		vfop->state = BNX2X_VFOP_RSS_DONE;
		bnx2x_config_rss(bp, &vfop->op_p->rss);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
		BNX2X_ERR("RSS error: rc %d\n", vfop->rc);
op_done:
	case BNX2X_VFOP_RSS_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}
int bnx2x_vfop_release_cmd(struct bnx2x *bp,
			   struct bnx2x_virtf *vf,
			   struct bnx2x_vfop_cmd *cmd)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	if (vfop) {
		bnx2x_vfop_opset(-1, /* use vf->state */
				 bnx2x_vfop_release, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_release,
					     cmd->block);
	}
	return -ENOMEM;
}
int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
		       struct bnx2x_virtf *vf,
		       struct bnx2x_vfop_cmd *cmd)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		bnx2x_vfop_opset(BNX2X_VFOP_RSS_CONFIG, bnx2x_vfop_rss,
				 cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rss,
					     cmd->block);
	}
	return -ENOMEM;
}
/* VF release ~ VF close + VF release-resources
 * Release is the ultimate SW shutdown and is called whenever an
 * irrecoverable error is encountered.
 */
void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block)
{
	struct bnx2x_vfop_cmd cmd = {
		.done = NULL,
		.block = block,
	};
	int rc;

	DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid);
	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);

	rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
	if (rc)
		WARN(rc,
		     "VF[%d] Failed to allocate resources for release op- rc=%d\n",
		     vf->abs_vfid, rc);
}
static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,
				     struct bnx2x_virtf *vf, u32 *sbdf)
{
	*sbdf = vf->devfn | (vf->bus << 8);
}

static inline void bnx2x_vf_get_bars(struct bnx2x *bp, struct bnx2x_virtf *vf,
				     struct bnx2x_vf_bar_info *bar_info)
{
	int n;

	bar_info->nr_bars = bp->vfdb->sriov.nres;
	for (n = 0; n < bar_info->nr_bars; n++)
		bar_info->bars[n] = vf->bars[n];
}
void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      enum channel_tlvs tlv)
{
	/* we don't lock the channel for unsupported tlvs */
	if (!bnx2x_tlv_supported(tlv)) {
		BNX2X_ERR("attempting to lock with unsupported tlv. Aborting\n");
		return;
	}

	/* lock the channel */
	mutex_lock(&vf->op_mutex);

	/* record the locking op */
	vf->op_current = tlv;

	/* log the lock */
	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n",
	   vf->abs_vfid, tlv);
}

void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
				enum channel_tlvs expected_tlv)
{
	enum channel_tlvs current_tlv;

	if (!vf) {
		BNX2X_ERR("VF was %p\n", vf);
		return;
	}

	current_tlv = vf->op_current;

	/* we don't unlock the channel for unsupported tlvs */
	if (!bnx2x_tlv_supported(expected_tlv))
		return;

	WARN(expected_tlv != vf->op_current,
	     "lock mismatch: expected %d found %d", expected_tlv,
	     vf->op_current);

	/* record the locking op */
	vf->op_current = CHANNEL_TLV_NONE;

	/* unlock the channel */
	mutex_unlock(&vf->op_mutex);

	/* log the unlock */
	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
	   vf->abs_vfid, current_tlv);
}
int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
{
	struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));

	DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
	   num_vfs_param, BNX2X_NR_VIRTFN(bp));

	/* HW channel is only operational when PF is up */
	if (bp->state != BNX2X_STATE_OPEN) {
		BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n");
		return -EINVAL;
	}

	/* we are always bound by the total_vfs in the configuration space */
	if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) {
		BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n",
			  num_vfs_param, BNX2X_NR_VIRTFN(bp));
		num_vfs_param = BNX2X_NR_VIRTFN(bp);
	}

	bp->requested_nr_virtfn = num_vfs_param;
	if (num_vfs_param == 0) {
		pci_disable_sriov(dev);
		return 0;
	} else {
		return bnx2x_enable_sriov(bp);
	}
}
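
/* Editor's note: this is the driver's PCI sriov_configure hook; the PCI
 * core invokes it when userspace writes a VF count to
 * /sys/bus/pci/devices/<B:D.F>/sriov_numvfs, so writing 0 tears SR-IOV
 * down via pci_disable_sriov() and any other value funnels into
 * bnx2x_enable_sriov().
 */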
#define IGU_ENTRY_SIZE 4

int bnx2x_enable_sriov(struct bnx2x *bp)
{
	int rc = 0, req_vfs = bp->requested_nr_virtfn;
	int vf_idx, sb_idx, vfq_idx, qcount, first_vf;
	u32 igu_entry, address;
	u16 num_vf_queues;

	if (req_vfs == 0)
		return 0;

	first_vf = bp->vfdb->sriov.first_vf_in_pf;

	/* statically distribute vf sb pool between VFs */
	num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES,
			      BP_VFDB(bp)->vf_sbs_pool / req_vfs);

	/* zero previous values learned from igu cam */
	for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) {
		struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);

		vf->sb_count = 0;
		vf_sb_count(BP_VF(bp, vf_idx)) = 0;
	}
	bp->vfdb->vf_sbs_pool = 0;

	/* prepare IGU cam */
	sb_idx = BP_VFDB(bp)->first_vf_igu_entry;
	address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE;
	for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
		for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) {
			igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT |
				vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT |
				IGU_REG_MAPPING_MEMORY_VALID;
			DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n",
			   sb_idx, vf_idx);
			REG_WR(bp, address, igu_entry);
			sb_idx++;
			address += IGU_ENTRY_SIZE;
		}
	}

	/* Reinitialize vf database according to igu cam */
	bnx2x_get_vf_igu_cam_info(bp);

	DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n",
	   BP_VFDB(bp)->vf_sbs_pool, num_vf_queues);

	qcount = 0;
	for_each_vf(bp, vf_idx) {
		struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);

		/* set local queue arrays */
		vf->vfqs = &bp->vfdb->vfqs[qcount];
		qcount += vf_sb_count(vf);
	}

	/* prepare msix vectors in VF configuration space */
	for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
		bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
		REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
		       num_vf_queues);
	}
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* enable sriov. This will probe all the VFs, and consequentially cause
	 * the "acquire" messages to appear on the VF PF channel.
	 */
	DP(BNX2X_MSG_IOV, "about to call enable sriov\n");
	pci_disable_sriov(bp->pdev);
	rc = pci_enable_sriov(bp->pdev, req_vfs);
	if (rc) {
		BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
		return rc;
	}
	DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs);
	return req_vfs;
}
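
/* Editor's sketch of the IGU CAM entry encoding used above (shift values
 * per the IGU register definitions): for vf_idx = 9 and vfq_idx = 2 the
 * entry packs the function id into the FID field, the vector number into
 * the VECTOR field, and sets the VALID bit:
 *
 *	igu_entry = 9 << IGU_REG_MAPPING_MEMORY_FID_SHIFT |
 *		    2 << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT |
 *		    IGU_REG_MAPPING_MEMORY_VALID;
 */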
void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
{
	int vfidx;
	struct pf_vf_bulletin_content *bulletin;

	DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n");
	for_each_vf(bp, vfidx) {
		bulletin = BP_VF_BULLETIN(bp, vfidx);
		if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN)
			bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0);
	}
}
void bnx2x_disable_sriov(struct bnx2x *bp)
{
	pci_disable_sriov(bp->pdev);
}
int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, struct bnx2x_virtf **vf,
		      struct pf_vf_bulletin_content **bulletin)
{
	if (bp->state != BNX2X_STATE_OPEN) {
		BNX2X_ERR("vf ndo called though PF is down\n");
		return -EINVAL;
	}

	if (!IS_SRIOV(bp)) {
		BNX2X_ERR("vf ndo called though sriov is disabled\n");
		return -EINVAL;
	}

	if (vfidx >= BNX2X_NR_VIRTFN(bp)) {
		BNX2X_ERR("vf ndo called for uninitialized VF. vfidx was %d BNX2X_NR_VIRTFN was %d\n",
			  vfidx, BNX2X_NR_VIRTFN(bp));
		return -EINVAL;
	}

	/* init members */
	*vf = BP_VF(bp, vfidx);
	*bulletin = BP_VF_BULLETIN(bp, vfidx);

	if (!*vf) {
		BNX2X_ERR("vf ndo called but vf struct is null. vfidx was %d\n",
			  vfidx);
		return -EINVAL;
	}

	if (!(*vf)->vfqs) {
		BNX2X_ERR("vf ndo called but vfqs struct is null. Was ndo invoked before dynamically enabling SR-IOV? vfidx was %d\n",
			  vfidx);
		return -EINVAL;
	}

	if (!*bulletin) {
		BNX2X_ERR("vf ndo called but Bulletin Board struct is null. vfidx was %d\n",
			  vfidx);
		return -EINVAL;
	}

	return 0;
}
*dev
, int vfidx
,
3270 struct ifla_vf_info
*ivi
)
3272 struct bnx2x
*bp
= netdev_priv(dev
);
3273 struct bnx2x_virtf
*vf
= NULL
;
3274 struct pf_vf_bulletin_content
*bulletin
= NULL
;
3275 struct bnx2x_vlan_mac_obj
*mac_obj
;
3276 struct bnx2x_vlan_mac_obj
*vlan_obj
;
3279 /* sanity and init */
3280 rc
= bnx2x_vf_ndo_prep(bp
, vfidx
, &vf
, &bulletin
);
3283 mac_obj
= &bnx2x_leading_vfq(vf
, mac_obj
);
3284 vlan_obj
= &bnx2x_leading_vfq(vf
, vlan_obj
);
3285 if (!mac_obj
|| !vlan_obj
) {
3286 BNX2X_ERR("VF partially initialized\n");
3292 ivi
->tx_rate
= 10000; /* always 10G. TBA take from link struct */
3293 ivi
->spoofchk
= 1; /*always enabled */
3294 if (vf
->state
== VF_ENABLED
) {
3295 /* mac and vlan are in vlan_mac objects */
3296 if (validate_vlan_mac(bp
, &bnx2x_leading_vfq(vf
, mac_obj
)))
3297 mac_obj
->get_n_elements(bp
, mac_obj
, 1, (u8
*)&ivi
->mac
,
3299 if (validate_vlan_mac(bp
, &bnx2x_leading_vfq(vf
, vlan_obj
)))
3300 vlan_obj
->get_n_elements(bp
, vlan_obj
, 1,
3301 (u8
*)&ivi
->vlan
, 0,
3305 if (bulletin
->valid_bitmap
& (1 << MAC_ADDR_VALID
))
3306 /* mac configured by ndo so its in bulletin board */
3307 memcpy(&ivi
->mac
, bulletin
->mac
, ETH_ALEN
);
3309 /* function has not been loaded yet. Show mac as 0s */
3310 memset(&ivi
->mac
, 0, ETH_ALEN
);
3313 if (bulletin
->valid_bitmap
& (1 << VLAN_VALID
))
3314 /* vlan configured by ndo so its in bulletin board */
3315 memcpy(&ivi
->vlan
, &bulletin
->vlan
, VLAN_HLEN
);
3317 /* function has not been loaded yet. Show vlans as 0s */
3318 memset(&ivi
->vlan
, 0, VLAN_HLEN
);
/* New mac for VF. Consider these cases:
 * 1. VF hasn't been acquired yet - save the mac in local bulletin board and
 *    supply at acquire.
 * 2. VF has already been acquired but has not yet initialized - store in local
 *    bulletin board. mac will be posted on VF bulletin board after VF init. VF
 *    will configure this mac when it is ready.
 * 3. VF has already initialized but has not yet setup a queue - post the new
 *    mac on VF's bulletin board right now. VF will configure this mac when it
 *    is ready.
 * 4. VF has already set a queue - delete any macs already configured for this
 *    queue and manually config the new mac.
 * In any event, once this function has been called refuse any attempts by the
 * VF to configure any mac for itself except for this mac. In case of a race
 * where the VF fails to see the new post on its bulletin board before sending a
 * mac configuration request, the PF will simply fail the request and VF can try
 * again after consulting its bulletin board.
 */
int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc, q_logical_state;
	struct bnx2x_virtf *vf = NULL;
	struct pf_vf_bulletin_content *bulletin = NULL;

	/* sanity and init */
	rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
	if (rc)
		return rc;
	if (!is_valid_ether_addr(mac)) {
		BNX2X_ERR("mac address invalid\n");
		return -EINVAL;
	}

	/* update PF's copy of the VF's bulletin. Will no longer accept mac
	 * configuration requests from vf unless match this mac
	 */
	bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
	memcpy(bulletin->mac, mac, ETH_ALEN);

	/* Post update on VF's bulletin board */
	rc = bnx2x_post_vf_bulletin(bp, vfidx);
	if (rc) {
		BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
		return rc;
	}

	q_logical_state =
		bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
	if (vf->state == VF_ENABLED &&
	    q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
		/* configure the mac in device on this vf's queue */
		unsigned long ramrod_flags = 0;
		struct bnx2x_vlan_mac_obj *mac_obj =
			&bnx2x_leading_vfq(vf, mac_obj);

		rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
		if (rc)
			return rc;

		/* must lock vfpf channel to protect against vf flows */
		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);

		/* remove existing eth macs */
		rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
		if (rc) {
			BNX2X_ERR("failed to delete eth macs\n");
			rc = -EINVAL;
			goto out;
		}

		/* remove existing uc list macs */
		rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
		if (rc) {
			BNX2X_ERR("failed to delete uc_list macs\n");
			rc = -EINVAL;
			goto out;
		}

		/* configure the new mac to device */
		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
		bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
				  BNX2X_ETH_MAC, &ramrod_flags);

out:
		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
	}

	return 0;
}
int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc, q_logical_state;
	struct bnx2x_virtf *vf = NULL;
	struct pf_vf_bulletin_content *bulletin = NULL;

	/* sanity and init */
	rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
	if (rc)
		return rc;

	if (vlan > 4095) {
		BNX2X_ERR("illegal vlan value %d\n", vlan);
		return -EINVAL;
	}

	DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n",
	   vfidx, vlan, 0);

	/* update PF's copy of the VF's bulletin. No point in posting the vlan
	 * to the VF since it doesn't have anything to do with it. But it useful
	 * to store it here in case the VF is not up yet and we can only
	 * configure the vlan later when it does.
	 */
	bulletin->valid_bitmap |= 1 << VLAN_VALID;
	bulletin->vlan = vlan;

	/* is vf initialized and queue set up? */
	q_logical_state =
		bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
	if (vf->state == VF_ENABLED &&
	    q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
		/* configure the vlan in device on this vf's queue */
		unsigned long ramrod_flags = 0;
		unsigned long vlan_mac_flags = 0;
		struct bnx2x_vlan_mac_obj *vlan_obj =
			&bnx2x_leading_vfq(vf, vlan_obj);
		struct bnx2x_vlan_mac_ramrod_params ramrod_param;
		struct bnx2x_queue_state_params q_params = {NULL};
		struct bnx2x_queue_update_params *update_params;

		rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
		if (rc)
			return rc;
		memset(&ramrod_param, 0, sizeof(ramrod_param));

		/* must lock vfpf channel to protect against vf flows */
		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);

		/* remove existing vlans */
		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
		rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
					  &ramrod_flags);
		if (rc) {
			BNX2X_ERR("failed to delete vlans\n");
			rc = -EINVAL;
			goto out;
		}

		/* send queue update ramrod to configure default vlan and silent
		 * vlan removal
		 */
		__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
		q_params.cmd = BNX2X_Q_CMD_UPDATE;
		q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
		update_params = &q_params.params.update;
		__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
			  &update_params->update_flags);
		__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
			  &update_params->update_flags);

		if (vlan == 0) {
			/* if vlan is 0 then we want to leave the VF traffic
			 * untagged, and leave the incoming traffic untouched
			 * (i.e. do not remove any vlan tags).
			 */
			__clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
				    &update_params->update_flags);
			__clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
				    &update_params->update_flags);
		} else {
			/* configure the new vlan to device */
			__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
			ramrod_param.vlan_mac_obj = vlan_obj;
			ramrod_param.ramrod_flags = ramrod_flags;
			ramrod_param.user_req.u.vlan.vlan = vlan;
			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
			rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
			if (rc) {
				BNX2X_ERR("failed to configure vlan\n");
				rc = -EINVAL;
				goto out;
			}

			/* configure default vlan to vf queue and set silent
			 * vlan removal (the vf remains unaware of this vlan).
			 */
			update_params = &q_params.params.update;
			__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
				  &update_params->update_flags);
			__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
				  &update_params->update_flags);
			update_params->def_vlan = vlan;
		}

		/* Update the Queue state */
		rc = bnx2x_queue_state_change(bp, &q_params);
		if (rc) {
			BNX2X_ERR("Failed to configure default VLAN\n");
			goto out;
		}

		/* clear the flag indicating that this VF needs its vlan
		 * (will only be set if the HV configured the Vlan before vf was
		 * up and we were called because the VF came up later)
		 */
out:
		vf->cfg_flags &= ~VF_CFG_VLAN;
		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
	}
	return 0;
}
/* crc is the first field in the bulletin board. Compute the crc over the
 * entire bulletin board excluding the crc field itself. Use the length field
 * as the Bulletin Board was posted by a PF with possibly a different version
 * from the vf which will sample it. Therefore, the length is computed by the
 * PF and then used blindly by the VF.
 */
u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
			  struct pf_vf_bulletin_content *bulletin)
{
	return crc32(BULLETIN_CRC_SEED,
		     ((u8 *)bulletin) + sizeof(bulletin->crc),
		     bulletin->length - sizeof(bulletin->crc));
}
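
/* Editor's sketch: with the 4-byte crc leading the structure and
 * bulletin->length = L, the digest above covers bytes [4, L) of the
 * bulletin, i.e. everything the PF posted except the crc word itself.
 */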
/* Check for new posts on the bulletin board */
enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
{
	struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
	int attempts;

	/* bulletin board hasn't changed since last sample */
	if (bp->old_bulletin.version == bulletin.version)
		return PFVF_BULLETIN_UNCHANGED;

	/* validate crc of new bulletin board */
	if (bp->old_bulletin.version != bp->pf2vf_bulletin->content.version) {
		/* sampling structure in mid post may result with corrupted data
		 * validate crc to ensure coherency.
		 */
		for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) {
			bulletin = bp->pf2vf_bulletin->content;
			if (bulletin.crc == bnx2x_crc_vf_bulletin(bp,
								  &bulletin))
				break;
			BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n",
				  bulletin.crc,
				  bnx2x_crc_vf_bulletin(bp, &bulletin));
		}
		if (attempts >= BULLETIN_ATTEMPTS) {
			BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n",
				  attempts);
			return PFVF_BULLETIN_CRC_ERR;
		}
	}

	/* the mac address in bulletin board is valid and is new */
	if (bulletin.valid_bitmap & 1 << MAC_ADDR_VALID &&
	    memcmp(bulletin.mac, bp->old_bulletin.mac, ETH_ALEN)) {
		/* update new mac to net device */
		memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
	}

	/* the vlan in bulletin board is valid and is new */
	if (bulletin.valid_bitmap & 1 << VLAN_VALID)
		memcpy(&bulletin.vlan, &bp->old_bulletin.vlan, VLAN_HLEN);

	/* copy new bulletin board to bp */
	bp->old_bulletin = bulletin;

	return PFVF_BULLETIN_UPDATED;
}
void bnx2x_timer_sriov(struct bnx2x *bp)
{
	bnx2x_sample_bulletin(bp);

	/* if channel is down we need to self destruct */
	if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) {
		smp_mb__before_clear_bit();
		set_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
			&bp->sp_rtnl_state);
		smp_mb__after_clear_bit();
		schedule_delayed_work(&bp->sp_rtnl_task, 0);
	}
}
void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
{
	/* vf doorbells are embedded within the regview */
	return bp->regview + PXP_VF_ADDR_DB_START;
}
int bnx2x_vf_pci_alloc(struct bnx2x *bp)
{
	mutex_init(&bp->vf2pf_mutex);

	/* allocate vf2pf mailbox for vf to pf channel */
	BNX2X_PCI_ALLOC(bp->vf2pf_mbox, &bp->vf2pf_mbox_mapping,
			sizeof(struct bnx2x_vf_mbx_msg));

	/* allocate pf 2 vf bulletin board */
	BNX2X_PCI_ALLOC(bp->pf2vf_bulletin, &bp->pf2vf_bulletin_mapping,
			sizeof(union pf_vf_bulletin));

	return 0;

alloc_mem_err:
	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
		       sizeof(struct bnx2x_vf_mbx_msg));
	BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping,
		       sizeof(union pf_vf_bulletin));
	return -ENOMEM;
}
int bnx2x_open_epilog(struct bnx2x *bp)
{
	/* Enable sriov via delayed work. This must be done via delayed work
	 * because it causes the probe of the vf devices to be run, which invoke
	 * register_netdevice which must have rtnl lock taken. As we are holding
	 * the lock right now, that could only work if the probe would not take
	 * the lock. However, as the probe of the vf may be called from other
	 * contexts as well (such as passthrough to vm fails) it can't assume
	 * the lock is being held for it. Using delayed work here allows the
	 * probe code to simply take the lock (i.e. wait for it to be released
	 * if it is being held). We only want to do this if the number of VFs
	 * was set before PF driver was loaded.
	 */
	if (IS_SRIOV(bp) && BNX2X_NR_VIRTFN(bp)) {
		smp_mb__before_clear_bit();
		set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state);
		smp_mb__after_clear_bit();
		schedule_delayed_work(&bp->sp_rtnl_task, 0);
	}

	return 0;
}
void bnx2x_iov_channel_down(struct bnx2x *bp)
{
	int vf_idx;
	struct pf_vf_bulletin_content *bulletin;

	if (!IS_SRIOV(bp))
		return;

	for_each_vf(bp, vf_idx) {
		/* locate this VFs bulletin board and update the channel down
		 * bit
		 */
		bulletin = BP_VF_BULLETIN(bp, vf_idx);
		bulletin->valid_bitmap |= 1 << CHANNEL_DOWN;

		/* update vf bulletin board */
		bnx2x_post_vf_bulletin(bp, vf_idx);
	}
}