/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/
29 /***********************misc routines*****************************/
33 * @pf: pointer to the pf info
34 * @vf: pointer to the vf info
36 * Disable the VF through a SW reset
38 static inline void i40e_vc_disable_vf(struct i40e_pf
*pf
, struct i40e_vf
*vf
)
40 struct i40e_hw
*hw
= &pf
->hw
;
43 reg
= rd32(hw
, I40E_VPGEN_VFRTRIG(vf
->vf_id
));
44 reg
|= I40E_VPGEN_VFRTRIG_VFSWR_MASK
;
45 wr32(hw
, I40E_VPGEN_VFRTRIG(vf
->vf_id
), reg
);
50 * i40e_vc_isvalid_vsi_id
51 * @vf: pointer to the vf info
52 * @vsi_id: vf relative vsi id
54 * check for the valid vsi id
56 static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf
*vf
, u8 vsi_id
)
58 struct i40e_pf
*pf
= vf
->pf
;
60 return pf
->vsi
[vsi_id
]->vf_id
== vf
->vf_id
;
64 * i40e_vc_isvalid_queue_id
65 * @vf: pointer to the vf info
67 * @qid: vsi relative queue id
69 * check for the valid queue id
71 static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf
*vf
, u8 vsi_id
,
74 struct i40e_pf
*pf
= vf
->pf
;
76 return qid
< pf
->vsi
[vsi_id
]->num_queue_pairs
;
80 * i40e_vc_isvalid_vector_id
81 * @vf: pointer to the vf info
82 * @vector_id: vf relative vector id
84 * check for the valid vector id
86 static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf
*vf
, u8 vector_id
)
88 struct i40e_pf
*pf
= vf
->pf
;
90 return vector_id
< pf
->hw
.func_caps
.num_msix_vectors_vf
;
93 /***********************vf resource mgmt routines*****************/
96 * i40e_vc_get_pf_queue_id
97 * @vf: pointer to the vf info
98 * @vsi_idx: index of VSI in PF struct
99 * @vsi_queue_id: vsi relative queue id
101 * return pf relative queue id
103 static u16
i40e_vc_get_pf_queue_id(struct i40e_vf
*vf
, u8 vsi_idx
,
106 struct i40e_pf
*pf
= vf
->pf
;
107 struct i40e_vsi
*vsi
= pf
->vsi
[vsi_idx
];
108 u16 pf_queue_id
= I40E_QUEUE_END_OF_LIST
;
110 if (le16_to_cpu(vsi
->info
.mapping_flags
) &
111 I40E_AQ_VSI_QUE_MAP_NONCONTIG
)
113 le16_to_cpu(vsi
->info
.queue_mapping
[vsi_queue_id
]);
115 pf_queue_id
= le16_to_cpu(vsi
->info
.queue_mapping
[0]) +
122 * i40e_config_irq_link_list
123 * @vf: pointer to the vf info
124 * @vsi_idx: index of VSI in PF struct
125 * @vecmap: irq map info
127 * configure irq link list from the map
129 static void i40e_config_irq_link_list(struct i40e_vf
*vf
, u16 vsi_idx
,
130 struct i40e_virtchnl_vector_map
*vecmap
)
132 unsigned long linklistmap
= 0, tempmap
;
133 struct i40e_pf
*pf
= vf
->pf
;
134 struct i40e_hw
*hw
= &pf
->hw
;
135 u16 vsi_queue_id
, pf_queue_id
;
136 enum i40e_queue_type qtype
;
137 u16 next_q
, vector_id
;
141 vector_id
= vecmap
->vector_id
;
144 reg_idx
= I40E_VPINT_LNKLST0(vf
->vf_id
);
146 reg_idx
= I40E_VPINT_LNKLSTN(
147 ((pf
->hw
.func_caps
.num_msix_vectors_vf
- 1) * vf
->vf_id
) +
150 if (vecmap
->rxq_map
== 0 && vecmap
->txq_map
== 0) {
151 /* Special case - No queues mapped on this vector */
152 wr32(hw
, reg_idx
, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK
);
155 tempmap
= vecmap
->rxq_map
;
156 for_each_set_bit(vsi_queue_id
, &tempmap
, I40E_MAX_VSI_QP
) {
158 (I40E_VIRTCHNL_SUPPORTED_QTYPES
*
162 tempmap
= vecmap
->txq_map
;
163 for_each_set_bit(vsi_queue_id
, &tempmap
, I40E_MAX_VSI_QP
) {
165 (I40E_VIRTCHNL_SUPPORTED_QTYPES
* vsi_queue_id
169 next_q
= find_first_bit(&linklistmap
,
171 I40E_VIRTCHNL_SUPPORTED_QTYPES
));
172 vsi_queue_id
= next_q
/I40E_VIRTCHNL_SUPPORTED_QTYPES
;
173 qtype
= next_q
%I40E_VIRTCHNL_SUPPORTED_QTYPES
;
174 pf_queue_id
= i40e_vc_get_pf_queue_id(vf
, vsi_idx
, vsi_queue_id
);
175 reg
= ((qtype
<< I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT
) | pf_queue_id
);
177 wr32(hw
, reg_idx
, reg
);
179 while (next_q
< (I40E_MAX_VSI_QP
* I40E_VIRTCHNL_SUPPORTED_QTYPES
)) {
181 case I40E_QUEUE_TYPE_RX
:
182 reg_idx
= I40E_QINT_RQCTL(pf_queue_id
);
183 itr_idx
= vecmap
->rxitr_idx
;
185 case I40E_QUEUE_TYPE_TX
:
186 reg_idx
= I40E_QINT_TQCTL(pf_queue_id
);
187 itr_idx
= vecmap
->txitr_idx
;
193 next_q
= find_next_bit(&linklistmap
,
195 I40E_VIRTCHNL_SUPPORTED_QTYPES
),
198 (I40E_MAX_VSI_QP
* I40E_VIRTCHNL_SUPPORTED_QTYPES
)) {
199 vsi_queue_id
= next_q
/ I40E_VIRTCHNL_SUPPORTED_QTYPES
;
200 qtype
= next_q
% I40E_VIRTCHNL_SUPPORTED_QTYPES
;
201 pf_queue_id
= i40e_vc_get_pf_queue_id(vf
, vsi_idx
,
204 pf_queue_id
= I40E_QUEUE_END_OF_LIST
;
208 /* format for the RQCTL & TQCTL regs is same */
210 (qtype
<< I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT
) |
211 (pf_queue_id
<< I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT
) |
212 (1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT
) |
213 (itr_idx
<< I40E_QINT_RQCTL_ITR_INDX_SHIFT
);
214 wr32(hw
, reg_idx
, reg
);
222 * i40e_config_vsi_tx_queue
223 * @vf: pointer to the vf info
224 * @vsi_idx: index of VSI in PF struct
225 * @vsi_queue_id: vsi relative queue index
226 * @info: config. info
230 static int i40e_config_vsi_tx_queue(struct i40e_vf
*vf
, u16 vsi_idx
,
232 struct i40e_virtchnl_txq_info
*info
)
234 struct i40e_pf
*pf
= vf
->pf
;
235 struct i40e_hw
*hw
= &pf
->hw
;
236 struct i40e_hmc_obj_txq tx_ctx
;
241 pf_queue_id
= i40e_vc_get_pf_queue_id(vf
, vsi_idx
, vsi_queue_id
);
243 /* clear the context structure first */
244 memset(&tx_ctx
, 0, sizeof(struct i40e_hmc_obj_txq
));
246 /* only set the required fields */
247 tx_ctx
.base
= info
->dma_ring_addr
/ 128;
248 tx_ctx
.qlen
= info
->ring_len
;
249 tx_ctx
.rdylist
= le16_to_cpu(pf
->vsi
[vsi_idx
]->info
.qs_handle
[0]);
250 tx_ctx
.rdylist_act
= 0;
251 tx_ctx
.head_wb_ena
= 1;
252 tx_ctx
.head_wb_addr
= info
->dma_ring_addr
+
253 (info
->ring_len
* sizeof(struct i40e_tx_desc
));
255 /* clear the context in the HMC */
256 ret
= i40e_clear_lan_tx_queue_context(hw
, pf_queue_id
);
258 dev_err(&pf
->pdev
->dev
,
259 "Failed to clear VF LAN Tx queue context %d, error: %d\n",
265 /* set the context in the HMC */
266 ret
= i40e_set_lan_tx_queue_context(hw
, pf_queue_id
, &tx_ctx
);
268 dev_err(&pf
->pdev
->dev
,
269 "Failed to set VF LAN Tx queue context %d error: %d\n",
275 /* associate this queue with the PCI VF function */
276 qtx_ctl
= I40E_QTX_CTL_VF_QUEUE
;
277 qtx_ctl
|= ((hw
->pf_id
<< I40E_QTX_CTL_PF_INDX_SHIFT
)
278 & I40E_QTX_CTL_PF_INDX_MASK
);
279 qtx_ctl
|= (((vf
->vf_id
+ hw
->func_caps
.vf_base_id
)
280 << I40E_QTX_CTL_VFVM_INDX_SHIFT
)
281 & I40E_QTX_CTL_VFVM_INDX_MASK
);
282 wr32(hw
, I40E_QTX_CTL(pf_queue_id
), qtx_ctl
);
290 * i40e_config_vsi_rx_queue
291 * @vf: pointer to the vf info
292 * @vsi_idx: index of VSI in PF struct
293 * @vsi_queue_id: vsi relative queue index
294 * @info: config. info
298 static int i40e_config_vsi_rx_queue(struct i40e_vf
*vf
, u16 vsi_idx
,
300 struct i40e_virtchnl_rxq_info
*info
)
302 struct i40e_pf
*pf
= vf
->pf
;
303 struct i40e_hw
*hw
= &pf
->hw
;
304 struct i40e_hmc_obj_rxq rx_ctx
;
308 pf_queue_id
= i40e_vc_get_pf_queue_id(vf
, vsi_idx
, vsi_queue_id
);
310 /* clear the context structure first */
311 memset(&rx_ctx
, 0, sizeof(struct i40e_hmc_obj_rxq
));
313 /* only set the required fields */
314 rx_ctx
.base
= info
->dma_ring_addr
/ 128;
315 rx_ctx
.qlen
= info
->ring_len
;
317 if (info
->splithdr_enabled
) {
318 rx_ctx
.hsplit_0
= I40E_RX_SPLIT_L2
|
320 I40E_RX_SPLIT_TCP_UDP
|
322 /* header length validation */
323 if (info
->hdr_size
> ((2 * 1024) - 64)) {
327 rx_ctx
.hbuff
= info
->hdr_size
>> I40E_RXQ_CTX_HBUFF_SHIFT
;
329 /* set splitalways mode 10b */
333 /* databuffer length validation */
334 if (info
->databuffer_size
> ((16 * 1024) - 128)) {
338 rx_ctx
.dbuff
= info
->databuffer_size
>> I40E_RXQ_CTX_DBUFF_SHIFT
;
340 /* max pkt. length validation */
341 if (info
->max_pkt_size
>= (16 * 1024) || info
->max_pkt_size
< 64) {
345 rx_ctx
.rxmax
= info
->max_pkt_size
;
347 /* enable 32bytes desc always */
351 rx_ctx
.tphrdesc_ena
= 1;
352 rx_ctx
.tphwdesc_ena
= 1;
353 rx_ctx
.tphdata_ena
= 1;
354 rx_ctx
.tphhead_ena
= 1;
355 rx_ctx
.lrxqthresh
= 2;
359 /* clear the context in the HMC */
360 ret
= i40e_clear_lan_rx_queue_context(hw
, pf_queue_id
);
362 dev_err(&pf
->pdev
->dev
,
363 "Failed to clear VF LAN Rx queue context %d, error: %d\n",
369 /* set the context in the HMC */
370 ret
= i40e_set_lan_rx_queue_context(hw
, pf_queue_id
, &rx_ctx
);
372 dev_err(&pf
->pdev
->dev
,
373 "Failed to set VF LAN Rx queue context %d error: %d\n",
385 * @vf: pointer to the vf info
386 * @type: type of VSI to allocate
388 * alloc vf vsi context & resources
390 static int i40e_alloc_vsi_res(struct i40e_vf
*vf
, enum i40e_vsi_type type
)
392 struct i40e_mac_filter
*f
= NULL
;
393 struct i40e_pf
*pf
= vf
->pf
;
394 struct i40e_vsi
*vsi
;
397 vsi
= i40e_vsi_setup(pf
, type
, pf
->vsi
[pf
->lan_vsi
]->seid
, vf
->vf_id
);
400 dev_err(&pf
->pdev
->dev
,
401 "add vsi failed for vf %d, aq_err %d\n",
402 vf
->vf_id
, pf
->hw
.aq
.asq_last_status
);
404 goto error_alloc_vsi_res
;
406 if (type
== I40E_VSI_SRIOV
) {
407 u8 brdcast
[ETH_ALEN
] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
408 vf
->lan_vsi_index
= vsi
->idx
;
409 vf
->lan_vsi_id
= vsi
->id
;
410 dev_info(&pf
->pdev
->dev
,
411 "VF %d assigned LAN VSI index %d, VSI id %d\n",
412 vf
->vf_id
, vsi
->idx
, vsi
->id
);
413 /* If the port VLAN has been configured and then the
414 * VF driver was removed then the VSI port VLAN
415 * configuration was destroyed. Check if there is
416 * a port VLAN and restore the VSI configuration if
419 if (vf
->port_vlan_id
)
420 i40e_vsi_add_pvid(vsi
, vf
->port_vlan_id
);
421 f
= i40e_add_filter(vsi
, vf
->default_lan_addr
.addr
,
422 vf
->port_vlan_id
, true, false);
424 dev_info(&pf
->pdev
->dev
,
425 "Could not allocate VF MAC addr\n");
426 f
= i40e_add_filter(vsi
, brdcast
, vf
->port_vlan_id
,
429 dev_info(&pf
->pdev
->dev
,
430 "Could not allocate VF broadcast filter\n");
433 /* program mac filter */
434 ret
= i40e_sync_vsi_filters(vsi
);
436 dev_err(&pf
->pdev
->dev
, "Unable to program ucast filters\n");
438 /* Set VF bandwidth if specified */
440 ret
= i40e_aq_config_vsi_bw_limit(&pf
->hw
, vsi
->seid
,
441 vf
->tx_rate
/ 50, 0, NULL
);
443 dev_err(&pf
->pdev
->dev
, "Unable to set tx rate, VF %d, error code %d.\n",
452 * i40e_enable_vf_mappings
453 * @vf: pointer to the vf info
457 static void i40e_enable_vf_mappings(struct i40e_vf
*vf
)
459 struct i40e_pf
*pf
= vf
->pf
;
460 struct i40e_hw
*hw
= &pf
->hw
;
461 u32 reg
, total_queue_pairs
= 0;
464 /* Tell the hardware we're using noncontiguous mapping. HW requires
465 * that VF queues be mapped using this method, even when they are
466 * contiguous in real life
468 wr32(hw
, I40E_VSILAN_QBASE(vf
->lan_vsi_id
),
469 I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK
);
471 /* enable VF vplan_qtable mappings */
472 reg
= I40E_VPLAN_MAPENA_TXRX_ENA_MASK
;
473 wr32(hw
, I40E_VPLAN_MAPENA(vf
->vf_id
), reg
);
475 /* map PF queues to VF queues */
476 for (j
= 0; j
< pf
->vsi
[vf
->lan_vsi_index
]->num_queue_pairs
; j
++) {
477 u16 qid
= i40e_vc_get_pf_queue_id(vf
, vf
->lan_vsi_index
, j
);
478 reg
= (qid
& I40E_VPLAN_QTABLE_QINDEX_MASK
);
479 wr32(hw
, I40E_VPLAN_QTABLE(total_queue_pairs
, vf
->vf_id
), reg
);
483 /* map PF queues to VSI */
484 for (j
= 0; j
< 7; j
++) {
485 if (j
* 2 >= pf
->vsi
[vf
->lan_vsi_index
]->num_queue_pairs
) {
486 reg
= 0x07FF07FF; /* unused */
488 u16 qid
= i40e_vc_get_pf_queue_id(vf
, vf
->lan_vsi_index
,
491 qid
= i40e_vc_get_pf_queue_id(vf
, vf
->lan_vsi_index
,
495 wr32(hw
, I40E_VSILAN_QTABLE(j
, vf
->lan_vsi_id
), reg
);
502 * i40e_disable_vf_mappings
503 * @vf: pointer to the vf info
505 * disable vf mappings
507 static void i40e_disable_vf_mappings(struct i40e_vf
*vf
)
509 struct i40e_pf
*pf
= vf
->pf
;
510 struct i40e_hw
*hw
= &pf
->hw
;
513 /* disable qp mappings */
514 wr32(hw
, I40E_VPLAN_MAPENA(vf
->vf_id
), 0);
515 for (i
= 0; i
< I40E_MAX_VSI_QP
; i
++)
516 wr32(hw
, I40E_VPLAN_QTABLE(i
, vf
->vf_id
),
517 I40E_QUEUE_END_OF_LIST
);
523 * @vf: pointer to the vf info
527 static void i40e_free_vf_res(struct i40e_vf
*vf
)
529 struct i40e_pf
*pf
= vf
->pf
;
530 struct i40e_hw
*hw
= &pf
->hw
;
534 /* free vsi & disconnect it from the parent uplink */
535 if (vf
->lan_vsi_index
) {
536 i40e_vsi_release(pf
->vsi
[vf
->lan_vsi_index
]);
537 vf
->lan_vsi_index
= 0;
540 msix_vf
= pf
->hw
.func_caps
.num_msix_vectors_vf
;
542 /* disable interrupts so the VF starts in a known state */
543 for (i
= 0; i
< msix_vf
; i
++) {
544 /* format is same for both registers */
546 reg_idx
= I40E_VFINT_DYN_CTL0(vf
->vf_id
);
548 reg_idx
= I40E_VFINT_DYN_CTLN(((msix_vf
- 1) *
551 wr32(hw
, reg_idx
, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK
);
555 /* clear the irq settings */
556 for (i
= 0; i
< msix_vf
; i
++) {
557 /* format is same for both registers */
559 reg_idx
= I40E_VPINT_LNKLST0(vf
->vf_id
);
561 reg_idx
= I40E_VPINT_LNKLSTN(((msix_vf
- 1) *
564 reg
= (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK
|
565 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK
);
566 wr32(hw
, reg_idx
, reg
);
569 /* reset some of the state varibles keeping
570 * track of the resources
572 vf
->num_queue_pairs
= 0;
578 * @vf: pointer to the vf info
580 * allocate vf resources
582 static int i40e_alloc_vf_res(struct i40e_vf
*vf
)
584 struct i40e_pf
*pf
= vf
->pf
;
585 int total_queue_pairs
= 0;
588 /* allocate hw vsi context & associated resources */
589 ret
= i40e_alloc_vsi_res(vf
, I40E_VSI_SRIOV
);
592 total_queue_pairs
+= pf
->vsi
[vf
->lan_vsi_index
]->num_queue_pairs
;
593 set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE
, &vf
->vf_caps
);
595 /* store the total qps number for the runtime
598 vf
->num_queue_pairs
= total_queue_pairs
;
600 /* vf is now completely initialized */
601 set_bit(I40E_VF_STAT_INIT
, &vf
->vf_states
);
605 i40e_free_vf_res(vf
);
610 #define VF_DEVICE_STATUS 0xAA
611 #define VF_TRANS_PENDING_MASK 0x20
613 * i40e_quiesce_vf_pci
614 * @vf: pointer to the vf structure
616 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
617 * if the transactions never clear.
619 static int i40e_quiesce_vf_pci(struct i40e_vf
*vf
)
621 struct i40e_pf
*pf
= vf
->pf
;
622 struct i40e_hw
*hw
= &pf
->hw
;
626 vf_abs_id
= vf
->vf_id
+ hw
->func_caps
.vf_base_id
;
628 wr32(hw
, I40E_PF_PCI_CIAA
,
629 VF_DEVICE_STATUS
| (vf_abs_id
<< I40E_PF_PCI_CIAA_VF_NUM_SHIFT
));
630 for (i
= 0; i
< 100; i
++) {
631 reg
= rd32(hw
, I40E_PF_PCI_CIAD
);
632 if ((reg
& VF_TRANS_PENDING_MASK
) == 0)
641 * @vf: pointer to the vf structure
642 * @flr: VFLR was issued or not
646 void i40e_reset_vf(struct i40e_vf
*vf
, bool flr
)
648 struct i40e_pf
*pf
= vf
->pf
;
649 struct i40e_hw
*hw
= &pf
->hw
;
655 clear_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
);
657 /* In the case of a VFLR, the HW has already reset the VF and we
658 * just need to clean up, so don't hit the VFRTRIG register.
661 /* reset vf using VPGEN_VFRTRIG reg */
662 reg
= rd32(hw
, I40E_VPGEN_VFRTRIG(vf
->vf_id
));
663 reg
|= I40E_VPGEN_VFRTRIG_VFSWR_MASK
;
664 wr32(hw
, I40E_VPGEN_VFRTRIG(vf
->vf_id
), reg
);
668 if (i40e_quiesce_vf_pci(vf
))
669 dev_err(&pf
->pdev
->dev
, "VF %d PCI transactions stuck\n",
672 /* poll VPGEN_VFRSTAT reg to make sure
673 * that reset is complete
675 for (i
= 0; i
< 100; i
++) {
676 /* vf reset requires driver to first reset the
677 * vf & than poll the status register to make sure
678 * that the requested op was completed
682 reg
= rd32(hw
, I40E_VPGEN_VFRSTAT(vf
->vf_id
));
683 if (reg
& I40E_VPGEN_VFRSTAT_VFRD_MASK
) {
690 dev_err(&pf
->pdev
->dev
, "VF reset check timeout on VF %d\n",
692 wr32(hw
, I40E_VFGEN_RSTAT1(vf
->vf_id
), I40E_VFR_COMPLETED
);
693 /* clear the reset bit in the VPGEN_VFRTRIG reg */
694 reg
= rd32(hw
, I40E_VPGEN_VFRTRIG(vf
->vf_id
));
695 reg
&= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK
;
696 wr32(hw
, I40E_VPGEN_VFRTRIG(vf
->vf_id
), reg
);
698 /* On initial reset, we won't have any queues */
699 if (vf
->lan_vsi_index
== 0)
702 i40e_vsi_control_rings(pf
->vsi
[vf
->lan_vsi_index
], false);
704 /* reallocate vf resources to reset the VSI state */
705 i40e_free_vf_res(vf
);
706 i40e_alloc_vf_res(vf
);
707 i40e_enable_vf_mappings(vf
);
708 set_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
);
710 /* tell the VF the reset is done */
711 wr32(hw
, I40E_VFGEN_RSTAT1(vf
->vf_id
), I40E_VFR_VFACTIVE
);
716 * i40e_vfs_are_assigned
717 * @pf: pointer to the pf structure
719 * Determine if any VFs are assigned to VMs
721 static bool i40e_vfs_are_assigned(struct i40e_pf
*pf
)
723 struct pci_dev
*pdev
= pf
->pdev
;
724 struct pci_dev
*vfdev
;
726 /* loop through all the VFs to see if we own any that are assigned */
727 vfdev
= pci_get_device(PCI_VENDOR_ID_INTEL
, I40E_DEV_ID_VF
, NULL
);
729 /* if we don't own it we don't care */
730 if (vfdev
->is_virtfn
&& pci_physfn(vfdev
) == pdev
) {
731 /* if it is assigned we cannot release it */
732 if (vfdev
->dev_flags
& PCI_DEV_FLAGS_ASSIGNED
)
736 vfdev
= pci_get_device(PCI_VENDOR_ID_INTEL
,
743 #ifdef CONFIG_PCI_IOV
746 * i40e_enable_pf_switch_lb
747 * @pf: pointer to the pf structure
749 * enable switch loop back or die - no point in a return value
751 static void i40e_enable_pf_switch_lb(struct i40e_pf
*pf
)
753 struct i40e_vsi
*vsi
= pf
->vsi
[pf
->lan_vsi
];
754 struct i40e_vsi_context ctxt
;
757 ctxt
.seid
= pf
->main_vsi_seid
;
758 ctxt
.pf_num
= pf
->hw
.pf_id
;
760 aq_ret
= i40e_aq_get_vsi_params(&pf
->hw
, &ctxt
, NULL
);
762 dev_info(&pf
->pdev
->dev
,
763 "%s couldn't get pf vsi config, err %d, aq_err %d\n",
764 __func__
, aq_ret
, pf
->hw
.aq
.asq_last_status
);
767 ctxt
.flags
= I40E_AQ_VSI_TYPE_PF
;
768 ctxt
.info
.valid_sections
= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID
);
769 ctxt
.info
.switch_id
|= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB
);
771 aq_ret
= i40e_aq_update_vsi_params(&vsi
->back
->hw
, &ctxt
, NULL
);
773 dev_info(&pf
->pdev
->dev
,
774 "%s: update vsi switch failed, aq_err=%d\n",
775 __func__
, vsi
->back
->hw
.aq
.asq_last_status
);
781 * i40e_disable_pf_switch_lb
782 * @pf: pointer to the pf structure
784 * disable switch loop back or die - no point in a return value
786 static void i40e_disable_pf_switch_lb(struct i40e_pf
*pf
)
788 struct i40e_vsi
*vsi
= pf
->vsi
[pf
->lan_vsi
];
789 struct i40e_vsi_context ctxt
;
792 ctxt
.seid
= pf
->main_vsi_seid
;
793 ctxt
.pf_num
= pf
->hw
.pf_id
;
795 aq_ret
= i40e_aq_get_vsi_params(&pf
->hw
, &ctxt
, NULL
);
797 dev_info(&pf
->pdev
->dev
,
798 "%s couldn't get pf vsi config, err %d, aq_err %d\n",
799 __func__
, aq_ret
, pf
->hw
.aq
.asq_last_status
);
802 ctxt
.flags
= I40E_AQ_VSI_TYPE_PF
;
803 ctxt
.info
.valid_sections
= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID
);
804 ctxt
.info
.switch_id
&= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB
);
806 aq_ret
= i40e_aq_update_vsi_params(&vsi
->back
->hw
, &ctxt
, NULL
);
808 dev_info(&pf
->pdev
->dev
,
809 "%s: update vsi switch failed, aq_err=%d\n",
810 __func__
, vsi
->back
->hw
.aq
.asq_last_status
);
816 * @pf: pointer to the pf structure
820 void i40e_free_vfs(struct i40e_pf
*pf
)
822 struct i40e_hw
*hw
= &pf
->hw
;
823 u32 reg_idx
, bit_idx
;
829 /* Disable interrupt 0 so we don't try to handle the VFLR. */
830 i40e_irq_dynamic_disable_icr0(pf
);
832 mdelay(10); /* let any messages in transit get finished up */
833 /* free up vf resources */
834 tmp
= pf
->num_alloc_vfs
;
835 pf
->num_alloc_vfs
= 0;
836 for (i
= 0; i
< tmp
; i
++) {
837 if (test_bit(I40E_VF_STAT_INIT
, &pf
->vf
[i
].vf_states
))
838 i40e_free_vf_res(&pf
->vf
[i
]);
839 /* disable qp mappings */
840 i40e_disable_vf_mappings(&pf
->vf
[i
]);
846 /* This check is for when the driver is unloaded while VFs are
847 * assigned. Setting the number of VFs to 0 through sysfs is caught
848 * before this function ever gets called.
850 if (!i40e_vfs_are_assigned(pf
)) {
851 pci_disable_sriov(pf
->pdev
);
852 /* Acknowledge VFLR for all VFS. Without this, VFs will fail to
853 * work correctly when SR-IOV gets re-enabled.
855 for (vf_id
= 0; vf_id
< tmp
; vf_id
++) {
856 reg_idx
= (hw
->func_caps
.vf_base_id
+ vf_id
) / 32;
857 bit_idx
= (hw
->func_caps
.vf_base_id
+ vf_id
) % 32;
858 wr32(hw
, I40E_GLGEN_VFLRSTAT(reg_idx
), (1 << bit_idx
));
860 i40e_disable_pf_switch_lb(pf
);
862 dev_warn(&pf
->pdev
->dev
,
863 "unable to disable SR-IOV because VFs are assigned.\n");
866 /* Re-enable interrupt 0. */
867 i40e_irq_dynamic_enable_icr0(pf
);
870 #ifdef CONFIG_PCI_IOV
873 * @pf: pointer to the pf structure
874 * @num_alloc_vfs: number of vfs to allocate
876 * allocate vf resources
878 int i40e_alloc_vfs(struct i40e_pf
*pf
, u16 num_alloc_vfs
)
883 /* Disable interrupt 0 so we don't try to handle the VFLR. */
884 i40e_irq_dynamic_disable_icr0(pf
);
886 /* Check to see if we're just allocating resources for extant VFs */
887 if (pci_num_vf(pf
->pdev
) != num_alloc_vfs
) {
888 ret
= pci_enable_sriov(pf
->pdev
, num_alloc_vfs
);
890 dev_err(&pf
->pdev
->dev
,
891 "Failed to enable SR-IOV, error %d.\n", ret
);
892 pf
->num_alloc_vfs
= 0;
896 /* allocate memory */
897 vfs
= kcalloc(num_alloc_vfs
, sizeof(struct i40e_vf
), GFP_KERNEL
);
903 /* apply default profile */
904 for (i
= 0; i
< num_alloc_vfs
; i
++) {
906 vfs
[i
].parent_type
= I40E_SWITCH_ELEMENT_TYPE_VEB
;
909 /* assign default capabilities */
910 set_bit(I40E_VIRTCHNL_VF_CAP_L2
, &vfs
[i
].vf_caps
);
911 /* vf resources get allocated during reset */
912 i40e_reset_vf(&vfs
[i
], false);
914 /* enable vf vplan_qtable mappings */
915 i40e_enable_vf_mappings(&vfs
[i
]);
918 pf
->num_alloc_vfs
= num_alloc_vfs
;
920 i40e_enable_pf_switch_lb(pf
);
925 /* Re-enable interrupt 0. */
926 i40e_irq_dynamic_enable_icr0(pf
);
/**
 * i40e_pci_sriov_enable
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of vfs to allocate
 *
 * Enable or change the number of VFs
 **/
static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int pre_existing_vfs = pci_num_vf(pdev);
	int err = 0;

	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		/* changing the VF count requires a full teardown first */
		i40e_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		goto out;

	if (num_vfs > pf->num_req_vfs) {
		err = -EPERM;
		goto err_out;
	}

	err = i40e_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
		goto err_out;
	}

out:
	return num_vfs;

err_out:
	return err;
#endif
	return 0;
}
972 * i40e_pci_sriov_configure
973 * @pdev: pointer to a pci_dev structure
974 * @num_vfs: number of vfs to allocate
976 * Enable or change the number of VFs. Called when the user updates the number
979 int i40e_pci_sriov_configure(struct pci_dev
*pdev
, int num_vfs
)
981 struct i40e_pf
*pf
= pci_get_drvdata(pdev
);
984 return i40e_pci_sriov_enable(pdev
, num_vfs
);
986 if (!i40e_vfs_are_assigned(pf
)) {
989 dev_warn(&pdev
->dev
, "Unable to free VFs because some are assigned to VMs.\n");
995 /***********************virtual channel routines******************/
998 * i40e_vc_send_msg_to_vf
999 * @vf: pointer to the vf info
1000 * @v_opcode: virtual channel opcode
1001 * @v_retval: virtual channel return value
1002 * @msg: pointer to the msg buffer
1003 * @msglen: msg length
1007 static int i40e_vc_send_msg_to_vf(struct i40e_vf
*vf
, u32 v_opcode
,
1008 u32 v_retval
, u8
*msg
, u16 msglen
)
1010 struct i40e_pf
*pf
= vf
->pf
;
1011 struct i40e_hw
*hw
= &pf
->hw
;
1012 int true_vf_id
= vf
->vf_id
+ hw
->func_caps
.vf_base_id
;
1015 /* single place to detect unsuccessful return values */
1017 vf
->num_invalid_msgs
++;
1018 dev_err(&pf
->pdev
->dev
, "Failed opcode %d Error: %d\n",
1019 v_opcode
, v_retval
);
1020 if (vf
->num_invalid_msgs
>
1021 I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED
) {
1022 dev_err(&pf
->pdev
->dev
,
1023 "Number of invalid messages exceeded for VF %d\n",
1025 dev_err(&pf
->pdev
->dev
, "Use PF Control I/F to enable the VF\n");
1026 set_bit(I40E_VF_STAT_DISABLED
, &vf
->vf_states
);
1029 vf
->num_valid_msgs
++;
1032 aq_ret
= i40e_aq_send_msg_to_vf(hw
, true_vf_id
, v_opcode
, v_retval
,
1035 dev_err(&pf
->pdev
->dev
,
1036 "Unable to send the message to VF %d aq_err %d\n",
1037 vf
->vf_id
, pf
->hw
.aq
.asq_last_status
);
1045 * i40e_vc_send_resp_to_vf
1046 * @vf: pointer to the vf info
1047 * @opcode: operation code
1048 * @retval: return value
1050 * send resp msg to vf
1052 static int i40e_vc_send_resp_to_vf(struct i40e_vf
*vf
,
1053 enum i40e_virtchnl_ops opcode
,
1056 return i40e_vc_send_msg_to_vf(vf
, opcode
, retval
, NULL
, 0);
1060 * i40e_vc_get_version_msg
1061 * @vf: pointer to the vf info
1063 * called from the vf to request the API version used by the PF
1065 static int i40e_vc_get_version_msg(struct i40e_vf
*vf
)
1067 struct i40e_virtchnl_version_info info
= {
1068 I40E_VIRTCHNL_VERSION_MAJOR
, I40E_VIRTCHNL_VERSION_MINOR
1071 return i40e_vc_send_msg_to_vf(vf
, I40E_VIRTCHNL_OP_VERSION
,
1072 I40E_SUCCESS
, (u8
*)&info
,
1074 i40e_virtchnl_version_info
));
1078 * i40e_vc_get_vf_resources_msg
1079 * @vf: pointer to the vf info
1080 * @msg: pointer to the msg buffer
1081 * @msglen: msg length
1083 * called from the vf to request its resources
1085 static int i40e_vc_get_vf_resources_msg(struct i40e_vf
*vf
)
1087 struct i40e_virtchnl_vf_resource
*vfres
= NULL
;
1088 struct i40e_pf
*pf
= vf
->pf
;
1089 i40e_status aq_ret
= 0;
1090 struct i40e_vsi
*vsi
;
1095 if (!test_bit(I40E_VF_STAT_INIT
, &vf
->vf_states
)) {
1096 aq_ret
= I40E_ERR_PARAM
;
1100 len
= (sizeof(struct i40e_virtchnl_vf_resource
) +
1101 sizeof(struct i40e_virtchnl_vsi_resource
) * num_vsis
);
1103 vfres
= kzalloc(len
, GFP_KERNEL
);
1105 aq_ret
= I40E_ERR_NO_MEMORY
;
1110 vfres
->vf_offload_flags
= I40E_VIRTCHNL_VF_OFFLOAD_L2
;
1111 vsi
= pf
->vsi
[vf
->lan_vsi_index
];
1112 if (!vsi
->info
.pvid
)
1113 vfres
->vf_offload_flags
|= I40E_VIRTCHNL_VF_OFFLOAD_VLAN
;
1115 vfres
->num_vsis
= num_vsis
;
1116 vfres
->num_queue_pairs
= vf
->num_queue_pairs
;
1117 vfres
->max_vectors
= pf
->hw
.func_caps
.num_msix_vectors_vf
;
1118 if (vf
->lan_vsi_index
) {
1119 vfres
->vsi_res
[i
].vsi_id
= vf
->lan_vsi_index
;
1120 vfres
->vsi_res
[i
].vsi_type
= I40E_VSI_SRIOV
;
1121 vfres
->vsi_res
[i
].num_queue_pairs
=
1122 pf
->vsi
[vf
->lan_vsi_index
]->num_queue_pairs
;
1123 memcpy(vfres
->vsi_res
[i
].default_mac_addr
,
1124 vf
->default_lan_addr
.addr
, ETH_ALEN
);
1127 set_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
);
1130 /* send the response back to the vf */
1131 ret
= i40e_vc_send_msg_to_vf(vf
, I40E_VIRTCHNL_OP_GET_VF_RESOURCES
,
1132 aq_ret
, (u8
*)vfres
, len
);
1139 * i40e_vc_reset_vf_msg
1140 * @vf: pointer to the vf info
1141 * @msg: pointer to the msg buffer
1142 * @msglen: msg length
1144 * called from the vf to reset itself,
1145 * unlike other virtchnl messages, pf driver
1146 * doesn't send the response back to the vf
1148 static void i40e_vc_reset_vf_msg(struct i40e_vf
*vf
)
1150 if (test_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
))
1151 i40e_reset_vf(vf
, false);
1155 * i40e_vc_config_promiscuous_mode_msg
1156 * @vf: pointer to the vf info
1157 * @msg: pointer to the msg buffer
1158 * @msglen: msg length
1160 * called from the vf to configure the promiscuous mode of
1163 static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf
*vf
,
1164 u8
*msg
, u16 msglen
)
1166 struct i40e_virtchnl_promisc_info
*info
=
1167 (struct i40e_virtchnl_promisc_info
*)msg
;
1168 struct i40e_pf
*pf
= vf
->pf
;
1169 struct i40e_hw
*hw
= &pf
->hw
;
1170 bool allmulti
= false;
1171 bool promisc
= false;
1174 if (!test_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
) ||
1175 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE
, &vf
->vf_caps
) ||
1176 !i40e_vc_isvalid_vsi_id(vf
, info
->vsi_id
) ||
1177 (pf
->vsi
[info
->vsi_id
]->type
!= I40E_VSI_FCOE
)) {
1178 aq_ret
= I40E_ERR_PARAM
;
1182 if (info
->flags
& I40E_FLAG_VF_UNICAST_PROMISC
)
1184 aq_ret
= i40e_aq_set_vsi_unicast_promiscuous(hw
, info
->vsi_id
,
1189 if (info
->flags
& I40E_FLAG_VF_MULTICAST_PROMISC
)
1191 aq_ret
= i40e_aq_set_vsi_multicast_promiscuous(hw
, info
->vsi_id
,
1195 /* send the response to the vf */
1196 return i40e_vc_send_resp_to_vf(vf
,
1197 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
,
1202 * i40e_vc_config_queues_msg
1203 * @vf: pointer to the vf info
1204 * @msg: pointer to the msg buffer
1205 * @msglen: msg length
1207 * called from the vf to configure the rx/tx
1210 static int i40e_vc_config_queues_msg(struct i40e_vf
*vf
, u8
*msg
, u16 msglen
)
1212 struct i40e_virtchnl_vsi_queue_config_info
*qci
=
1213 (struct i40e_virtchnl_vsi_queue_config_info
*)msg
;
1214 struct i40e_virtchnl_queue_pair_info
*qpi
;
1215 u16 vsi_id
, vsi_queue_id
;
1216 i40e_status aq_ret
= 0;
1219 if (!test_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
)) {
1220 aq_ret
= I40E_ERR_PARAM
;
1224 vsi_id
= qci
->vsi_id
;
1225 if (!i40e_vc_isvalid_vsi_id(vf
, vsi_id
)) {
1226 aq_ret
= I40E_ERR_PARAM
;
1229 for (i
= 0; i
< qci
->num_queue_pairs
; i
++) {
1230 qpi
= &qci
->qpair
[i
];
1231 vsi_queue_id
= qpi
->txq
.queue_id
;
1232 if ((qpi
->txq
.vsi_id
!= vsi_id
) ||
1233 (qpi
->rxq
.vsi_id
!= vsi_id
) ||
1234 (qpi
->rxq
.queue_id
!= vsi_queue_id
) ||
1235 !i40e_vc_isvalid_queue_id(vf
, vsi_id
, vsi_queue_id
)) {
1236 aq_ret
= I40E_ERR_PARAM
;
1240 if (i40e_config_vsi_rx_queue(vf
, vsi_id
, vsi_queue_id
,
1242 i40e_config_vsi_tx_queue(vf
, vsi_id
, vsi_queue_id
,
1244 aq_ret
= I40E_ERR_PARAM
;
1250 /* send the response to the vf */
1251 return i40e_vc_send_resp_to_vf(vf
, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
,
1256 * i40e_vc_config_irq_map_msg
1257 * @vf: pointer to the vf info
1258 * @msg: pointer to the msg buffer
1259 * @msglen: msg length
1261 * called from the vf to configure the irq to
1264 static int i40e_vc_config_irq_map_msg(struct i40e_vf
*vf
, u8
*msg
, u16 msglen
)
1266 struct i40e_virtchnl_irq_map_info
*irqmap_info
=
1267 (struct i40e_virtchnl_irq_map_info
*)msg
;
1268 struct i40e_virtchnl_vector_map
*map
;
1269 u16 vsi_id
, vsi_queue_id
, vector_id
;
1270 i40e_status aq_ret
= 0;
1271 unsigned long tempmap
;
1274 if (!test_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
)) {
1275 aq_ret
= I40E_ERR_PARAM
;
1279 for (i
= 0; i
< irqmap_info
->num_vectors
; i
++) {
1280 map
= &irqmap_info
->vecmap
[i
];
1282 vector_id
= map
->vector_id
;
1283 vsi_id
= map
->vsi_id
;
1284 /* validate msg params */
1285 if (!i40e_vc_isvalid_vector_id(vf
, vector_id
) ||
1286 !i40e_vc_isvalid_vsi_id(vf
, vsi_id
)) {
1287 aq_ret
= I40E_ERR_PARAM
;
1291 /* lookout for the invalid queue index */
1292 tempmap
= map
->rxq_map
;
1293 for_each_set_bit(vsi_queue_id
, &tempmap
, I40E_MAX_VSI_QP
) {
1294 if (!i40e_vc_isvalid_queue_id(vf
, vsi_id
,
1296 aq_ret
= I40E_ERR_PARAM
;
1301 tempmap
= map
->txq_map
;
1302 for_each_set_bit(vsi_queue_id
, &tempmap
, I40E_MAX_VSI_QP
) {
1303 if (!i40e_vc_isvalid_queue_id(vf
, vsi_id
,
1305 aq_ret
= I40E_ERR_PARAM
;
1310 i40e_config_irq_link_list(vf
, vsi_id
, map
);
1313 /* send the response to the vf */
1314 return i40e_vc_send_resp_to_vf(vf
, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
,
1319 * i40e_vc_enable_queues_msg
1320 * @vf: pointer to the vf info
1321 * @msg: pointer to the msg buffer
1322 * @msglen: msg length
1324 * called from the vf to enable all or specific queue(s)
1326 static int i40e_vc_enable_queues_msg(struct i40e_vf
*vf
, u8
*msg
, u16 msglen
)
1328 struct i40e_virtchnl_queue_select
*vqs
=
1329 (struct i40e_virtchnl_queue_select
*)msg
;
1330 struct i40e_pf
*pf
= vf
->pf
;
1331 u16 vsi_id
= vqs
->vsi_id
;
1332 i40e_status aq_ret
= 0;
1334 if (!test_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
)) {
1335 aq_ret
= I40E_ERR_PARAM
;
1339 if (!i40e_vc_isvalid_vsi_id(vf
, vsi_id
)) {
1340 aq_ret
= I40E_ERR_PARAM
;
1344 if ((0 == vqs
->rx_queues
) && (0 == vqs
->tx_queues
)) {
1345 aq_ret
= I40E_ERR_PARAM
;
1348 if (i40e_vsi_control_rings(pf
->vsi
[vsi_id
], true))
1349 aq_ret
= I40E_ERR_TIMEOUT
;
1351 /* send the response to the vf */
1352 return i40e_vc_send_resp_to_vf(vf
, I40E_VIRTCHNL_OP_ENABLE_QUEUES
,
1357 * i40e_vc_disable_queues_msg
1358 * @vf: pointer to the vf info
1359 * @msg: pointer to the msg buffer
1360 * @msglen: msg length
1362 * called from the vf to disable all or specific
1365 static int i40e_vc_disable_queues_msg(struct i40e_vf
*vf
, u8
*msg
, u16 msglen
)
1367 struct i40e_virtchnl_queue_select
*vqs
=
1368 (struct i40e_virtchnl_queue_select
*)msg
;
1369 struct i40e_pf
*pf
= vf
->pf
;
1370 u16 vsi_id
= vqs
->vsi_id
;
1371 i40e_status aq_ret
= 0;
1373 if (!test_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
)) {
1374 aq_ret
= I40E_ERR_PARAM
;
1378 if (!i40e_vc_isvalid_vsi_id(vf
, vqs
->vsi_id
)) {
1379 aq_ret
= I40E_ERR_PARAM
;
1383 if ((0 == vqs
->rx_queues
) && (0 == vqs
->tx_queues
)) {
1384 aq_ret
= I40E_ERR_PARAM
;
1387 if (i40e_vsi_control_rings(pf
->vsi
[vsi_id
], false))
1388 aq_ret
= I40E_ERR_TIMEOUT
;
1391 /* send the response to the vf */
1392 return i40e_vc_send_resp_to_vf(vf
, I40E_VIRTCHNL_OP_DISABLE_QUEUES
,
1397 * i40e_vc_get_stats_msg
1398 * @vf: pointer to the vf info
1399 * @msg: pointer to the msg buffer
1400 * @msglen: msg length
1402 * called from the vf to get vsi stats
1404 static int i40e_vc_get_stats_msg(struct i40e_vf
*vf
, u8
*msg
, u16 msglen
)
1406 struct i40e_virtchnl_queue_select
*vqs
=
1407 (struct i40e_virtchnl_queue_select
*)msg
;
1408 struct i40e_pf
*pf
= vf
->pf
;
1409 struct i40e_eth_stats stats
;
1410 i40e_status aq_ret
= 0;
1411 struct i40e_vsi
*vsi
;
1413 memset(&stats
, 0, sizeof(struct i40e_eth_stats
));
1415 if (!test_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
)) {
1416 aq_ret
= I40E_ERR_PARAM
;
1420 if (!i40e_vc_isvalid_vsi_id(vf
, vqs
->vsi_id
)) {
1421 aq_ret
= I40E_ERR_PARAM
;
1425 vsi
= pf
->vsi
[vqs
->vsi_id
];
1427 aq_ret
= I40E_ERR_PARAM
;
1430 i40e_update_eth_stats(vsi
);
1431 stats
= vsi
->eth_stats
;
1434 /* send the response back to the vf */
1435 return i40e_vc_send_msg_to_vf(vf
, I40E_VIRTCHNL_OP_GET_STATS
, aq_ret
,
1436 (u8
*)&stats
, sizeof(stats
));
1440 * i40e_check_vf_permission
1441 * @vf: pointer to the vf info
1442 * @macaddr: pointer to the MAC Address being checked
1444 * Check if the VF has permission to add or delete unicast MAC address
1445 * filters and return error code -EPERM if not. Then check if the
1446 * address filter requested is broadcast or zero and if so return
1447 * an invalid MAC address error code.
1449 static inline int i40e_check_vf_permission(struct i40e_vf
*vf
, u8
*macaddr
)
1451 struct i40e_pf
*pf
= vf
->pf
;
1454 if (is_broadcast_ether_addr(macaddr
) ||
1455 is_zero_ether_addr(macaddr
)) {
1456 dev_err(&pf
->pdev
->dev
, "invalid VF MAC addr %pM\n", macaddr
);
1457 ret
= I40E_ERR_INVALID_MAC_ADDR
;
1458 } else if (vf
->pf_set_mac
&& !is_multicast_ether_addr(macaddr
) &&
1459 !ether_addr_equal(macaddr
, vf
->default_lan_addr
.addr
)) {
1460 /* If the host VMM administrator has set the VF MAC address
1461 * administratively via the ndo_set_vf_mac command then deny
1462 * permission to the VF to add or delete unicast MAC addresses.
1463 * The VF may request to set the MAC address filter already
1464 * assigned to it so do not return an error in that case.
1466 dev_err(&pf
->pdev
->dev
,
1467 "VF attempting to override administratively set MAC address\nPlease reload the VF driver to resume normal operation\n");
1474 * i40e_vc_add_mac_addr_msg
1475 * @vf: pointer to the vf info
1476 * @msg: pointer to the msg buffer
1477 * @msglen: msg length
1479 * add guest mac address filter
1481 static int i40e_vc_add_mac_addr_msg(struct i40e_vf
*vf
, u8
*msg
, u16 msglen
)
1483 struct i40e_virtchnl_ether_addr_list
*al
=
1484 (struct i40e_virtchnl_ether_addr_list
*)msg
;
1485 struct i40e_pf
*pf
= vf
->pf
;
1486 struct i40e_vsi
*vsi
= NULL
;
1487 u16 vsi_id
= al
->vsi_id
;
1488 i40e_status ret
= 0;
1491 if (!test_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
) ||
1492 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE
, &vf
->vf_caps
) ||
1493 !i40e_vc_isvalid_vsi_id(vf
, vsi_id
)) {
1494 ret
= I40E_ERR_PARAM
;
1498 for (i
= 0; i
< al
->num_elements
; i
++) {
1499 ret
= i40e_check_vf_permission(vf
, al
->list
[i
].addr
);
1503 vsi
= pf
->vsi
[vsi_id
];
1505 /* add new addresses to the list */
1506 for (i
= 0; i
< al
->num_elements
; i
++) {
1507 struct i40e_mac_filter
*f
;
1509 f
= i40e_find_mac(vsi
, al
->list
[i
].addr
, true, false);
1511 if (i40e_is_vsi_in_vlan(vsi
))
1512 f
= i40e_put_mac_in_vlan(vsi
, al
->list
[i
].addr
,
1515 f
= i40e_add_filter(vsi
, al
->list
[i
].addr
, -1,
1520 dev_err(&pf
->pdev
->dev
,
1521 "Unable to add VF MAC filter\n");
1522 ret
= I40E_ERR_PARAM
;
1527 /* program the updated filter list */
1528 if (i40e_sync_vsi_filters(vsi
))
1529 dev_err(&pf
->pdev
->dev
, "Unable to program VF MAC filters\n");
1532 /* send the response to the vf */
1533 return i40e_vc_send_resp_to_vf(vf
, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
,
1538 * i40e_vc_del_mac_addr_msg
1539 * @vf: pointer to the vf info
1540 * @msg: pointer to the msg buffer
1541 * @msglen: msg length
1543 * remove guest mac address filter
1545 static int i40e_vc_del_mac_addr_msg(struct i40e_vf
*vf
, u8
*msg
, u16 msglen
)
1547 struct i40e_virtchnl_ether_addr_list
*al
=
1548 (struct i40e_virtchnl_ether_addr_list
*)msg
;
1549 struct i40e_pf
*pf
= vf
->pf
;
1550 struct i40e_vsi
*vsi
= NULL
;
1551 u16 vsi_id
= al
->vsi_id
;
1552 i40e_status ret
= 0;
1555 if (!test_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
) ||
1556 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE
, &vf
->vf_caps
) ||
1557 !i40e_vc_isvalid_vsi_id(vf
, vsi_id
)) {
1558 ret
= I40E_ERR_PARAM
;
1562 for (i
= 0; i
< al
->num_elements
; i
++) {
1563 if (is_broadcast_ether_addr(al
->list
[i
].addr
) ||
1564 is_zero_ether_addr(al
->list
[i
].addr
)) {
1565 dev_err(&pf
->pdev
->dev
, "invalid VF MAC addr %pM\n",
1567 ret
= I40E_ERR_INVALID_MAC_ADDR
;
1571 vsi
= pf
->vsi
[vsi_id
];
1573 /* delete addresses from the list */
1574 for (i
= 0; i
< al
->num_elements
; i
++)
1575 i40e_del_filter(vsi
, al
->list
[i
].addr
,
1576 I40E_VLAN_ANY
, true, false);
1578 /* program the updated filter list */
1579 if (i40e_sync_vsi_filters(vsi
))
1580 dev_err(&pf
->pdev
->dev
, "Unable to program VF MAC filters\n");
1583 /* send the response to the vf */
1584 return i40e_vc_send_resp_to_vf(vf
, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
,
1589 * i40e_vc_add_vlan_msg
1590 * @vf: pointer to the vf info
1591 * @msg: pointer to the msg buffer
1592 * @msglen: msg length
1594 * program guest vlan id
1596 static int i40e_vc_add_vlan_msg(struct i40e_vf
*vf
, u8
*msg
, u16 msglen
)
1598 struct i40e_virtchnl_vlan_filter_list
*vfl
=
1599 (struct i40e_virtchnl_vlan_filter_list
*)msg
;
1600 struct i40e_pf
*pf
= vf
->pf
;
1601 struct i40e_vsi
*vsi
= NULL
;
1602 u16 vsi_id
= vfl
->vsi_id
;
1603 i40e_status aq_ret
= 0;
1606 if (!test_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
) ||
1607 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE
, &vf
->vf_caps
) ||
1608 !i40e_vc_isvalid_vsi_id(vf
, vsi_id
)) {
1609 aq_ret
= I40E_ERR_PARAM
;
1613 for (i
= 0; i
< vfl
->num_elements
; i
++) {
1614 if (vfl
->vlan_id
[i
] > I40E_MAX_VLANID
) {
1615 aq_ret
= I40E_ERR_PARAM
;
1616 dev_err(&pf
->pdev
->dev
,
1617 "invalid VF VLAN id %d\n", vfl
->vlan_id
[i
]);
1621 vsi
= pf
->vsi
[vsi_id
];
1622 if (vsi
->info
.pvid
) {
1623 aq_ret
= I40E_ERR_PARAM
;
1627 i40e_vlan_stripping_enable(vsi
);
1628 for (i
= 0; i
< vfl
->num_elements
; i
++) {
1629 /* add new VLAN filter */
1630 int ret
= i40e_vsi_add_vlan(vsi
, vfl
->vlan_id
[i
]);
1632 dev_err(&pf
->pdev
->dev
,
1633 "Unable to add VF vlan filter %d, error %d\n",
1634 vfl
->vlan_id
[i
], ret
);
1638 /* send the response to the vf */
1639 return i40e_vc_send_resp_to_vf(vf
, I40E_VIRTCHNL_OP_ADD_VLAN
, aq_ret
);
1643 * i40e_vc_remove_vlan_msg
1644 * @vf: pointer to the vf info
1645 * @msg: pointer to the msg buffer
1646 * @msglen: msg length
1648 * remove programmed guest vlan id
1650 static int i40e_vc_remove_vlan_msg(struct i40e_vf
*vf
, u8
*msg
, u16 msglen
)
1652 struct i40e_virtchnl_vlan_filter_list
*vfl
=
1653 (struct i40e_virtchnl_vlan_filter_list
*)msg
;
1654 struct i40e_pf
*pf
= vf
->pf
;
1655 struct i40e_vsi
*vsi
= NULL
;
1656 u16 vsi_id
= vfl
->vsi_id
;
1657 i40e_status aq_ret
= 0;
1660 if (!test_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
) ||
1661 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE
, &vf
->vf_caps
) ||
1662 !i40e_vc_isvalid_vsi_id(vf
, vsi_id
)) {
1663 aq_ret
= I40E_ERR_PARAM
;
1667 for (i
= 0; i
< vfl
->num_elements
; i
++) {
1668 if (vfl
->vlan_id
[i
] > I40E_MAX_VLANID
) {
1669 aq_ret
= I40E_ERR_PARAM
;
1674 vsi
= pf
->vsi
[vsi_id
];
1675 if (vsi
->info
.pvid
) {
1676 aq_ret
= I40E_ERR_PARAM
;
1680 for (i
= 0; i
< vfl
->num_elements
; i
++) {
1681 int ret
= i40e_vsi_kill_vlan(vsi
, vfl
->vlan_id
[i
]);
1683 dev_err(&pf
->pdev
->dev
,
1684 "Unable to delete VF vlan filter %d, error %d\n",
1685 vfl
->vlan_id
[i
], ret
);
1689 /* send the response to the vf */
1690 return i40e_vc_send_resp_to_vf(vf
, I40E_VIRTCHNL_OP_DEL_VLAN
, aq_ret
);
1694 * i40e_vc_validate_vf_msg
1695 * @vf: pointer to the vf info
1696 * @msg: pointer to the msg buffer
1697 * @msglen: msg length
1698 * @msghndl: msg handle
1702 static int i40e_vc_validate_vf_msg(struct i40e_vf
*vf
, u32 v_opcode
,
1703 u32 v_retval
, u8
*msg
, u16 msglen
)
1705 bool err_msg_format
= false;
1708 /* Check if VF is disabled. */
1709 if (test_bit(I40E_VF_STAT_DISABLED
, &vf
->vf_states
))
1710 return I40E_ERR_PARAM
;
1712 /* Validate message length. */
1714 case I40E_VIRTCHNL_OP_VERSION
:
1715 valid_len
= sizeof(struct i40e_virtchnl_version_info
);
1717 case I40E_VIRTCHNL_OP_RESET_VF
:
1718 case I40E_VIRTCHNL_OP_GET_VF_RESOURCES
:
1721 case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE
:
1722 valid_len
= sizeof(struct i40e_virtchnl_txq_info
);
1724 case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE
:
1725 valid_len
= sizeof(struct i40e_virtchnl_rxq_info
);
1727 case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
:
1728 valid_len
= sizeof(struct i40e_virtchnl_vsi_queue_config_info
);
1729 if (msglen
>= valid_len
) {
1730 struct i40e_virtchnl_vsi_queue_config_info
*vqc
=
1731 (struct i40e_virtchnl_vsi_queue_config_info
*)msg
;
1732 valid_len
+= (vqc
->num_queue_pairs
*
1734 i40e_virtchnl_queue_pair_info
));
1735 if (vqc
->num_queue_pairs
== 0)
1736 err_msg_format
= true;
1739 case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
:
1740 valid_len
= sizeof(struct i40e_virtchnl_irq_map_info
);
1741 if (msglen
>= valid_len
) {
1742 struct i40e_virtchnl_irq_map_info
*vimi
=
1743 (struct i40e_virtchnl_irq_map_info
*)msg
;
1744 valid_len
+= (vimi
->num_vectors
*
1745 sizeof(struct i40e_virtchnl_vector_map
));
1746 if (vimi
->num_vectors
== 0)
1747 err_msg_format
= true;
1750 case I40E_VIRTCHNL_OP_ENABLE_QUEUES
:
1751 case I40E_VIRTCHNL_OP_DISABLE_QUEUES
:
1752 valid_len
= sizeof(struct i40e_virtchnl_queue_select
);
1754 case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
:
1755 case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
:
1756 valid_len
= sizeof(struct i40e_virtchnl_ether_addr_list
);
1757 if (msglen
>= valid_len
) {
1758 struct i40e_virtchnl_ether_addr_list
*veal
=
1759 (struct i40e_virtchnl_ether_addr_list
*)msg
;
1760 valid_len
+= veal
->num_elements
*
1761 sizeof(struct i40e_virtchnl_ether_addr
);
1762 if (veal
->num_elements
== 0)
1763 err_msg_format
= true;
1766 case I40E_VIRTCHNL_OP_ADD_VLAN
:
1767 case I40E_VIRTCHNL_OP_DEL_VLAN
:
1768 valid_len
= sizeof(struct i40e_virtchnl_vlan_filter_list
);
1769 if (msglen
>= valid_len
) {
1770 struct i40e_virtchnl_vlan_filter_list
*vfl
=
1771 (struct i40e_virtchnl_vlan_filter_list
*)msg
;
1772 valid_len
+= vfl
->num_elements
* sizeof(u16
);
1773 if (vfl
->num_elements
== 0)
1774 err_msg_format
= true;
1777 case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
:
1778 valid_len
= sizeof(struct i40e_virtchnl_promisc_info
);
1780 case I40E_VIRTCHNL_OP_GET_STATS
:
1781 valid_len
= sizeof(struct i40e_virtchnl_queue_select
);
1783 /* These are always errors coming from the VF. */
1784 case I40E_VIRTCHNL_OP_EVENT
:
1785 case I40E_VIRTCHNL_OP_UNKNOWN
:
1790 /* few more checks */
1791 if ((valid_len
!= msglen
) || (err_msg_format
)) {
1792 i40e_vc_send_resp_to_vf(vf
, v_opcode
, I40E_ERR_PARAM
);
1800 * i40e_vc_process_vf_msg
1801 * @pf: pointer to the pf structure
1802 * @vf_id: source vf id
1803 * @msg: pointer to the msg buffer
1804 * @msglen: msg length
1805 * @msghndl: msg handle
1807 * called from the common aeq/arq handler to
1808 * process request from vf
1810 int i40e_vc_process_vf_msg(struct i40e_pf
*pf
, u16 vf_id
, u32 v_opcode
,
1811 u32 v_retval
, u8
*msg
, u16 msglen
)
1813 struct i40e_hw
*hw
= &pf
->hw
;
1814 unsigned int local_vf_id
= vf_id
- hw
->func_caps
.vf_base_id
;
1818 pf
->vf_aq_requests
++;
1819 if (local_vf_id
>= pf
->num_alloc_vfs
)
1821 vf
= &(pf
->vf
[local_vf_id
]);
1822 /* perform basic checks on the msg */
1823 ret
= i40e_vc_validate_vf_msg(vf
, v_opcode
, v_retval
, msg
, msglen
);
1826 dev_err(&pf
->pdev
->dev
, "Invalid message from vf %d, opcode %d, len %d\n",
1827 local_vf_id
, v_opcode
, msglen
);
1832 case I40E_VIRTCHNL_OP_VERSION
:
1833 ret
= i40e_vc_get_version_msg(vf
);
1835 case I40E_VIRTCHNL_OP_GET_VF_RESOURCES
:
1836 ret
= i40e_vc_get_vf_resources_msg(vf
);
1838 case I40E_VIRTCHNL_OP_RESET_VF
:
1839 i40e_vc_reset_vf_msg(vf
);
1842 case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
:
1843 ret
= i40e_vc_config_promiscuous_mode_msg(vf
, msg
, msglen
);
1845 case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
:
1846 ret
= i40e_vc_config_queues_msg(vf
, msg
, msglen
);
1848 case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
:
1849 ret
= i40e_vc_config_irq_map_msg(vf
, msg
, msglen
);
1851 case I40E_VIRTCHNL_OP_ENABLE_QUEUES
:
1852 ret
= i40e_vc_enable_queues_msg(vf
, msg
, msglen
);
1854 case I40E_VIRTCHNL_OP_DISABLE_QUEUES
:
1855 ret
= i40e_vc_disable_queues_msg(vf
, msg
, msglen
);
1857 case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
:
1858 ret
= i40e_vc_add_mac_addr_msg(vf
, msg
, msglen
);
1860 case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
:
1861 ret
= i40e_vc_del_mac_addr_msg(vf
, msg
, msglen
);
1863 case I40E_VIRTCHNL_OP_ADD_VLAN
:
1864 ret
= i40e_vc_add_vlan_msg(vf
, msg
, msglen
);
1866 case I40E_VIRTCHNL_OP_DEL_VLAN
:
1867 ret
= i40e_vc_remove_vlan_msg(vf
, msg
, msglen
);
1869 case I40E_VIRTCHNL_OP_GET_STATS
:
1870 ret
= i40e_vc_get_stats_msg(vf
, msg
, msglen
);
1872 case I40E_VIRTCHNL_OP_UNKNOWN
:
1874 dev_err(&pf
->pdev
->dev
, "Unsupported opcode %d from vf %d\n",
1875 v_opcode
, local_vf_id
);
1876 ret
= i40e_vc_send_resp_to_vf(vf
, v_opcode
,
1877 I40E_ERR_NOT_IMPLEMENTED
);
1885 * i40e_vc_process_vflr_event
1886 * @pf: pointer to the pf structure
1888 * called from the vlfr irq handler to
1889 * free up vf resources and state variables
1891 int i40e_vc_process_vflr_event(struct i40e_pf
*pf
)
1893 u32 reg
, reg_idx
, bit_idx
, vf_id
;
1894 struct i40e_hw
*hw
= &pf
->hw
;
1897 if (!test_bit(__I40E_VFLR_EVENT_PENDING
, &pf
->state
))
1900 clear_bit(__I40E_VFLR_EVENT_PENDING
, &pf
->state
);
1901 for (vf_id
= 0; vf_id
< pf
->num_alloc_vfs
; vf_id
++) {
1902 reg_idx
= (hw
->func_caps
.vf_base_id
+ vf_id
) / 32;
1903 bit_idx
= (hw
->func_caps
.vf_base_id
+ vf_id
) % 32;
1904 /* read GLGEN_VFLRSTAT register to find out the flr vfs */
1905 vf
= &pf
->vf
[vf_id
];
1906 reg
= rd32(hw
, I40E_GLGEN_VFLRSTAT(reg_idx
));
1907 if (reg
& (1 << bit_idx
)) {
1908 /* clear the bit in GLGEN_VFLRSTAT */
1909 wr32(hw
, I40E_GLGEN_VFLRSTAT(reg_idx
), (1 << bit_idx
));
1911 if (!test_bit(__I40E_DOWN
, &pf
->state
))
1912 i40e_reset_vf(vf
, true);
1916 /* re-enable vflr interrupt cause */
1917 reg
= rd32(hw
, I40E_PFINT_ICR0_ENA
);
1918 reg
|= I40E_PFINT_ICR0_ENA_VFLR_MASK
;
1919 wr32(hw
, I40E_PFINT_ICR0_ENA
, reg
);
1926 * i40e_vc_vf_broadcast
1927 * @pf: pointer to the pf structure
1928 * @opcode: operation code
1929 * @retval: return value
1930 * @msg: pointer to the msg buffer
1931 * @msglen: msg length
1933 * send a message to all VFs on a given PF
1935 static void i40e_vc_vf_broadcast(struct i40e_pf
*pf
,
1936 enum i40e_virtchnl_ops v_opcode
,
1937 i40e_status v_retval
, u8
*msg
,
1940 struct i40e_hw
*hw
= &pf
->hw
;
1941 struct i40e_vf
*vf
= pf
->vf
;
1944 for (i
= 0; i
< pf
->num_alloc_vfs
; i
++) {
1945 /* Ignore return value on purpose - a given VF may fail, but
1946 * we need to keep going and send to all of them
1948 i40e_aq_send_msg_to_vf(hw
, vf
->vf_id
, v_opcode
, v_retval
,
1955 * i40e_vc_notify_link_state
1956 * @pf: pointer to the pf structure
1958 * send a link status message to all VFs on a given PF
1960 void i40e_vc_notify_link_state(struct i40e_pf
*pf
)
1962 struct i40e_virtchnl_pf_event pfe
;
1963 struct i40e_hw
*hw
= &pf
->hw
;
1964 struct i40e_vf
*vf
= pf
->vf
;
1965 struct i40e_link_status
*ls
= &pf
->hw
.phy
.link_info
;
1968 pfe
.event
= I40E_VIRTCHNL_EVENT_LINK_CHANGE
;
1969 pfe
.severity
= I40E_PF_EVENT_SEVERITY_INFO
;
1970 for (i
= 0; i
< pf
->num_alloc_vfs
; i
++) {
1971 if (vf
->link_forced
) {
1972 pfe
.event_data
.link_event
.link_status
= vf
->link_up
;
1973 pfe
.event_data
.link_event
.link_speed
=
1974 (vf
->link_up
? I40E_LINK_SPEED_40GB
: 0);
1976 pfe
.event_data
.link_event
.link_status
=
1977 ls
->link_info
& I40E_AQ_LINK_UP
;
1978 pfe
.event_data
.link_event
.link_speed
= ls
->link_speed
;
1980 i40e_aq_send_msg_to_vf(hw
, vf
->vf_id
, I40E_VIRTCHNL_OP_EVENT
,
1981 0, (u8
*)&pfe
, sizeof(pfe
),
1988 * i40e_vc_notify_reset
1989 * @pf: pointer to the pf structure
1991 * indicate a pending reset to all VFs on a given PF
1993 void i40e_vc_notify_reset(struct i40e_pf
*pf
)
1995 struct i40e_virtchnl_pf_event pfe
;
1997 pfe
.event
= I40E_VIRTCHNL_EVENT_RESET_IMPENDING
;
1998 pfe
.severity
= I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM
;
1999 i40e_vc_vf_broadcast(pf
, I40E_VIRTCHNL_OP_EVENT
, I40E_SUCCESS
,
2000 (u8
*)&pfe
, sizeof(struct i40e_virtchnl_pf_event
));
2004 * i40e_vc_notify_vf_reset
2005 * @vf: pointer to the vf structure
2007 * indicate a pending reset to the given VF
2009 void i40e_vc_notify_vf_reset(struct i40e_vf
*vf
)
2011 struct i40e_virtchnl_pf_event pfe
;
2013 pfe
.event
= I40E_VIRTCHNL_EVENT_RESET_IMPENDING
;
2014 pfe
.severity
= I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM
;
2015 i40e_aq_send_msg_to_vf(&vf
->pf
->hw
, vf
->vf_id
, I40E_VIRTCHNL_OP_EVENT
,
2016 I40E_SUCCESS
, (u8
*)&pfe
,
2017 sizeof(struct i40e_virtchnl_pf_event
), NULL
);
2021 * i40e_ndo_set_vf_mac
2022 * @netdev: network interface device structure
2023 * @vf_id: vf identifier
2026 * program vf mac address
2028 int i40e_ndo_set_vf_mac(struct net_device
*netdev
, int vf_id
, u8
*mac
)
2030 struct i40e_netdev_priv
*np
= netdev_priv(netdev
);
2031 struct i40e_vsi
*vsi
= np
->vsi
;
2032 struct i40e_pf
*pf
= vsi
->back
;
2033 struct i40e_mac_filter
*f
;
2037 /* validate the request */
2038 if (vf_id
>= pf
->num_alloc_vfs
) {
2039 dev_err(&pf
->pdev
->dev
,
2040 "Invalid VF Identifier %d\n", vf_id
);
2045 vf
= &(pf
->vf
[vf_id
]);
2046 vsi
= pf
->vsi
[vf
->lan_vsi_index
];
2047 if (!test_bit(I40E_VF_STAT_INIT
, &vf
->vf_states
)) {
2048 dev_err(&pf
->pdev
->dev
,
2049 "Uninitialized VF %d\n", vf_id
);
2054 if (!is_valid_ether_addr(mac
)) {
2055 dev_err(&pf
->pdev
->dev
,
2056 "Invalid VF ethernet address\n");
2061 /* delete the temporary mac address */
2062 i40e_del_filter(vsi
, vf
->default_lan_addr
.addr
, vf
->port_vlan_id
,
2065 /* add the new mac address */
2066 f
= i40e_add_filter(vsi
, mac
, vf
->port_vlan_id
, true, false);
2068 dev_err(&pf
->pdev
->dev
,
2069 "Unable to add VF ucast filter\n");
2074 dev_info(&pf
->pdev
->dev
, "Setting MAC %pM on VF %d\n", mac
, vf_id
);
2075 /* program mac filter */
2076 if (i40e_sync_vsi_filters(vsi
)) {
2077 dev_err(&pf
->pdev
->dev
, "Unable to program ucast filters\n");
2081 memcpy(vf
->default_lan_addr
.addr
, mac
, ETH_ALEN
);
2082 vf
->pf_set_mac
= true;
2083 dev_info(&pf
->pdev
->dev
, "Reload the VF driver to make this change effective.\n");
2091 * i40e_ndo_set_vf_port_vlan
2092 * @netdev: network interface device structure
2093 * @vf_id: vf identifier
2094 * @vlan_id: mac address
2095 * @qos: priority setting
2097 * program vf vlan id and/or qos
2099 int i40e_ndo_set_vf_port_vlan(struct net_device
*netdev
,
2100 int vf_id
, u16 vlan_id
, u8 qos
)
2102 struct i40e_netdev_priv
*np
= netdev_priv(netdev
);
2103 struct i40e_pf
*pf
= np
->vsi
->back
;
2104 struct i40e_vsi
*vsi
;
2108 /* validate the request */
2109 if (vf_id
>= pf
->num_alloc_vfs
) {
2110 dev_err(&pf
->pdev
->dev
, "Invalid VF Identifier %d\n", vf_id
);
2115 if ((vlan_id
> I40E_MAX_VLANID
) || (qos
> 7)) {
2116 dev_err(&pf
->pdev
->dev
, "Invalid VF Parameters\n");
2121 vf
= &(pf
->vf
[vf_id
]);
2122 vsi
= pf
->vsi
[vf
->lan_vsi_index
];
2123 if (!test_bit(I40E_VF_STAT_INIT
, &vf
->vf_states
)) {
2124 dev_err(&pf
->pdev
->dev
, "Uninitialized VF %d\n", vf_id
);
2129 if (vsi
->info
.pvid
== 0 && i40e_is_vsi_in_vlan(vsi
)) {
2130 dev_err(&pf
->pdev
->dev
,
2131 "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
2133 /* Administrator Error - knock the VF offline until he does
2134 * the right thing by reconfiguring his network correctly
2135 * and then reloading the VF driver.
2137 i40e_vc_disable_vf(pf
, vf
);
2140 /* Check for condition where there was already a port VLAN ID
2141 * filter set and now it is being deleted by setting it to zero.
2142 * Additionally check for the condition where there was a port
2143 * VLAN but now there is a new and different port VLAN being set.
2144 * Before deleting all the old VLAN filters we must add new ones
2145 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
2146 * MAC addresses deleted.
2148 if ((!(vlan_id
|| qos
) ||
2149 (vlan_id
| qos
) != le16_to_cpu(vsi
->info
.pvid
)) &&
2151 ret
= i40e_vsi_add_vlan(vsi
, I40E_VLAN_ANY
);
2153 if (vsi
->info
.pvid
) {
2155 ret
= i40e_vsi_kill_vlan(vsi
, (le16_to_cpu(vsi
->info
.pvid
) &
2158 dev_info(&vsi
->back
->pdev
->dev
,
2159 "remove VLAN failed, ret=%d, aq_err=%d\n",
2160 ret
, pf
->hw
.aq
.asq_last_status
);
2164 ret
= i40e_vsi_add_pvid(vsi
,
2165 vlan_id
| (qos
<< I40E_VLAN_PRIORITY_SHIFT
));
2167 i40e_vsi_remove_pvid(vsi
);
2170 dev_info(&pf
->pdev
->dev
, "Setting VLAN %d, QOS 0x%x on VF %d\n",
2171 vlan_id
, qos
, vf_id
);
2173 /* add new VLAN filter */
2174 ret
= i40e_vsi_add_vlan(vsi
, vlan_id
);
2176 dev_info(&vsi
->back
->pdev
->dev
,
2177 "add VF VLAN failed, ret=%d aq_err=%d\n", ret
,
2178 vsi
->back
->hw
.aq
.asq_last_status
);
2181 /* Kill non-vlan MAC filters - ignore error return since
2182 * there might not be any non-vlan MAC filters.
2184 i40e_vsi_kill_vlan(vsi
, I40E_VLAN_ANY
);
2188 dev_err(&pf
->pdev
->dev
, "Unable to update VF vsi context\n");
2191 /* The Port VLAN needs to be saved across resets the same as the
2192 * default LAN MAC address.
2194 vf
->port_vlan_id
= le16_to_cpu(vsi
->info
.pvid
);
2202 * i40e_ndo_set_vf_bw
2203 * @netdev: network interface device structure
2204 * @vf_id: vf identifier
2207 * configure vf tx rate
2209 int i40e_ndo_set_vf_bw(struct net_device
*netdev
, int vf_id
, int min_tx_rate
,
2212 struct i40e_netdev_priv
*np
= netdev_priv(netdev
);
2213 struct i40e_pf
*pf
= np
->vsi
->back
;
2214 struct i40e_vsi
*vsi
;
2219 /* validate the request */
2220 if (vf_id
>= pf
->num_alloc_vfs
) {
2221 dev_err(&pf
->pdev
->dev
, "Invalid VF Identifier %d.\n", vf_id
);
2227 dev_err(&pf
->pdev
->dev
, "Invalid min tx rate (%d) (greater than 0) specified for vf %d.\n",
2228 min_tx_rate
, vf_id
);
2232 vf
= &(pf
->vf
[vf_id
]);
2233 vsi
= pf
->vsi
[vf
->lan_vsi_index
];
2234 if (!test_bit(I40E_VF_STAT_INIT
, &vf
->vf_states
)) {
2235 dev_err(&pf
->pdev
->dev
, "Uninitialized VF %d.\n", vf_id
);
2240 switch (pf
->hw
.phy
.link_info
.link_speed
) {
2241 case I40E_LINK_SPEED_40GB
:
2244 case I40E_LINK_SPEED_10GB
:
2247 case I40E_LINK_SPEED_1GB
:
2254 if (max_tx_rate
> speed
) {
2255 dev_err(&pf
->pdev
->dev
, "Invalid max tx rate %d specified for vf %d.",
2256 max_tx_rate
, vf
->vf_id
);
2261 if ((max_tx_rate
< 50) && (max_tx_rate
> 0)) {
2262 dev_warn(&pf
->pdev
->dev
, "Setting max Tx rate to minimum usable value of 50Mbps.\n");
2266 /* Tx rate credits are in values of 50Mbps, 0 is disabled*/
2267 ret
= i40e_aq_config_vsi_bw_limit(&pf
->hw
, vsi
->seid
, max_tx_rate
/ 50,
2270 dev_err(&pf
->pdev
->dev
, "Unable to set max tx rate, error code %d.\n",
2275 vf
->tx_rate
= max_tx_rate
;
2281 * i40e_ndo_get_vf_config
2282 * @netdev: network interface device structure
2283 * @vf_id: vf identifier
2284 * @ivi: vf configuration structure
2286 * return vf configuration
2288 int i40e_ndo_get_vf_config(struct net_device
*netdev
,
2289 int vf_id
, struct ifla_vf_info
*ivi
)
2291 struct i40e_netdev_priv
*np
= netdev_priv(netdev
);
2292 struct i40e_vsi
*vsi
= np
->vsi
;
2293 struct i40e_pf
*pf
= vsi
->back
;
2297 /* validate the request */
2298 if (vf_id
>= pf
->num_alloc_vfs
) {
2299 dev_err(&pf
->pdev
->dev
, "Invalid VF Identifier %d\n", vf_id
);
2304 vf
= &(pf
->vf
[vf_id
]);
2305 /* first vsi is always the LAN vsi */
2306 vsi
= pf
->vsi
[vf
->lan_vsi_index
];
2307 if (!test_bit(I40E_VF_STAT_INIT
, &vf
->vf_states
)) {
2308 dev_err(&pf
->pdev
->dev
, "Uninitialized VF %d\n", vf_id
);
2315 memcpy(&ivi
->mac
, vf
->default_lan_addr
.addr
, ETH_ALEN
);
2317 ivi
->max_tx_rate
= vf
->tx_rate
;
2318 ivi
->min_tx_rate
= 0;
2319 ivi
->vlan
= le16_to_cpu(vsi
->info
.pvid
) & I40E_VLAN_MASK
;
2320 ivi
->qos
= (le16_to_cpu(vsi
->info
.pvid
) & I40E_PRIORITY_MASK
) >>
2321 I40E_VLAN_PRIORITY_SHIFT
;
2322 if (vf
->link_forced
== false)
2323 ivi
->linkstate
= IFLA_VF_LINK_STATE_AUTO
;
2324 else if (vf
->link_up
== true)
2325 ivi
->linkstate
= IFLA_VF_LINK_STATE_ENABLE
;
2327 ivi
->linkstate
= IFLA_VF_LINK_STATE_DISABLE
;
2336 * i40e_ndo_set_vf_link_state
2337 * @netdev: network interface device structure
2338 * @vf_id: vf identifier
2339 * @link: required link state
2341 * Set the link state of a specified VF, regardless of physical link state
2343 int i40e_ndo_set_vf_link_state(struct net_device
*netdev
, int vf_id
, int link
)
2345 struct i40e_netdev_priv
*np
= netdev_priv(netdev
);
2346 struct i40e_pf
*pf
= np
->vsi
->back
;
2347 struct i40e_virtchnl_pf_event pfe
;
2348 struct i40e_hw
*hw
= &pf
->hw
;
2352 /* validate the request */
2353 if (vf_id
>= pf
->num_alloc_vfs
) {
2354 dev_err(&pf
->pdev
->dev
, "Invalid VF Identifier %d\n", vf_id
);
2359 vf
= &pf
->vf
[vf_id
];
2361 pfe
.event
= I40E_VIRTCHNL_EVENT_LINK_CHANGE
;
2362 pfe
.severity
= I40E_PF_EVENT_SEVERITY_INFO
;
2365 case IFLA_VF_LINK_STATE_AUTO
:
2366 vf
->link_forced
= false;
2367 pfe
.event_data
.link_event
.link_status
=
2368 pf
->hw
.phy
.link_info
.link_info
& I40E_AQ_LINK_UP
;
2369 pfe
.event_data
.link_event
.link_speed
=
2370 pf
->hw
.phy
.link_info
.link_speed
;
2372 case IFLA_VF_LINK_STATE_ENABLE
:
2373 vf
->link_forced
= true;
2375 pfe
.event_data
.link_event
.link_status
= true;
2376 pfe
.event_data
.link_event
.link_speed
= I40E_LINK_SPEED_40GB
;
2378 case IFLA_VF_LINK_STATE_DISABLE
:
2379 vf
->link_forced
= true;
2380 vf
->link_up
= false;
2381 pfe
.event_data
.link_event
.link_status
= false;
2382 pfe
.event_data
.link_event
.link_speed
= 0;
2388 /* Notify the VF of its new link state */
2389 i40e_aq_send_msg_to_vf(hw
, vf
->vf_id
, I40E_VIRTCHNL_OP_EVENT
,
2390 0, (u8
*)&pfe
, sizeof(pfe
), NULL
);