/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/
29 /***********************misc routines*****************************/
33 * @pf: pointer to the pf info
34 * @vf: pointer to the vf info
36 * Disable the VF through a SW reset
38 static inline void i40e_vc_disable_vf(struct i40e_pf
*pf
, struct i40e_vf
*vf
)
40 struct i40e_hw
*hw
= &pf
->hw
;
43 reg
= rd32(hw
, I40E_VPGEN_VFRTRIG(vf
->vf_id
));
44 reg
|= I40E_VPGEN_VFRTRIG_VFSWR_MASK
;
45 wr32(hw
, I40E_VPGEN_VFRTRIG(vf
->vf_id
), reg
);
50 * i40e_vc_isvalid_vsi_id
51 * @vf: pointer to the vf info
52 * @vsi_id: vf relative vsi id
54 * check for the valid vsi id
56 static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf
*vf
, u8 vsi_id
)
58 struct i40e_pf
*pf
= vf
->pf
;
60 return pf
->vsi
[vsi_id
]->vf_id
== vf
->vf_id
;
64 * i40e_vc_isvalid_queue_id
65 * @vf: pointer to the vf info
67 * @qid: vsi relative queue id
69 * check for the valid queue id
71 static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf
*vf
, u8 vsi_id
,
74 struct i40e_pf
*pf
= vf
->pf
;
76 return qid
< pf
->vsi
[vsi_id
]->num_queue_pairs
;
80 * i40e_vc_isvalid_vector_id
81 * @vf: pointer to the vf info
82 * @vector_id: vf relative vector id
84 * check for the valid vector id
86 static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf
*vf
, u8 vector_id
)
88 struct i40e_pf
*pf
= vf
->pf
;
90 return vector_id
< pf
->hw
.func_caps
.num_msix_vectors_vf
;
93 /***********************vf resource mgmt routines*****************/
96 * i40e_vc_get_pf_queue_id
97 * @vf: pointer to the vf info
98 * @vsi_idx: index of VSI in PF struct
99 * @vsi_queue_id: vsi relative queue id
101 * return pf relative queue id
103 static u16
i40e_vc_get_pf_queue_id(struct i40e_vf
*vf
, u8 vsi_idx
,
106 struct i40e_pf
*pf
= vf
->pf
;
107 struct i40e_vsi
*vsi
= pf
->vsi
[vsi_idx
];
108 u16 pf_queue_id
= I40E_QUEUE_END_OF_LIST
;
110 if (le16_to_cpu(vsi
->info
.mapping_flags
) &
111 I40E_AQ_VSI_QUE_MAP_NONCONTIG
)
113 le16_to_cpu(vsi
->info
.queue_mapping
[vsi_queue_id
]);
115 pf_queue_id
= le16_to_cpu(vsi
->info
.queue_mapping
[0]) +
122 * i40e_config_irq_link_list
123 * @vf: pointer to the vf info
124 * @vsi_idx: index of VSI in PF struct
125 * @vecmap: irq map info
127 * configure irq link list from the map
129 static void i40e_config_irq_link_list(struct i40e_vf
*vf
, u16 vsi_idx
,
130 struct i40e_virtchnl_vector_map
*vecmap
)
132 unsigned long linklistmap
= 0, tempmap
;
133 struct i40e_pf
*pf
= vf
->pf
;
134 struct i40e_hw
*hw
= &pf
->hw
;
135 u16 vsi_queue_id
, pf_queue_id
;
136 enum i40e_queue_type qtype
;
137 u16 next_q
, vector_id
;
141 vector_id
= vecmap
->vector_id
;
144 reg_idx
= I40E_VPINT_LNKLST0(vf
->vf_id
);
146 reg_idx
= I40E_VPINT_LNKLSTN(
147 ((pf
->hw
.func_caps
.num_msix_vectors_vf
- 1) * vf
->vf_id
) +
150 if (vecmap
->rxq_map
== 0 && vecmap
->txq_map
== 0) {
151 /* Special case - No queues mapped on this vector */
152 wr32(hw
, reg_idx
, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK
);
155 tempmap
= vecmap
->rxq_map
;
156 for_each_set_bit(vsi_queue_id
, &tempmap
, I40E_MAX_VSI_QP
) {
158 (I40E_VIRTCHNL_SUPPORTED_QTYPES
*
162 tempmap
= vecmap
->txq_map
;
163 for_each_set_bit(vsi_queue_id
, &tempmap
, I40E_MAX_VSI_QP
) {
165 (I40E_VIRTCHNL_SUPPORTED_QTYPES
* vsi_queue_id
169 next_q
= find_first_bit(&linklistmap
,
171 I40E_VIRTCHNL_SUPPORTED_QTYPES
));
172 vsi_queue_id
= next_q
/I40E_VIRTCHNL_SUPPORTED_QTYPES
;
173 qtype
= next_q
%I40E_VIRTCHNL_SUPPORTED_QTYPES
;
174 pf_queue_id
= i40e_vc_get_pf_queue_id(vf
, vsi_idx
, vsi_queue_id
);
175 reg
= ((qtype
<< I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT
) | pf_queue_id
);
177 wr32(hw
, reg_idx
, reg
);
179 while (next_q
< (I40E_MAX_VSI_QP
* I40E_VIRTCHNL_SUPPORTED_QTYPES
)) {
181 case I40E_QUEUE_TYPE_RX
:
182 reg_idx
= I40E_QINT_RQCTL(pf_queue_id
);
183 itr_idx
= vecmap
->rxitr_idx
;
185 case I40E_QUEUE_TYPE_TX
:
186 reg_idx
= I40E_QINT_TQCTL(pf_queue_id
);
187 itr_idx
= vecmap
->txitr_idx
;
193 next_q
= find_next_bit(&linklistmap
,
195 I40E_VIRTCHNL_SUPPORTED_QTYPES
),
198 (I40E_MAX_VSI_QP
* I40E_VIRTCHNL_SUPPORTED_QTYPES
)) {
199 vsi_queue_id
= next_q
/ I40E_VIRTCHNL_SUPPORTED_QTYPES
;
200 qtype
= next_q
% I40E_VIRTCHNL_SUPPORTED_QTYPES
;
201 pf_queue_id
= i40e_vc_get_pf_queue_id(vf
, vsi_idx
,
204 pf_queue_id
= I40E_QUEUE_END_OF_LIST
;
208 /* format for the RQCTL & TQCTL regs is same */
210 (qtype
<< I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT
) |
211 (pf_queue_id
<< I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT
) |
212 (1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT
) |
213 (itr_idx
<< I40E_QINT_RQCTL_ITR_INDX_SHIFT
);
214 wr32(hw
, reg_idx
, reg
);
222 * i40e_config_vsi_tx_queue
223 * @vf: pointer to the vf info
224 * @vsi_idx: index of VSI in PF struct
225 * @vsi_queue_id: vsi relative queue index
226 * @info: config. info
230 static int i40e_config_vsi_tx_queue(struct i40e_vf
*vf
, u16 vsi_idx
,
232 struct i40e_virtchnl_txq_info
*info
)
234 struct i40e_pf
*pf
= vf
->pf
;
235 struct i40e_hw
*hw
= &pf
->hw
;
236 struct i40e_hmc_obj_txq tx_ctx
;
241 pf_queue_id
= i40e_vc_get_pf_queue_id(vf
, vsi_idx
, vsi_queue_id
);
243 /* clear the context structure first */
244 memset(&tx_ctx
, 0, sizeof(struct i40e_hmc_obj_txq
));
246 /* only set the required fields */
247 tx_ctx
.base
= info
->dma_ring_addr
/ 128;
248 tx_ctx
.qlen
= info
->ring_len
;
249 tx_ctx
.rdylist
= le16_to_cpu(pf
->vsi
[vsi_idx
]->info
.qs_handle
[0]);
250 tx_ctx
.rdylist_act
= 0;
251 tx_ctx
.head_wb_ena
= 1;
252 tx_ctx
.head_wb_addr
= info
->dma_ring_addr
+
253 (info
->ring_len
* sizeof(struct i40e_tx_desc
));
255 /* clear the context in the HMC */
256 ret
= i40e_clear_lan_tx_queue_context(hw
, pf_queue_id
);
258 dev_err(&pf
->pdev
->dev
,
259 "Failed to clear VF LAN Tx queue context %d, error: %d\n",
265 /* set the context in the HMC */
266 ret
= i40e_set_lan_tx_queue_context(hw
, pf_queue_id
, &tx_ctx
);
268 dev_err(&pf
->pdev
->dev
,
269 "Failed to set VF LAN Tx queue context %d error: %d\n",
275 /* associate this queue with the PCI VF function */
276 qtx_ctl
= I40E_QTX_CTL_VF_QUEUE
;
277 qtx_ctl
|= ((hw
->pf_id
<< I40E_QTX_CTL_PF_INDX_SHIFT
)
278 & I40E_QTX_CTL_PF_INDX_MASK
);
279 qtx_ctl
|= (((vf
->vf_id
+ hw
->func_caps
.vf_base_id
)
280 << I40E_QTX_CTL_VFVM_INDX_SHIFT
)
281 & I40E_QTX_CTL_VFVM_INDX_MASK
);
282 wr32(hw
, I40E_QTX_CTL(pf_queue_id
), qtx_ctl
);
290 * i40e_config_vsi_rx_queue
291 * @vf: pointer to the vf info
292 * @vsi_idx: index of VSI in PF struct
293 * @vsi_queue_id: vsi relative queue index
294 * @info: config. info
298 static int i40e_config_vsi_rx_queue(struct i40e_vf
*vf
, u16 vsi_idx
,
300 struct i40e_virtchnl_rxq_info
*info
)
302 struct i40e_pf
*pf
= vf
->pf
;
303 struct i40e_hw
*hw
= &pf
->hw
;
304 struct i40e_hmc_obj_rxq rx_ctx
;
308 pf_queue_id
= i40e_vc_get_pf_queue_id(vf
, vsi_idx
, vsi_queue_id
);
310 /* clear the context structure first */
311 memset(&rx_ctx
, 0, sizeof(struct i40e_hmc_obj_rxq
));
313 /* only set the required fields */
314 rx_ctx
.base
= info
->dma_ring_addr
/ 128;
315 rx_ctx
.qlen
= info
->ring_len
;
317 if (info
->splithdr_enabled
) {
318 rx_ctx
.hsplit_0
= I40E_RX_SPLIT_L2
|
320 I40E_RX_SPLIT_TCP_UDP
|
322 /* header length validation */
323 if (info
->hdr_size
> ((2 * 1024) - 64)) {
327 rx_ctx
.hbuff
= info
->hdr_size
>> I40E_RXQ_CTX_HBUFF_SHIFT
;
329 /* set splitalways mode 10b */
333 /* databuffer length validation */
334 if (info
->databuffer_size
> ((16 * 1024) - 128)) {
338 rx_ctx
.dbuff
= info
->databuffer_size
>> I40E_RXQ_CTX_DBUFF_SHIFT
;
340 /* max pkt. length validation */
341 if (info
->max_pkt_size
>= (16 * 1024) || info
->max_pkt_size
< 64) {
345 rx_ctx
.rxmax
= info
->max_pkt_size
;
347 /* enable 32bytes desc always */
351 rx_ctx
.tphrdesc_ena
= 1;
352 rx_ctx
.tphwdesc_ena
= 1;
353 rx_ctx
.tphdata_ena
= 1;
354 rx_ctx
.tphhead_ena
= 1;
355 rx_ctx
.lrxqthresh
= 2;
358 /* clear the context in the HMC */
359 ret
= i40e_clear_lan_rx_queue_context(hw
, pf_queue_id
);
361 dev_err(&pf
->pdev
->dev
,
362 "Failed to clear VF LAN Rx queue context %d, error: %d\n",
368 /* set the context in the HMC */
369 ret
= i40e_set_lan_rx_queue_context(hw
, pf_queue_id
, &rx_ctx
);
371 dev_err(&pf
->pdev
->dev
,
372 "Failed to set VF LAN Rx queue context %d error: %d\n",
384 * @vf: pointer to the vf info
385 * @type: type of VSI to allocate
387 * alloc vf vsi context & resources
389 static int i40e_alloc_vsi_res(struct i40e_vf
*vf
, enum i40e_vsi_type type
)
391 struct i40e_mac_filter
*f
= NULL
;
392 struct i40e_pf
*pf
= vf
->pf
;
393 struct i40e_vsi
*vsi
;
396 vsi
= i40e_vsi_setup(pf
, type
, pf
->vsi
[pf
->lan_vsi
]->seid
, vf
->vf_id
);
399 dev_err(&pf
->pdev
->dev
,
400 "add vsi failed for vf %d, aq_err %d\n",
401 vf
->vf_id
, pf
->hw
.aq
.asq_last_status
);
403 goto error_alloc_vsi_res
;
405 if (type
== I40E_VSI_SRIOV
) {
406 u8 brdcast
[ETH_ALEN
] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
407 vf
->lan_vsi_index
= vsi
->idx
;
408 vf
->lan_vsi_id
= vsi
->id
;
409 dev_info(&pf
->pdev
->dev
,
410 "VF %d assigned LAN VSI index %d, VSI id %d\n",
411 vf
->vf_id
, vsi
->idx
, vsi
->id
);
412 /* If the port VLAN has been configured and then the
413 * VF driver was removed then the VSI port VLAN
414 * configuration was destroyed. Check if there is
415 * a port VLAN and restore the VSI configuration if
418 if (vf
->port_vlan_id
)
419 i40e_vsi_add_pvid(vsi
, vf
->port_vlan_id
);
420 f
= i40e_add_filter(vsi
, vf
->default_lan_addr
.addr
,
421 vf
->port_vlan_id
, true, false);
423 dev_info(&pf
->pdev
->dev
,
424 "Could not allocate VF MAC addr\n");
425 f
= i40e_add_filter(vsi
, brdcast
, vf
->port_vlan_id
,
428 dev_info(&pf
->pdev
->dev
,
429 "Could not allocate VF broadcast filter\n");
432 /* program mac filter */
433 ret
= i40e_sync_vsi_filters(vsi
);
435 dev_err(&pf
->pdev
->dev
, "Unable to program ucast filters\n");
437 /* Set VF bandwidth if specified */
439 ret
= i40e_aq_config_vsi_bw_limit(&pf
->hw
, vsi
->seid
,
440 vf
->tx_rate
/ 50, 0, NULL
);
442 dev_err(&pf
->pdev
->dev
, "Unable to set tx rate, VF %d, error code %d.\n",
451 * i40e_enable_vf_mappings
452 * @vf: pointer to the vf info
456 static void i40e_enable_vf_mappings(struct i40e_vf
*vf
)
458 struct i40e_pf
*pf
= vf
->pf
;
459 struct i40e_hw
*hw
= &pf
->hw
;
460 u32 reg
, total_queue_pairs
= 0;
463 /* Tell the hardware we're using noncontiguous mapping. HW requires
464 * that VF queues be mapped using this method, even when they are
465 * contiguous in real life
467 wr32(hw
, I40E_VSILAN_QBASE(vf
->lan_vsi_id
),
468 I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK
);
470 /* enable VF vplan_qtable mappings */
471 reg
= I40E_VPLAN_MAPENA_TXRX_ENA_MASK
;
472 wr32(hw
, I40E_VPLAN_MAPENA(vf
->vf_id
), reg
);
474 /* map PF queues to VF queues */
475 for (j
= 0; j
< pf
->vsi
[vf
->lan_vsi_index
]->num_queue_pairs
; j
++) {
476 u16 qid
= i40e_vc_get_pf_queue_id(vf
, vf
->lan_vsi_index
, j
);
477 reg
= (qid
& I40E_VPLAN_QTABLE_QINDEX_MASK
);
478 wr32(hw
, I40E_VPLAN_QTABLE(total_queue_pairs
, vf
->vf_id
), reg
);
482 /* map PF queues to VSI */
483 for (j
= 0; j
< 7; j
++) {
484 if (j
* 2 >= pf
->vsi
[vf
->lan_vsi_index
]->num_queue_pairs
) {
485 reg
= 0x07FF07FF; /* unused */
487 u16 qid
= i40e_vc_get_pf_queue_id(vf
, vf
->lan_vsi_index
,
490 qid
= i40e_vc_get_pf_queue_id(vf
, vf
->lan_vsi_index
,
494 wr32(hw
, I40E_VSILAN_QTABLE(j
, vf
->lan_vsi_id
), reg
);
501 * i40e_disable_vf_mappings
502 * @vf: pointer to the vf info
504 * disable vf mappings
506 static void i40e_disable_vf_mappings(struct i40e_vf
*vf
)
508 struct i40e_pf
*pf
= vf
->pf
;
509 struct i40e_hw
*hw
= &pf
->hw
;
512 /* disable qp mappings */
513 wr32(hw
, I40E_VPLAN_MAPENA(vf
->vf_id
), 0);
514 for (i
= 0; i
< I40E_MAX_VSI_QP
; i
++)
515 wr32(hw
, I40E_VPLAN_QTABLE(i
, vf
->vf_id
),
516 I40E_QUEUE_END_OF_LIST
);
522 * @vf: pointer to the vf info
526 static void i40e_free_vf_res(struct i40e_vf
*vf
)
528 struct i40e_pf
*pf
= vf
->pf
;
529 struct i40e_hw
*hw
= &pf
->hw
;
533 /* free vsi & disconnect it from the parent uplink */
534 if (vf
->lan_vsi_index
) {
535 i40e_vsi_release(pf
->vsi
[vf
->lan_vsi_index
]);
536 vf
->lan_vsi_index
= 0;
539 msix_vf
= pf
->hw
.func_caps
.num_msix_vectors_vf
;
541 /* disable interrupts so the VF starts in a known state */
542 for (i
= 0; i
< msix_vf
; i
++) {
543 /* format is same for both registers */
545 reg_idx
= I40E_VFINT_DYN_CTL0(vf
->vf_id
);
547 reg_idx
= I40E_VFINT_DYN_CTLN(((msix_vf
- 1) *
550 wr32(hw
, reg_idx
, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK
);
554 /* clear the irq settings */
555 for (i
= 0; i
< msix_vf
; i
++) {
556 /* format is same for both registers */
558 reg_idx
= I40E_VPINT_LNKLST0(vf
->vf_id
);
560 reg_idx
= I40E_VPINT_LNKLSTN(((msix_vf
- 1) *
563 reg
= (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK
|
564 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK
);
565 wr32(hw
, reg_idx
, reg
);
568 /* reset some of the state varibles keeping
569 * track of the resources
571 vf
->num_queue_pairs
= 0;
577 * @vf: pointer to the vf info
579 * allocate vf resources
581 static int i40e_alloc_vf_res(struct i40e_vf
*vf
)
583 struct i40e_pf
*pf
= vf
->pf
;
584 int total_queue_pairs
= 0;
587 /* allocate hw vsi context & associated resources */
588 ret
= i40e_alloc_vsi_res(vf
, I40E_VSI_SRIOV
);
591 total_queue_pairs
+= pf
->vsi
[vf
->lan_vsi_index
]->num_queue_pairs
;
592 set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE
, &vf
->vf_caps
);
594 /* store the total qps number for the runtime
597 vf
->num_queue_pairs
= total_queue_pairs
;
599 /* vf is now completely initialized */
600 set_bit(I40E_VF_STAT_INIT
, &vf
->vf_states
);
604 i40e_free_vf_res(vf
);
609 #define VF_DEVICE_STATUS 0xAA
610 #define VF_TRANS_PENDING_MASK 0x20
612 * i40e_quiesce_vf_pci
613 * @vf: pointer to the vf structure
615 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
616 * if the transactions never clear.
618 static int i40e_quiesce_vf_pci(struct i40e_vf
*vf
)
620 struct i40e_pf
*pf
= vf
->pf
;
621 struct i40e_hw
*hw
= &pf
->hw
;
625 vf_abs_id
= vf
->vf_id
+ hw
->func_caps
.vf_base_id
;
627 wr32(hw
, I40E_PF_PCI_CIAA
,
628 VF_DEVICE_STATUS
| (vf_abs_id
<< I40E_PF_PCI_CIAA_VF_NUM_SHIFT
));
629 for (i
= 0; i
< 100; i
++) {
630 reg
= rd32(hw
, I40E_PF_PCI_CIAD
);
631 if ((reg
& VF_TRANS_PENDING_MASK
) == 0)
640 * @vf: pointer to the vf structure
641 * @flr: VFLR was issued or not
645 void i40e_reset_vf(struct i40e_vf
*vf
, bool flr
)
647 struct i40e_pf
*pf
= vf
->pf
;
648 struct i40e_hw
*hw
= &pf
->hw
;
654 clear_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
);
656 /* In the case of a VFLR, the HW has already reset the VF and we
657 * just need to clean up, so don't hit the VFRTRIG register.
660 /* reset vf using VPGEN_VFRTRIG reg */
661 reg
= rd32(hw
, I40E_VPGEN_VFRTRIG(vf
->vf_id
));
662 reg
|= I40E_VPGEN_VFRTRIG_VFSWR_MASK
;
663 wr32(hw
, I40E_VPGEN_VFRTRIG(vf
->vf_id
), reg
);
667 if (i40e_quiesce_vf_pci(vf
))
668 dev_err(&pf
->pdev
->dev
, "VF %d PCI transactions stuck\n",
671 /* poll VPGEN_VFRSTAT reg to make sure
672 * that reset is complete
674 for (i
= 0; i
< 100; i
++) {
675 /* vf reset requires driver to first reset the
676 * vf & than poll the status register to make sure
677 * that the requested op was completed
681 reg
= rd32(hw
, I40E_VPGEN_VFRSTAT(vf
->vf_id
));
682 if (reg
& I40E_VPGEN_VFRSTAT_VFRD_MASK
) {
689 dev_err(&pf
->pdev
->dev
, "VF reset check timeout on VF %d\n",
691 wr32(hw
, I40E_VFGEN_RSTAT1(vf
->vf_id
), I40E_VFR_COMPLETED
);
692 /* clear the reset bit in the VPGEN_VFRTRIG reg */
693 reg
= rd32(hw
, I40E_VPGEN_VFRTRIG(vf
->vf_id
));
694 reg
&= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK
;
695 wr32(hw
, I40E_VPGEN_VFRTRIG(vf
->vf_id
), reg
);
697 /* On initial reset, we won't have any queues */
698 if (vf
->lan_vsi_index
== 0)
701 i40e_vsi_control_rings(pf
->vsi
[vf
->lan_vsi_index
], false);
703 /* reallocate vf resources to reset the VSI state */
704 i40e_free_vf_res(vf
);
705 i40e_alloc_vf_res(vf
);
706 i40e_enable_vf_mappings(vf
);
707 set_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
);
709 /* tell the VF the reset is done */
710 wr32(hw
, I40E_VFGEN_RSTAT1(vf
->vf_id
), I40E_VFR_VFACTIVE
);
715 * i40e_vfs_are_assigned
716 * @pf: pointer to the pf structure
718 * Determine if any VFs are assigned to VMs
720 static bool i40e_vfs_are_assigned(struct i40e_pf
*pf
)
722 struct pci_dev
*pdev
= pf
->pdev
;
723 struct pci_dev
*vfdev
;
725 /* loop through all the VFs to see if we own any that are assigned */
726 vfdev
= pci_get_device(PCI_VENDOR_ID_INTEL
, I40E_DEV_ID_VF
, NULL
);
728 /* if we don't own it we don't care */
729 if (vfdev
->is_virtfn
&& pci_physfn(vfdev
) == pdev
) {
730 /* if it is assigned we cannot release it */
731 if (vfdev
->dev_flags
& PCI_DEV_FLAGS_ASSIGNED
)
735 vfdev
= pci_get_device(PCI_VENDOR_ID_INTEL
,
742 #ifdef CONFIG_PCI_IOV
745 * i40e_enable_pf_switch_lb
746 * @pf: pointer to the pf structure
748 * enable switch loop back or die - no point in a return value
750 static void i40e_enable_pf_switch_lb(struct i40e_pf
*pf
)
752 struct i40e_vsi
*vsi
= pf
->vsi
[pf
->lan_vsi
];
753 struct i40e_vsi_context ctxt
;
756 ctxt
.seid
= pf
->main_vsi_seid
;
757 ctxt
.pf_num
= pf
->hw
.pf_id
;
759 aq_ret
= i40e_aq_get_vsi_params(&pf
->hw
, &ctxt
, NULL
);
761 dev_info(&pf
->pdev
->dev
,
762 "%s couldn't get pf vsi config, err %d, aq_err %d\n",
763 __func__
, aq_ret
, pf
->hw
.aq
.asq_last_status
);
766 ctxt
.flags
= I40E_AQ_VSI_TYPE_PF
;
767 ctxt
.info
.valid_sections
= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID
);
768 ctxt
.info
.switch_id
|= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB
);
770 aq_ret
= i40e_aq_update_vsi_params(&vsi
->back
->hw
, &ctxt
, NULL
);
772 dev_info(&pf
->pdev
->dev
,
773 "%s: update vsi switch failed, aq_err=%d\n",
774 __func__
, vsi
->back
->hw
.aq
.asq_last_status
);
780 * i40e_disable_pf_switch_lb
781 * @pf: pointer to the pf structure
783 * disable switch loop back or die - no point in a return value
785 static void i40e_disable_pf_switch_lb(struct i40e_pf
*pf
)
787 struct i40e_vsi
*vsi
= pf
->vsi
[pf
->lan_vsi
];
788 struct i40e_vsi_context ctxt
;
791 ctxt
.seid
= pf
->main_vsi_seid
;
792 ctxt
.pf_num
= pf
->hw
.pf_id
;
794 aq_ret
= i40e_aq_get_vsi_params(&pf
->hw
, &ctxt
, NULL
);
796 dev_info(&pf
->pdev
->dev
,
797 "%s couldn't get pf vsi config, err %d, aq_err %d\n",
798 __func__
, aq_ret
, pf
->hw
.aq
.asq_last_status
);
801 ctxt
.flags
= I40E_AQ_VSI_TYPE_PF
;
802 ctxt
.info
.valid_sections
= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID
);
803 ctxt
.info
.switch_id
&= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB
);
805 aq_ret
= i40e_aq_update_vsi_params(&vsi
->back
->hw
, &ctxt
, NULL
);
807 dev_info(&pf
->pdev
->dev
,
808 "%s: update vsi switch failed, aq_err=%d\n",
809 __func__
, vsi
->back
->hw
.aq
.asq_last_status
);
815 * @pf: pointer to the pf structure
819 void i40e_free_vfs(struct i40e_pf
*pf
)
821 struct i40e_hw
*hw
= &pf
->hw
;
822 u32 reg_idx
, bit_idx
;
828 /* Disable interrupt 0 so we don't try to handle the VFLR. */
829 i40e_irq_dynamic_disable_icr0(pf
);
831 mdelay(10); /* let any messages in transit get finished up */
832 /* free up vf resources */
833 tmp
= pf
->num_alloc_vfs
;
834 pf
->num_alloc_vfs
= 0;
835 for (i
= 0; i
< tmp
; i
++) {
836 if (test_bit(I40E_VF_STAT_INIT
, &pf
->vf
[i
].vf_states
))
837 i40e_free_vf_res(&pf
->vf
[i
]);
838 /* disable qp mappings */
839 i40e_disable_vf_mappings(&pf
->vf
[i
]);
845 /* This check is for when the driver is unloaded while VFs are
846 * assigned. Setting the number of VFs to 0 through sysfs is caught
847 * before this function ever gets called.
849 if (!i40e_vfs_are_assigned(pf
)) {
850 pci_disable_sriov(pf
->pdev
);
851 /* Acknowledge VFLR for all VFS. Without this, VFs will fail to
852 * work correctly when SR-IOV gets re-enabled.
854 for (vf_id
= 0; vf_id
< tmp
; vf_id
++) {
855 reg_idx
= (hw
->func_caps
.vf_base_id
+ vf_id
) / 32;
856 bit_idx
= (hw
->func_caps
.vf_base_id
+ vf_id
) % 32;
857 wr32(hw
, I40E_GLGEN_VFLRSTAT(reg_idx
), (1 << bit_idx
));
859 i40e_disable_pf_switch_lb(pf
);
861 dev_warn(&pf
->pdev
->dev
,
862 "unable to disable SR-IOV because VFs are assigned.\n");
865 /* Re-enable interrupt 0. */
866 i40e_irq_dynamic_enable_icr0(pf
);
869 #ifdef CONFIG_PCI_IOV
872 * @pf: pointer to the pf structure
873 * @num_alloc_vfs: number of vfs to allocate
875 * allocate vf resources
877 int i40e_alloc_vfs(struct i40e_pf
*pf
, u16 num_alloc_vfs
)
882 /* Disable interrupt 0 so we don't try to handle the VFLR. */
883 i40e_irq_dynamic_disable_icr0(pf
);
885 /* Check to see if we're just allocating resources for extant VFs */
886 if (pci_num_vf(pf
->pdev
) != num_alloc_vfs
) {
887 ret
= pci_enable_sriov(pf
->pdev
, num_alloc_vfs
);
889 dev_err(&pf
->pdev
->dev
,
890 "Failed to enable SR-IOV, error %d.\n", ret
);
891 pf
->num_alloc_vfs
= 0;
895 /* allocate memory */
896 vfs
= kcalloc(num_alloc_vfs
, sizeof(struct i40e_vf
), GFP_KERNEL
);
902 /* apply default profile */
903 for (i
= 0; i
< num_alloc_vfs
; i
++) {
905 vfs
[i
].parent_type
= I40E_SWITCH_ELEMENT_TYPE_VEB
;
908 /* assign default capabilities */
909 set_bit(I40E_VIRTCHNL_VF_CAP_L2
, &vfs
[i
].vf_caps
);
910 /* vf resources get allocated during reset */
911 i40e_reset_vf(&vfs
[i
], false);
913 /* enable vf vplan_qtable mappings */
914 i40e_enable_vf_mappings(&vfs
[i
]);
917 pf
->num_alloc_vfs
= num_alloc_vfs
;
919 i40e_enable_pf_switch_lb(pf
);
924 /* Re-enable interrupt 0. */
925 i40e_irq_dynamic_enable_icr0(pf
);
/**
 * i40e_pci_sriov_enable
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of vfs to allocate
 *
 * Enable or change the number of VFs
 **/
static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int pre_existing_vfs = pci_num_vf(pdev);
	int err = 0;

	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		/* changing the VF count: tear down the old set first */
		i40e_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		goto out;

	if (num_vfs > pf->num_req_vfs) {
		err = -EPERM;
		goto err_out;
	}

	err = i40e_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
		goto err_out;
	}

out:
	return num_vfs;

err_out:
	return err;
#endif
	return 0;
}
971 * i40e_pci_sriov_configure
972 * @pdev: pointer to a pci_dev structure
973 * @num_vfs: number of vfs to allocate
975 * Enable or change the number of VFs. Called when the user updates the number
978 int i40e_pci_sriov_configure(struct pci_dev
*pdev
, int num_vfs
)
980 struct i40e_pf
*pf
= pci_get_drvdata(pdev
);
983 return i40e_pci_sriov_enable(pdev
, num_vfs
);
985 if (!i40e_vfs_are_assigned(pf
)) {
988 dev_warn(&pdev
->dev
, "Unable to free VFs because some are assigned to VMs.\n");
994 /***********************virtual channel routines******************/
997 * i40e_vc_send_msg_to_vf
998 * @vf: pointer to the vf info
999 * @v_opcode: virtual channel opcode
1000 * @v_retval: virtual channel return value
1001 * @msg: pointer to the msg buffer
1002 * @msglen: msg length
1006 static int i40e_vc_send_msg_to_vf(struct i40e_vf
*vf
, u32 v_opcode
,
1007 u32 v_retval
, u8
*msg
, u16 msglen
)
1009 struct i40e_pf
*pf
= vf
->pf
;
1010 struct i40e_hw
*hw
= &pf
->hw
;
1011 int true_vf_id
= vf
->vf_id
+ hw
->func_caps
.vf_base_id
;
1014 /* single place to detect unsuccessful return values */
1016 vf
->num_invalid_msgs
++;
1017 dev_err(&pf
->pdev
->dev
, "Failed opcode %d Error: %d\n",
1018 v_opcode
, v_retval
);
1019 if (vf
->num_invalid_msgs
>
1020 I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED
) {
1021 dev_err(&pf
->pdev
->dev
,
1022 "Number of invalid messages exceeded for VF %d\n",
1024 dev_err(&pf
->pdev
->dev
, "Use PF Control I/F to enable the VF\n");
1025 set_bit(I40E_VF_STAT_DISABLED
, &vf
->vf_states
);
1028 vf
->num_valid_msgs
++;
1031 aq_ret
= i40e_aq_send_msg_to_vf(hw
, true_vf_id
, v_opcode
, v_retval
,
1034 dev_err(&pf
->pdev
->dev
,
1035 "Unable to send the message to VF %d aq_err %d\n",
1036 vf
->vf_id
, pf
->hw
.aq
.asq_last_status
);
1044 * i40e_vc_send_resp_to_vf
1045 * @vf: pointer to the vf info
1046 * @opcode: operation code
1047 * @retval: return value
1049 * send resp msg to vf
1051 static int i40e_vc_send_resp_to_vf(struct i40e_vf
*vf
,
1052 enum i40e_virtchnl_ops opcode
,
1055 return i40e_vc_send_msg_to_vf(vf
, opcode
, retval
, NULL
, 0);
1059 * i40e_vc_get_version_msg
1060 * @vf: pointer to the vf info
1062 * called from the vf to request the API version used by the PF
1064 static int i40e_vc_get_version_msg(struct i40e_vf
*vf
)
1066 struct i40e_virtchnl_version_info info
= {
1067 I40E_VIRTCHNL_VERSION_MAJOR
, I40E_VIRTCHNL_VERSION_MINOR
1070 return i40e_vc_send_msg_to_vf(vf
, I40E_VIRTCHNL_OP_VERSION
,
1071 I40E_SUCCESS
, (u8
*)&info
,
1073 i40e_virtchnl_version_info
));
1077 * i40e_vc_get_vf_resources_msg
1078 * @vf: pointer to the vf info
1079 * @msg: pointer to the msg buffer
1080 * @msglen: msg length
1082 * called from the vf to request its resources
1084 static int i40e_vc_get_vf_resources_msg(struct i40e_vf
*vf
)
1086 struct i40e_virtchnl_vf_resource
*vfres
= NULL
;
1087 struct i40e_pf
*pf
= vf
->pf
;
1088 i40e_status aq_ret
= 0;
1089 struct i40e_vsi
*vsi
;
1094 if (!test_bit(I40E_VF_STAT_INIT
, &vf
->vf_states
)) {
1095 aq_ret
= I40E_ERR_PARAM
;
1099 len
= (sizeof(struct i40e_virtchnl_vf_resource
) +
1100 sizeof(struct i40e_virtchnl_vsi_resource
) * num_vsis
);
1102 vfres
= kzalloc(len
, GFP_KERNEL
);
1104 aq_ret
= I40E_ERR_NO_MEMORY
;
1109 vfres
->vf_offload_flags
= I40E_VIRTCHNL_VF_OFFLOAD_L2
;
1110 vsi
= pf
->vsi
[vf
->lan_vsi_index
];
1111 if (!vsi
->info
.pvid
)
1112 vfres
->vf_offload_flags
|= I40E_VIRTCHNL_VF_OFFLOAD_VLAN
;
1114 vfres
->num_vsis
= num_vsis
;
1115 vfres
->num_queue_pairs
= vf
->num_queue_pairs
;
1116 vfres
->max_vectors
= pf
->hw
.func_caps
.num_msix_vectors_vf
;
1117 if (vf
->lan_vsi_index
) {
1118 vfres
->vsi_res
[i
].vsi_id
= vf
->lan_vsi_index
;
1119 vfres
->vsi_res
[i
].vsi_type
= I40E_VSI_SRIOV
;
1120 vfres
->vsi_res
[i
].num_queue_pairs
=
1121 pf
->vsi
[vf
->lan_vsi_index
]->num_queue_pairs
;
1122 memcpy(vfres
->vsi_res
[i
].default_mac_addr
,
1123 vf
->default_lan_addr
.addr
, ETH_ALEN
);
1126 set_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
);
1129 /* send the response back to the vf */
1130 ret
= i40e_vc_send_msg_to_vf(vf
, I40E_VIRTCHNL_OP_GET_VF_RESOURCES
,
1131 aq_ret
, (u8
*)vfres
, len
);
1138 * i40e_vc_reset_vf_msg
1139 * @vf: pointer to the vf info
1140 * @msg: pointer to the msg buffer
1141 * @msglen: msg length
1143 * called from the vf to reset itself,
1144 * unlike other virtchnl messages, pf driver
1145 * doesn't send the response back to the vf
1147 static void i40e_vc_reset_vf_msg(struct i40e_vf
*vf
)
1149 if (test_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
))
1150 i40e_reset_vf(vf
, false);
1154 * i40e_vc_config_promiscuous_mode_msg
1155 * @vf: pointer to the vf info
1156 * @msg: pointer to the msg buffer
1157 * @msglen: msg length
1159 * called from the vf to configure the promiscuous mode of
1162 static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf
*vf
,
1163 u8
*msg
, u16 msglen
)
1165 struct i40e_virtchnl_promisc_info
*info
=
1166 (struct i40e_virtchnl_promisc_info
*)msg
;
1167 struct i40e_pf
*pf
= vf
->pf
;
1168 struct i40e_hw
*hw
= &pf
->hw
;
1169 bool allmulti
= false;
1170 bool promisc
= false;
1173 if (!test_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
) ||
1174 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE
, &vf
->vf_caps
) ||
1175 !i40e_vc_isvalid_vsi_id(vf
, info
->vsi_id
) ||
1176 (pf
->vsi
[info
->vsi_id
]->type
!= I40E_VSI_FCOE
)) {
1177 aq_ret
= I40E_ERR_PARAM
;
1181 if (info
->flags
& I40E_FLAG_VF_UNICAST_PROMISC
)
1183 aq_ret
= i40e_aq_set_vsi_unicast_promiscuous(hw
, info
->vsi_id
,
1188 if (info
->flags
& I40E_FLAG_VF_MULTICAST_PROMISC
)
1190 aq_ret
= i40e_aq_set_vsi_multicast_promiscuous(hw
, info
->vsi_id
,
1194 /* send the response to the vf */
1195 return i40e_vc_send_resp_to_vf(vf
,
1196 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
,
1201 * i40e_vc_config_queues_msg
1202 * @vf: pointer to the vf info
1203 * @msg: pointer to the msg buffer
1204 * @msglen: msg length
1206 * called from the vf to configure the rx/tx
1209 static int i40e_vc_config_queues_msg(struct i40e_vf
*vf
, u8
*msg
, u16 msglen
)
1211 struct i40e_virtchnl_vsi_queue_config_info
*qci
=
1212 (struct i40e_virtchnl_vsi_queue_config_info
*)msg
;
1213 struct i40e_virtchnl_queue_pair_info
*qpi
;
1214 u16 vsi_id
, vsi_queue_id
;
1215 i40e_status aq_ret
= 0;
1218 if (!test_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
)) {
1219 aq_ret
= I40E_ERR_PARAM
;
1223 vsi_id
= qci
->vsi_id
;
1224 if (!i40e_vc_isvalid_vsi_id(vf
, vsi_id
)) {
1225 aq_ret
= I40E_ERR_PARAM
;
1228 for (i
= 0; i
< qci
->num_queue_pairs
; i
++) {
1229 qpi
= &qci
->qpair
[i
];
1230 vsi_queue_id
= qpi
->txq
.queue_id
;
1231 if ((qpi
->txq
.vsi_id
!= vsi_id
) ||
1232 (qpi
->rxq
.vsi_id
!= vsi_id
) ||
1233 (qpi
->rxq
.queue_id
!= vsi_queue_id
) ||
1234 !i40e_vc_isvalid_queue_id(vf
, vsi_id
, vsi_queue_id
)) {
1235 aq_ret
= I40E_ERR_PARAM
;
1239 if (i40e_config_vsi_rx_queue(vf
, vsi_id
, vsi_queue_id
,
1241 i40e_config_vsi_tx_queue(vf
, vsi_id
, vsi_queue_id
,
1243 aq_ret
= I40E_ERR_PARAM
;
1249 /* send the response to the vf */
1250 return i40e_vc_send_resp_to_vf(vf
, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
,
1255 * i40e_vc_config_irq_map_msg
1256 * @vf: pointer to the vf info
1257 * @msg: pointer to the msg buffer
1258 * @msglen: msg length
1260 * called from the vf to configure the irq to
1263 static int i40e_vc_config_irq_map_msg(struct i40e_vf
*vf
, u8
*msg
, u16 msglen
)
1265 struct i40e_virtchnl_irq_map_info
*irqmap_info
=
1266 (struct i40e_virtchnl_irq_map_info
*)msg
;
1267 struct i40e_virtchnl_vector_map
*map
;
1268 u16 vsi_id
, vsi_queue_id
, vector_id
;
1269 i40e_status aq_ret
= 0;
1270 unsigned long tempmap
;
1273 if (!test_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
)) {
1274 aq_ret
= I40E_ERR_PARAM
;
1278 for (i
= 0; i
< irqmap_info
->num_vectors
; i
++) {
1279 map
= &irqmap_info
->vecmap
[i
];
1281 vector_id
= map
->vector_id
;
1282 vsi_id
= map
->vsi_id
;
1283 /* validate msg params */
1284 if (!i40e_vc_isvalid_vector_id(vf
, vector_id
) ||
1285 !i40e_vc_isvalid_vsi_id(vf
, vsi_id
)) {
1286 aq_ret
= I40E_ERR_PARAM
;
1290 /* lookout for the invalid queue index */
1291 tempmap
= map
->rxq_map
;
1292 for_each_set_bit(vsi_queue_id
, &tempmap
, I40E_MAX_VSI_QP
) {
1293 if (!i40e_vc_isvalid_queue_id(vf
, vsi_id
,
1295 aq_ret
= I40E_ERR_PARAM
;
1300 tempmap
= map
->txq_map
;
1301 for_each_set_bit(vsi_queue_id
, &tempmap
, I40E_MAX_VSI_QP
) {
1302 if (!i40e_vc_isvalid_queue_id(vf
, vsi_id
,
1304 aq_ret
= I40E_ERR_PARAM
;
1309 i40e_config_irq_link_list(vf
, vsi_id
, map
);
1312 /* send the response to the vf */
1313 return i40e_vc_send_resp_to_vf(vf
, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
,
1318 * i40e_vc_enable_queues_msg
1319 * @vf: pointer to the vf info
1320 * @msg: pointer to the msg buffer
1321 * @msglen: msg length
1323 * called from the vf to enable all or specific queue(s)
1325 static int i40e_vc_enable_queues_msg(struct i40e_vf
*vf
, u8
*msg
, u16 msglen
)
1327 struct i40e_virtchnl_queue_select
*vqs
=
1328 (struct i40e_virtchnl_queue_select
*)msg
;
1329 struct i40e_pf
*pf
= vf
->pf
;
1330 u16 vsi_id
= vqs
->vsi_id
;
1331 i40e_status aq_ret
= 0;
1333 if (!test_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
)) {
1334 aq_ret
= I40E_ERR_PARAM
;
1338 if (!i40e_vc_isvalid_vsi_id(vf
, vsi_id
)) {
1339 aq_ret
= I40E_ERR_PARAM
;
1343 if ((0 == vqs
->rx_queues
) && (0 == vqs
->tx_queues
)) {
1344 aq_ret
= I40E_ERR_PARAM
;
1347 if (i40e_vsi_control_rings(pf
->vsi
[vsi_id
], true))
1348 aq_ret
= I40E_ERR_TIMEOUT
;
1350 /* send the response to the vf */
1351 return i40e_vc_send_resp_to_vf(vf
, I40E_VIRTCHNL_OP_ENABLE_QUEUES
,
1356 * i40e_vc_disable_queues_msg
1357 * @vf: pointer to the vf info
1358 * @msg: pointer to the msg buffer
1359 * @msglen: msg length
1361 * called from the vf to disable all or specific
1364 static int i40e_vc_disable_queues_msg(struct i40e_vf
*vf
, u8
*msg
, u16 msglen
)
1366 struct i40e_virtchnl_queue_select
*vqs
=
1367 (struct i40e_virtchnl_queue_select
*)msg
;
1368 struct i40e_pf
*pf
= vf
->pf
;
1369 u16 vsi_id
= vqs
->vsi_id
;
1370 i40e_status aq_ret
= 0;
1372 if (!test_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
)) {
1373 aq_ret
= I40E_ERR_PARAM
;
1377 if (!i40e_vc_isvalid_vsi_id(vf
, vqs
->vsi_id
)) {
1378 aq_ret
= I40E_ERR_PARAM
;
1382 if ((0 == vqs
->rx_queues
) && (0 == vqs
->tx_queues
)) {
1383 aq_ret
= I40E_ERR_PARAM
;
1386 if (i40e_vsi_control_rings(pf
->vsi
[vsi_id
], false))
1387 aq_ret
= I40E_ERR_TIMEOUT
;
1390 /* send the response to the vf */
1391 return i40e_vc_send_resp_to_vf(vf
, I40E_VIRTCHNL_OP_DISABLE_QUEUES
,
1396 * i40e_vc_get_stats_msg
1397 * @vf: pointer to the vf info
1398 * @msg: pointer to the msg buffer
1399 * @msglen: msg length
1401 * called from the vf to get vsi stats
1403 static int i40e_vc_get_stats_msg(struct i40e_vf
*vf
, u8
*msg
, u16 msglen
)
1405 struct i40e_virtchnl_queue_select
*vqs
=
1406 (struct i40e_virtchnl_queue_select
*)msg
;
1407 struct i40e_pf
*pf
= vf
->pf
;
1408 struct i40e_eth_stats stats
;
1409 i40e_status aq_ret
= 0;
1410 struct i40e_vsi
*vsi
;
1412 memset(&stats
, 0, sizeof(struct i40e_eth_stats
));
1414 if (!test_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
)) {
1415 aq_ret
= I40E_ERR_PARAM
;
1419 if (!i40e_vc_isvalid_vsi_id(vf
, vqs
->vsi_id
)) {
1420 aq_ret
= I40E_ERR_PARAM
;
1424 vsi
= pf
->vsi
[vqs
->vsi_id
];
1426 aq_ret
= I40E_ERR_PARAM
;
1429 i40e_update_eth_stats(vsi
);
1430 stats
= vsi
->eth_stats
;
1433 /* send the response back to the vf */
1434 return i40e_vc_send_msg_to_vf(vf
, I40E_VIRTCHNL_OP_GET_STATS
, aq_ret
,
1435 (u8
*)&stats
, sizeof(stats
));
1439 * i40e_check_vf_permission
1440 * @vf: pointer to the vf info
1441 * @macaddr: pointer to the MAC Address being checked
1443 * Check if the VF has permission to add or delete unicast MAC address
1444 * filters and return error code -EPERM if not. Then check if the
1445 * address filter requested is broadcast or zero and if so return
1446 * an invalid MAC address error code.
1448 static inline int i40e_check_vf_permission(struct i40e_vf
*vf
, u8
*macaddr
)
1450 struct i40e_pf
*pf
= vf
->pf
;
1453 if (is_broadcast_ether_addr(macaddr
) ||
1454 is_zero_ether_addr(macaddr
)) {
1455 dev_err(&pf
->pdev
->dev
, "invalid VF MAC addr %pM\n", macaddr
);
1456 ret
= I40E_ERR_INVALID_MAC_ADDR
;
1457 } else if (vf
->pf_set_mac
&& !is_multicast_ether_addr(macaddr
) &&
1458 !ether_addr_equal(macaddr
, vf
->default_lan_addr
.addr
)) {
1459 /* If the host VMM administrator has set the VF MAC address
1460 * administratively via the ndo_set_vf_mac command then deny
1461 * permission to the VF to add or delete unicast MAC addresses.
1462 * The VF may request to set the MAC address filter already
1463 * assigned to it so do not return an error in that case.
1465 dev_err(&pf
->pdev
->dev
,
1466 "VF attempting to override administratively set MAC address\nPlease reload the VF driver to resume normal operation\n");
1473 * i40e_vc_add_mac_addr_msg
1474 * @vf: pointer to the vf info
1475 * @msg: pointer to the msg buffer
1476 * @msglen: msg length
1478 * add guest mac address filter
1480 static int i40e_vc_add_mac_addr_msg(struct i40e_vf
*vf
, u8
*msg
, u16 msglen
)
1482 struct i40e_virtchnl_ether_addr_list
*al
=
1483 (struct i40e_virtchnl_ether_addr_list
*)msg
;
1484 struct i40e_pf
*pf
= vf
->pf
;
1485 struct i40e_vsi
*vsi
= NULL
;
1486 u16 vsi_id
= al
->vsi_id
;
1487 i40e_status ret
= 0;
1490 if (!test_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
) ||
1491 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE
, &vf
->vf_caps
) ||
1492 !i40e_vc_isvalid_vsi_id(vf
, vsi_id
)) {
1493 ret
= I40E_ERR_PARAM
;
1497 for (i
= 0; i
< al
->num_elements
; i
++) {
1498 ret
= i40e_check_vf_permission(vf
, al
->list
[i
].addr
);
1502 vsi
= pf
->vsi
[vsi_id
];
1504 /* add new addresses to the list */
1505 for (i
= 0; i
< al
->num_elements
; i
++) {
1506 struct i40e_mac_filter
*f
;
1508 f
= i40e_find_mac(vsi
, al
->list
[i
].addr
, true, false);
1510 if (i40e_is_vsi_in_vlan(vsi
))
1511 f
= i40e_put_mac_in_vlan(vsi
, al
->list
[i
].addr
,
1514 f
= i40e_add_filter(vsi
, al
->list
[i
].addr
, -1,
1519 dev_err(&pf
->pdev
->dev
,
1520 "Unable to add VF MAC filter\n");
1521 ret
= I40E_ERR_PARAM
;
1526 /* program the updated filter list */
1527 if (i40e_sync_vsi_filters(vsi
))
1528 dev_err(&pf
->pdev
->dev
, "Unable to program VF MAC filters\n");
1531 /* send the response to the vf */
1532 return i40e_vc_send_resp_to_vf(vf
, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
,
1537 * i40e_vc_del_mac_addr_msg
1538 * @vf: pointer to the vf info
1539 * @msg: pointer to the msg buffer
1540 * @msglen: msg length
1542 * remove guest mac address filter
1544 static int i40e_vc_del_mac_addr_msg(struct i40e_vf
*vf
, u8
*msg
, u16 msglen
)
1546 struct i40e_virtchnl_ether_addr_list
*al
=
1547 (struct i40e_virtchnl_ether_addr_list
*)msg
;
1548 struct i40e_pf
*pf
= vf
->pf
;
1549 struct i40e_vsi
*vsi
= NULL
;
1550 u16 vsi_id
= al
->vsi_id
;
1551 i40e_status ret
= 0;
1554 if (!test_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
) ||
1555 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE
, &vf
->vf_caps
) ||
1556 !i40e_vc_isvalid_vsi_id(vf
, vsi_id
)) {
1557 ret
= I40E_ERR_PARAM
;
1561 for (i
= 0; i
< al
->num_elements
; i
++) {
1562 if (is_broadcast_ether_addr(al
->list
[i
].addr
) ||
1563 is_zero_ether_addr(al
->list
[i
].addr
)) {
1564 dev_err(&pf
->pdev
->dev
, "invalid VF MAC addr %pM\n",
1566 ret
= I40E_ERR_INVALID_MAC_ADDR
;
1570 vsi
= pf
->vsi
[vsi_id
];
1572 /* delete addresses from the list */
1573 for (i
= 0; i
< al
->num_elements
; i
++)
1574 i40e_del_filter(vsi
, al
->list
[i
].addr
,
1575 I40E_VLAN_ANY
, true, false);
1577 /* program the updated filter list */
1578 if (i40e_sync_vsi_filters(vsi
))
1579 dev_err(&pf
->pdev
->dev
, "Unable to program VF MAC filters\n");
1582 /* send the response to the vf */
1583 return i40e_vc_send_resp_to_vf(vf
, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
,
1588 * i40e_vc_add_vlan_msg
1589 * @vf: pointer to the vf info
1590 * @msg: pointer to the msg buffer
1591 * @msglen: msg length
1593 * program guest vlan id
1595 static int i40e_vc_add_vlan_msg(struct i40e_vf
*vf
, u8
*msg
, u16 msglen
)
1597 struct i40e_virtchnl_vlan_filter_list
*vfl
=
1598 (struct i40e_virtchnl_vlan_filter_list
*)msg
;
1599 struct i40e_pf
*pf
= vf
->pf
;
1600 struct i40e_vsi
*vsi
= NULL
;
1601 u16 vsi_id
= vfl
->vsi_id
;
1602 i40e_status aq_ret
= 0;
1605 if (!test_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
) ||
1606 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE
, &vf
->vf_caps
) ||
1607 !i40e_vc_isvalid_vsi_id(vf
, vsi_id
)) {
1608 aq_ret
= I40E_ERR_PARAM
;
1612 for (i
= 0; i
< vfl
->num_elements
; i
++) {
1613 if (vfl
->vlan_id
[i
] > I40E_MAX_VLANID
) {
1614 aq_ret
= I40E_ERR_PARAM
;
1615 dev_err(&pf
->pdev
->dev
,
1616 "invalid VF VLAN id %d\n", vfl
->vlan_id
[i
]);
1620 vsi
= pf
->vsi
[vsi_id
];
1621 if (vsi
->info
.pvid
) {
1622 aq_ret
= I40E_ERR_PARAM
;
1626 i40e_vlan_stripping_enable(vsi
);
1627 for (i
= 0; i
< vfl
->num_elements
; i
++) {
1628 /* add new VLAN filter */
1629 int ret
= i40e_vsi_add_vlan(vsi
, vfl
->vlan_id
[i
]);
1631 dev_err(&pf
->pdev
->dev
,
1632 "Unable to add VF vlan filter %d, error %d\n",
1633 vfl
->vlan_id
[i
], ret
);
1637 /* send the response to the vf */
1638 return i40e_vc_send_resp_to_vf(vf
, I40E_VIRTCHNL_OP_ADD_VLAN
, aq_ret
);
1642 * i40e_vc_remove_vlan_msg
1643 * @vf: pointer to the vf info
1644 * @msg: pointer to the msg buffer
1645 * @msglen: msg length
1647 * remove programmed guest vlan id
1649 static int i40e_vc_remove_vlan_msg(struct i40e_vf
*vf
, u8
*msg
, u16 msglen
)
1651 struct i40e_virtchnl_vlan_filter_list
*vfl
=
1652 (struct i40e_virtchnl_vlan_filter_list
*)msg
;
1653 struct i40e_pf
*pf
= vf
->pf
;
1654 struct i40e_vsi
*vsi
= NULL
;
1655 u16 vsi_id
= vfl
->vsi_id
;
1656 i40e_status aq_ret
= 0;
1659 if (!test_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
) ||
1660 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE
, &vf
->vf_caps
) ||
1661 !i40e_vc_isvalid_vsi_id(vf
, vsi_id
)) {
1662 aq_ret
= I40E_ERR_PARAM
;
1666 for (i
= 0; i
< vfl
->num_elements
; i
++) {
1667 if (vfl
->vlan_id
[i
] > I40E_MAX_VLANID
) {
1668 aq_ret
= I40E_ERR_PARAM
;
1673 vsi
= pf
->vsi
[vsi_id
];
1674 if (vsi
->info
.pvid
) {
1675 aq_ret
= I40E_ERR_PARAM
;
1679 for (i
= 0; i
< vfl
->num_elements
; i
++) {
1680 int ret
= i40e_vsi_kill_vlan(vsi
, vfl
->vlan_id
[i
]);
1682 dev_err(&pf
->pdev
->dev
,
1683 "Unable to delete VF vlan filter %d, error %d\n",
1684 vfl
->vlan_id
[i
], ret
);
1688 /* send the response to the vf */
1689 return i40e_vc_send_resp_to_vf(vf
, I40E_VIRTCHNL_OP_DEL_VLAN
, aq_ret
);
1693 * i40e_vc_validate_vf_msg
1694 * @vf: pointer to the vf info
1695 * @msg: pointer to the msg buffer
1696 * @msglen: msg length
1697 * @msghndl: msg handle
1701 static int i40e_vc_validate_vf_msg(struct i40e_vf
*vf
, u32 v_opcode
,
1702 u32 v_retval
, u8
*msg
, u16 msglen
)
1704 bool err_msg_format
= false;
1707 /* Check if VF is disabled. */
1708 if (test_bit(I40E_VF_STAT_DISABLED
, &vf
->vf_states
))
1709 return I40E_ERR_PARAM
;
1711 /* Validate message length. */
1713 case I40E_VIRTCHNL_OP_VERSION
:
1714 valid_len
= sizeof(struct i40e_virtchnl_version_info
);
1716 case I40E_VIRTCHNL_OP_RESET_VF
:
1717 case I40E_VIRTCHNL_OP_GET_VF_RESOURCES
:
1720 case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE
:
1721 valid_len
= sizeof(struct i40e_virtchnl_txq_info
);
1723 case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE
:
1724 valid_len
= sizeof(struct i40e_virtchnl_rxq_info
);
1726 case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
:
1727 valid_len
= sizeof(struct i40e_virtchnl_vsi_queue_config_info
);
1728 if (msglen
>= valid_len
) {
1729 struct i40e_virtchnl_vsi_queue_config_info
*vqc
=
1730 (struct i40e_virtchnl_vsi_queue_config_info
*)msg
;
1731 valid_len
+= (vqc
->num_queue_pairs
*
1733 i40e_virtchnl_queue_pair_info
));
1734 if (vqc
->num_queue_pairs
== 0)
1735 err_msg_format
= true;
1738 case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
:
1739 valid_len
= sizeof(struct i40e_virtchnl_irq_map_info
);
1740 if (msglen
>= valid_len
) {
1741 struct i40e_virtchnl_irq_map_info
*vimi
=
1742 (struct i40e_virtchnl_irq_map_info
*)msg
;
1743 valid_len
+= (vimi
->num_vectors
*
1744 sizeof(struct i40e_virtchnl_vector_map
));
1745 if (vimi
->num_vectors
== 0)
1746 err_msg_format
= true;
1749 case I40E_VIRTCHNL_OP_ENABLE_QUEUES
:
1750 case I40E_VIRTCHNL_OP_DISABLE_QUEUES
:
1751 valid_len
= sizeof(struct i40e_virtchnl_queue_select
);
1753 case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
:
1754 case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
:
1755 valid_len
= sizeof(struct i40e_virtchnl_ether_addr_list
);
1756 if (msglen
>= valid_len
) {
1757 struct i40e_virtchnl_ether_addr_list
*veal
=
1758 (struct i40e_virtchnl_ether_addr_list
*)msg
;
1759 valid_len
+= veal
->num_elements
*
1760 sizeof(struct i40e_virtchnl_ether_addr
);
1761 if (veal
->num_elements
== 0)
1762 err_msg_format
= true;
1765 case I40E_VIRTCHNL_OP_ADD_VLAN
:
1766 case I40E_VIRTCHNL_OP_DEL_VLAN
:
1767 valid_len
= sizeof(struct i40e_virtchnl_vlan_filter_list
);
1768 if (msglen
>= valid_len
) {
1769 struct i40e_virtchnl_vlan_filter_list
*vfl
=
1770 (struct i40e_virtchnl_vlan_filter_list
*)msg
;
1771 valid_len
+= vfl
->num_elements
* sizeof(u16
);
1772 if (vfl
->num_elements
== 0)
1773 err_msg_format
= true;
1776 case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
:
1777 valid_len
= sizeof(struct i40e_virtchnl_promisc_info
);
1779 case I40E_VIRTCHNL_OP_GET_STATS
:
1780 valid_len
= sizeof(struct i40e_virtchnl_queue_select
);
1782 /* These are always errors coming from the VF. */
1783 case I40E_VIRTCHNL_OP_EVENT
:
1784 case I40E_VIRTCHNL_OP_UNKNOWN
:
1789 /* few more checks */
1790 if ((valid_len
!= msglen
) || (err_msg_format
)) {
1791 i40e_vc_send_resp_to_vf(vf
, v_opcode
, I40E_ERR_PARAM
);
1799 * i40e_vc_process_vf_msg
1800 * @pf: pointer to the pf structure
1801 * @vf_id: source vf id
1802 * @msg: pointer to the msg buffer
1803 * @msglen: msg length
1804 * @msghndl: msg handle
1806 * called from the common aeq/arq handler to
1807 * process request from vf
1809 int i40e_vc_process_vf_msg(struct i40e_pf
*pf
, u16 vf_id
, u32 v_opcode
,
1810 u32 v_retval
, u8
*msg
, u16 msglen
)
1812 struct i40e_hw
*hw
= &pf
->hw
;
1813 unsigned int local_vf_id
= vf_id
- hw
->func_caps
.vf_base_id
;
1817 pf
->vf_aq_requests
++;
1818 if (local_vf_id
>= pf
->num_alloc_vfs
)
1820 vf
= &(pf
->vf
[local_vf_id
]);
1821 /* perform basic checks on the msg */
1822 ret
= i40e_vc_validate_vf_msg(vf
, v_opcode
, v_retval
, msg
, msglen
);
1825 dev_err(&pf
->pdev
->dev
, "Invalid message from vf %d, opcode %d, len %d\n",
1826 local_vf_id
, v_opcode
, msglen
);
1831 case I40E_VIRTCHNL_OP_VERSION
:
1832 ret
= i40e_vc_get_version_msg(vf
);
1834 case I40E_VIRTCHNL_OP_GET_VF_RESOURCES
:
1835 ret
= i40e_vc_get_vf_resources_msg(vf
);
1837 case I40E_VIRTCHNL_OP_RESET_VF
:
1838 i40e_vc_reset_vf_msg(vf
);
1841 case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
:
1842 ret
= i40e_vc_config_promiscuous_mode_msg(vf
, msg
, msglen
);
1844 case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
:
1845 ret
= i40e_vc_config_queues_msg(vf
, msg
, msglen
);
1847 case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
:
1848 ret
= i40e_vc_config_irq_map_msg(vf
, msg
, msglen
);
1850 case I40E_VIRTCHNL_OP_ENABLE_QUEUES
:
1851 ret
= i40e_vc_enable_queues_msg(vf
, msg
, msglen
);
1853 case I40E_VIRTCHNL_OP_DISABLE_QUEUES
:
1854 ret
= i40e_vc_disable_queues_msg(vf
, msg
, msglen
);
1856 case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
:
1857 ret
= i40e_vc_add_mac_addr_msg(vf
, msg
, msglen
);
1859 case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
:
1860 ret
= i40e_vc_del_mac_addr_msg(vf
, msg
, msglen
);
1862 case I40E_VIRTCHNL_OP_ADD_VLAN
:
1863 ret
= i40e_vc_add_vlan_msg(vf
, msg
, msglen
);
1865 case I40E_VIRTCHNL_OP_DEL_VLAN
:
1866 ret
= i40e_vc_remove_vlan_msg(vf
, msg
, msglen
);
1868 case I40E_VIRTCHNL_OP_GET_STATS
:
1869 ret
= i40e_vc_get_stats_msg(vf
, msg
, msglen
);
1871 case I40E_VIRTCHNL_OP_UNKNOWN
:
1873 dev_err(&pf
->pdev
->dev
, "Unsupported opcode %d from vf %d\n",
1874 v_opcode
, local_vf_id
);
1875 ret
= i40e_vc_send_resp_to_vf(vf
, v_opcode
,
1876 I40E_ERR_NOT_IMPLEMENTED
);
1884 * i40e_vc_process_vflr_event
1885 * @pf: pointer to the pf structure
1887 * called from the vlfr irq handler to
1888 * free up vf resources and state variables
1890 int i40e_vc_process_vflr_event(struct i40e_pf
*pf
)
1892 u32 reg
, reg_idx
, bit_idx
, vf_id
;
1893 struct i40e_hw
*hw
= &pf
->hw
;
1896 if (!test_bit(__I40E_VFLR_EVENT_PENDING
, &pf
->state
))
1899 clear_bit(__I40E_VFLR_EVENT_PENDING
, &pf
->state
);
1900 for (vf_id
= 0; vf_id
< pf
->num_alloc_vfs
; vf_id
++) {
1901 reg_idx
= (hw
->func_caps
.vf_base_id
+ vf_id
) / 32;
1902 bit_idx
= (hw
->func_caps
.vf_base_id
+ vf_id
) % 32;
1903 /* read GLGEN_VFLRSTAT register to find out the flr vfs */
1904 vf
= &pf
->vf
[vf_id
];
1905 reg
= rd32(hw
, I40E_GLGEN_VFLRSTAT(reg_idx
));
1906 if (reg
& (1 << bit_idx
)) {
1907 /* clear the bit in GLGEN_VFLRSTAT */
1908 wr32(hw
, I40E_GLGEN_VFLRSTAT(reg_idx
), (1 << bit_idx
));
1910 if (!test_bit(__I40E_DOWN
, &pf
->state
))
1911 i40e_reset_vf(vf
, true);
1915 /* re-enable vflr interrupt cause */
1916 reg
= rd32(hw
, I40E_PFINT_ICR0_ENA
);
1917 reg
|= I40E_PFINT_ICR0_ENA_VFLR_MASK
;
1918 wr32(hw
, I40E_PFINT_ICR0_ENA
, reg
);
1925 * i40e_vc_vf_broadcast
1926 * @pf: pointer to the pf structure
1927 * @opcode: operation code
1928 * @retval: return value
1929 * @msg: pointer to the msg buffer
1930 * @msglen: msg length
1932 * send a message to all VFs on a given PF
1934 static void i40e_vc_vf_broadcast(struct i40e_pf
*pf
,
1935 enum i40e_virtchnl_ops v_opcode
,
1936 i40e_status v_retval
, u8
*msg
,
1939 struct i40e_hw
*hw
= &pf
->hw
;
1940 struct i40e_vf
*vf
= pf
->vf
;
1943 for (i
= 0; i
< pf
->num_alloc_vfs
; i
++) {
1944 /* Ignore return value on purpose - a given VF may fail, but
1945 * we need to keep going and send to all of them
1947 i40e_aq_send_msg_to_vf(hw
, vf
->vf_id
, v_opcode
, v_retval
,
1954 * i40e_vc_notify_link_state
1955 * @pf: pointer to the pf structure
1957 * send a link status message to all VFs on a given PF
1959 void i40e_vc_notify_link_state(struct i40e_pf
*pf
)
1961 struct i40e_virtchnl_pf_event pfe
;
1962 struct i40e_hw
*hw
= &pf
->hw
;
1963 struct i40e_vf
*vf
= pf
->vf
;
1964 struct i40e_link_status
*ls
= &pf
->hw
.phy
.link_info
;
1967 pfe
.event
= I40E_VIRTCHNL_EVENT_LINK_CHANGE
;
1968 pfe
.severity
= I40E_PF_EVENT_SEVERITY_INFO
;
1969 for (i
= 0; i
< pf
->num_alloc_vfs
; i
++) {
1970 if (vf
->link_forced
) {
1971 pfe
.event_data
.link_event
.link_status
= vf
->link_up
;
1972 pfe
.event_data
.link_event
.link_speed
=
1973 (vf
->link_up
? I40E_LINK_SPEED_40GB
: 0);
1975 pfe
.event_data
.link_event
.link_status
=
1976 ls
->link_info
& I40E_AQ_LINK_UP
;
1977 pfe
.event_data
.link_event
.link_speed
= ls
->link_speed
;
1979 i40e_aq_send_msg_to_vf(hw
, vf
->vf_id
, I40E_VIRTCHNL_OP_EVENT
,
1980 0, (u8
*)&pfe
, sizeof(pfe
),
1987 * i40e_vc_notify_reset
1988 * @pf: pointer to the pf structure
1990 * indicate a pending reset to all VFs on a given PF
1992 void i40e_vc_notify_reset(struct i40e_pf
*pf
)
1994 struct i40e_virtchnl_pf_event pfe
;
1996 pfe
.event
= I40E_VIRTCHNL_EVENT_RESET_IMPENDING
;
1997 pfe
.severity
= I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM
;
1998 i40e_vc_vf_broadcast(pf
, I40E_VIRTCHNL_OP_EVENT
, I40E_SUCCESS
,
1999 (u8
*)&pfe
, sizeof(struct i40e_virtchnl_pf_event
));
2003 * i40e_vc_notify_vf_reset
2004 * @vf: pointer to the vf structure
2006 * indicate a pending reset to the given VF
2008 void i40e_vc_notify_vf_reset(struct i40e_vf
*vf
)
2010 struct i40e_virtchnl_pf_event pfe
;
2012 pfe
.event
= I40E_VIRTCHNL_EVENT_RESET_IMPENDING
;
2013 pfe
.severity
= I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM
;
2014 i40e_aq_send_msg_to_vf(&vf
->pf
->hw
, vf
->vf_id
, I40E_VIRTCHNL_OP_EVENT
,
2015 I40E_SUCCESS
, (u8
*)&pfe
,
2016 sizeof(struct i40e_virtchnl_pf_event
), NULL
);
2020 * i40e_ndo_set_vf_mac
2021 * @netdev: network interface device structure
2022 * @vf_id: vf identifier
2025 * program vf mac address
2027 int i40e_ndo_set_vf_mac(struct net_device
*netdev
, int vf_id
, u8
*mac
)
2029 struct i40e_netdev_priv
*np
= netdev_priv(netdev
);
2030 struct i40e_vsi
*vsi
= np
->vsi
;
2031 struct i40e_pf
*pf
= vsi
->back
;
2032 struct i40e_mac_filter
*f
;
2036 /* validate the request */
2037 if (vf_id
>= pf
->num_alloc_vfs
) {
2038 dev_err(&pf
->pdev
->dev
,
2039 "Invalid VF Identifier %d\n", vf_id
);
2044 vf
= &(pf
->vf
[vf_id
]);
2045 vsi
= pf
->vsi
[vf
->lan_vsi_index
];
2046 if (!test_bit(I40E_VF_STAT_INIT
, &vf
->vf_states
)) {
2047 dev_err(&pf
->pdev
->dev
,
2048 "Uninitialized VF %d\n", vf_id
);
2053 if (!is_valid_ether_addr(mac
)) {
2054 dev_err(&pf
->pdev
->dev
,
2055 "Invalid VF ethernet address\n");
2060 /* delete the temporary mac address */
2061 i40e_del_filter(vsi
, vf
->default_lan_addr
.addr
, vf
->port_vlan_id
,
2064 /* add the new mac address */
2065 f
= i40e_add_filter(vsi
, mac
, vf
->port_vlan_id
, true, false);
2067 dev_err(&pf
->pdev
->dev
,
2068 "Unable to add VF ucast filter\n");
2073 dev_info(&pf
->pdev
->dev
, "Setting MAC %pM on VF %d\n", mac
, vf_id
);
2074 /* program mac filter */
2075 if (i40e_sync_vsi_filters(vsi
)) {
2076 dev_err(&pf
->pdev
->dev
, "Unable to program ucast filters\n");
2080 memcpy(vf
->default_lan_addr
.addr
, mac
, ETH_ALEN
);
2081 vf
->pf_set_mac
= true;
2082 dev_info(&pf
->pdev
->dev
, "Reload the VF driver to make this change effective.\n");
2090 * i40e_ndo_set_vf_port_vlan
2091 * @netdev: network interface device structure
2092 * @vf_id: vf identifier
2093 * @vlan_id: mac address
2094 * @qos: priority setting
2096 * program vf vlan id and/or qos
2098 int i40e_ndo_set_vf_port_vlan(struct net_device
*netdev
,
2099 int vf_id
, u16 vlan_id
, u8 qos
)
2101 struct i40e_netdev_priv
*np
= netdev_priv(netdev
);
2102 struct i40e_pf
*pf
= np
->vsi
->back
;
2103 struct i40e_vsi
*vsi
;
2107 /* validate the request */
2108 if (vf_id
>= pf
->num_alloc_vfs
) {
2109 dev_err(&pf
->pdev
->dev
, "Invalid VF Identifier %d\n", vf_id
);
2114 if ((vlan_id
> I40E_MAX_VLANID
) || (qos
> 7)) {
2115 dev_err(&pf
->pdev
->dev
, "Invalid VF Parameters\n");
2120 vf
= &(pf
->vf
[vf_id
]);
2121 vsi
= pf
->vsi
[vf
->lan_vsi_index
];
2122 if (!test_bit(I40E_VF_STAT_INIT
, &vf
->vf_states
)) {
2123 dev_err(&pf
->pdev
->dev
, "Uninitialized VF %d\n", vf_id
);
2128 if (vsi
->info
.pvid
== 0 && i40e_is_vsi_in_vlan(vsi
)) {
2129 dev_err(&pf
->pdev
->dev
,
2130 "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
2132 /* Administrator Error - knock the VF offline until he does
2133 * the right thing by reconfiguring his network correctly
2134 * and then reloading the VF driver.
2136 i40e_vc_disable_vf(pf
, vf
);
2139 /* Check for condition where there was already a port VLAN ID
2140 * filter set and now it is being deleted by setting it to zero.
2141 * Additionally check for the condition where there was a port
2142 * VLAN but now there is a new and different port VLAN being set.
2143 * Before deleting all the old VLAN filters we must add new ones
2144 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
2145 * MAC addresses deleted.
2147 if ((!(vlan_id
|| qos
) ||
2148 (vlan_id
| qos
) != le16_to_cpu(vsi
->info
.pvid
)) &&
2150 ret
= i40e_vsi_add_vlan(vsi
, I40E_VLAN_ANY
);
2152 if (vsi
->info
.pvid
) {
2154 ret
= i40e_vsi_kill_vlan(vsi
, (le16_to_cpu(vsi
->info
.pvid
) &
2157 dev_info(&vsi
->back
->pdev
->dev
,
2158 "remove VLAN failed, ret=%d, aq_err=%d\n",
2159 ret
, pf
->hw
.aq
.asq_last_status
);
2163 ret
= i40e_vsi_add_pvid(vsi
,
2164 vlan_id
| (qos
<< I40E_VLAN_PRIORITY_SHIFT
));
2166 i40e_vsi_remove_pvid(vsi
);
2169 dev_info(&pf
->pdev
->dev
, "Setting VLAN %d, QOS 0x%x on VF %d\n",
2170 vlan_id
, qos
, vf_id
);
2172 /* add new VLAN filter */
2173 ret
= i40e_vsi_add_vlan(vsi
, vlan_id
);
2175 dev_info(&vsi
->back
->pdev
->dev
,
2176 "add VF VLAN failed, ret=%d aq_err=%d\n", ret
,
2177 vsi
->back
->hw
.aq
.asq_last_status
);
2180 /* Kill non-vlan MAC filters - ignore error return since
2181 * there might not be any non-vlan MAC filters.
2183 i40e_vsi_kill_vlan(vsi
, I40E_VLAN_ANY
);
2187 dev_err(&pf
->pdev
->dev
, "Unable to update VF vsi context\n");
2190 /* The Port VLAN needs to be saved across resets the same as the
2191 * default LAN MAC address.
2193 vf
->port_vlan_id
= le16_to_cpu(vsi
->info
.pvid
);
2201 * i40e_ndo_set_vf_bw
2202 * @netdev: network interface device structure
2203 * @vf_id: vf identifier
2206 * configure vf tx rate
2208 int i40e_ndo_set_vf_bw(struct net_device
*netdev
, int vf_id
, int min_tx_rate
,
2211 struct i40e_netdev_priv
*np
= netdev_priv(netdev
);
2212 struct i40e_pf
*pf
= np
->vsi
->back
;
2213 struct i40e_vsi
*vsi
;
2218 /* validate the request */
2219 if (vf_id
>= pf
->num_alloc_vfs
) {
2220 dev_err(&pf
->pdev
->dev
, "Invalid VF Identifier %d.\n", vf_id
);
2226 dev_err(&pf
->pdev
->dev
, "Invalid min tx rate (%d) (greater than 0) specified for vf %d.\n",
2227 min_tx_rate
, vf_id
);
2231 vf
= &(pf
->vf
[vf_id
]);
2232 vsi
= pf
->vsi
[vf
->lan_vsi_index
];
2233 if (!test_bit(I40E_VF_STAT_INIT
, &vf
->vf_states
)) {
2234 dev_err(&pf
->pdev
->dev
, "Uninitialized VF %d.\n", vf_id
);
2239 switch (pf
->hw
.phy
.link_info
.link_speed
) {
2240 case I40E_LINK_SPEED_40GB
:
2243 case I40E_LINK_SPEED_10GB
:
2246 case I40E_LINK_SPEED_1GB
:
2253 if (max_tx_rate
> speed
) {
2254 dev_err(&pf
->pdev
->dev
, "Invalid max tx rate %d specified for vf %d.",
2255 max_tx_rate
, vf
->vf_id
);
2260 /* Tx rate credits are in values of 50Mbps, 0 is disabled*/
2261 ret
= i40e_aq_config_vsi_bw_limit(&pf
->hw
, vsi
->seid
, max_tx_rate
/ 50,
2264 dev_err(&pf
->pdev
->dev
, "Unable to set max tx rate, error code %d.\n",
2269 vf
->tx_rate
= max_tx_rate
;
2275 * i40e_ndo_get_vf_config
2276 * @netdev: network interface device structure
2277 * @vf_id: vf identifier
2278 * @ivi: vf configuration structure
2280 * return vf configuration
2282 int i40e_ndo_get_vf_config(struct net_device
*netdev
,
2283 int vf_id
, struct ifla_vf_info
*ivi
)
2285 struct i40e_netdev_priv
*np
= netdev_priv(netdev
);
2286 struct i40e_vsi
*vsi
= np
->vsi
;
2287 struct i40e_pf
*pf
= vsi
->back
;
2291 /* validate the request */
2292 if (vf_id
>= pf
->num_alloc_vfs
) {
2293 dev_err(&pf
->pdev
->dev
, "Invalid VF Identifier %d\n", vf_id
);
2298 vf
= &(pf
->vf
[vf_id
]);
2299 /* first vsi is always the LAN vsi */
2300 vsi
= pf
->vsi
[vf
->lan_vsi_index
];
2301 if (!test_bit(I40E_VF_STAT_INIT
, &vf
->vf_states
)) {
2302 dev_err(&pf
->pdev
->dev
, "Uninitialized VF %d\n", vf_id
);
2309 memcpy(&ivi
->mac
, vf
->default_lan_addr
.addr
, ETH_ALEN
);
2311 ivi
->max_tx_rate
= vf
->tx_rate
;
2312 ivi
->min_tx_rate
= 0;
2313 ivi
->vlan
= le16_to_cpu(vsi
->info
.pvid
) & I40E_VLAN_MASK
;
2314 ivi
->qos
= (le16_to_cpu(vsi
->info
.pvid
) & I40E_PRIORITY_MASK
) >>
2315 I40E_VLAN_PRIORITY_SHIFT
;
2316 if (vf
->link_forced
== false)
2317 ivi
->linkstate
= IFLA_VF_LINK_STATE_AUTO
;
2318 else if (vf
->link_up
== true)
2319 ivi
->linkstate
= IFLA_VF_LINK_STATE_ENABLE
;
2321 ivi
->linkstate
= IFLA_VF_LINK_STATE_DISABLE
;
2330 * i40e_ndo_set_vf_link_state
2331 * @netdev: network interface device structure
2332 * @vf_id: vf identifier
2333 * @link: required link state
2335 * Set the link state of a specified VF, regardless of physical link state
2337 int i40e_ndo_set_vf_link_state(struct net_device
*netdev
, int vf_id
, int link
)
2339 struct i40e_netdev_priv
*np
= netdev_priv(netdev
);
2340 struct i40e_pf
*pf
= np
->vsi
->back
;
2341 struct i40e_virtchnl_pf_event pfe
;
2342 struct i40e_hw
*hw
= &pf
->hw
;
2346 /* validate the request */
2347 if (vf_id
>= pf
->num_alloc_vfs
) {
2348 dev_err(&pf
->pdev
->dev
, "Invalid VF Identifier %d\n", vf_id
);
2353 vf
= &pf
->vf
[vf_id
];
2355 pfe
.event
= I40E_VIRTCHNL_EVENT_LINK_CHANGE
;
2356 pfe
.severity
= I40E_PF_EVENT_SEVERITY_INFO
;
2359 case IFLA_VF_LINK_STATE_AUTO
:
2360 vf
->link_forced
= false;
2361 pfe
.event_data
.link_event
.link_status
=
2362 pf
->hw
.phy
.link_info
.link_info
& I40E_AQ_LINK_UP
;
2363 pfe
.event_data
.link_event
.link_speed
=
2364 pf
->hw
.phy
.link_info
.link_speed
;
2366 case IFLA_VF_LINK_STATE_ENABLE
:
2367 vf
->link_forced
= true;
2369 pfe
.event_data
.link_event
.link_status
= true;
2370 pfe
.event_data
.link_event
.link_speed
= I40E_LINK_SPEED_40GB
;
2372 case IFLA_VF_LINK_STATE_DISABLE
:
2373 vf
->link_forced
= true;
2374 vf
->link_up
= false;
2375 pfe
.event_data
.link_event
.link_status
= false;
2376 pfe
.event_data
.link_event
.link_speed
= 0;
2382 /* Notify the VF of its new link state */
2383 i40e_aq_send_msg_to_vf(hw
, vf
->vf_id
, I40E_VIRTCHNL_OP_EVENT
,
2384 0, (u8
*)&pfe
, sizeof(pfe
), NULL
);