1 /*******************************************************************************
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 Intel Corporation.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 ******************************************************************************/
30 /***********************misc routines*****************************/
33 * i40e_vc_isvalid_vsi_id
34 * @vf: pointer to the vf info
35 * @vsi_id: vf relative vsi id
37 * check for the valid vsi id
39 static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf
*vf
, u8 vsi_id
)
41 struct i40e_pf
*pf
= vf
->pf
;
43 return pf
->vsi
[vsi_id
]->vf_id
== vf
->vf_id
;
47 * i40e_vc_isvalid_queue_id
48 * @vf: pointer to the vf info
50 * @qid: vsi relative queue id
52 * check for the valid queue id
54 static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf
*vf
, u8 vsi_id
,
57 struct i40e_pf
*pf
= vf
->pf
;
59 return qid
< pf
->vsi
[vsi_id
]->num_queue_pairs
;
63 * i40e_vc_isvalid_vector_id
64 * @vf: pointer to the vf info
65 * @vector_id: vf relative vector id
67 * check for the valid vector id
69 static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf
*vf
, u8 vector_id
)
71 struct i40e_pf
*pf
= vf
->pf
;
73 return vector_id
<= pf
->hw
.func_caps
.num_msix_vectors_vf
;
76 /***********************vf resource mgmt routines*****************/
79 * i40e_vc_get_pf_queue_id
80 * @vf: pointer to the vf info
81 * @vsi_idx: index of VSI in PF struct
82 * @vsi_queue_id: vsi relative queue id
84 * return pf relative queue id
86 static u16
i40e_vc_get_pf_queue_id(struct i40e_vf
*vf
, u8 vsi_idx
,
89 struct i40e_pf
*pf
= vf
->pf
;
90 struct i40e_vsi
*vsi
= pf
->vsi
[vsi_idx
];
91 u16 pf_queue_id
= I40E_QUEUE_END_OF_LIST
;
93 if (le16_to_cpu(vsi
->info
.mapping_flags
) &
94 I40E_AQ_VSI_QUE_MAP_NONCONTIG
)
96 le16_to_cpu(vsi
->info
.queue_mapping
[vsi_queue_id
]);
98 pf_queue_id
= le16_to_cpu(vsi
->info
.queue_mapping
[0]) +
105 * i40e_config_irq_link_list
106 * @vf: pointer to the vf info
107 * @vsi_idx: index of VSI in PF struct
108 * @vecmap: irq map info
110 * configure irq link list from the map
112 static void i40e_config_irq_link_list(struct i40e_vf
*vf
, u16 vsi_idx
,
113 struct i40e_virtchnl_vector_map
*vecmap
)
115 unsigned long linklistmap
= 0, tempmap
;
116 struct i40e_pf
*pf
= vf
->pf
;
117 struct i40e_hw
*hw
= &pf
->hw
;
118 u16 vsi_queue_id
, pf_queue_id
;
119 enum i40e_queue_type qtype
;
120 u16 next_q
, vector_id
;
124 vector_id
= vecmap
->vector_id
;
127 reg_idx
= I40E_VPINT_LNKLST0(vf
->vf_id
);
129 reg_idx
= I40E_VPINT_LNKLSTN(
130 (pf
->hw
.func_caps
.num_msix_vectors_vf
131 * vf
->vf_id
) + (vector_id
- 1));
133 if (vecmap
->rxq_map
== 0 && vecmap
->txq_map
== 0) {
134 /* Special case - No queues mapped on this vector */
135 wr32(hw
, reg_idx
, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK
);
138 tempmap
= vecmap
->rxq_map
;
139 for_each_set_bit(vsi_queue_id
, &tempmap
, I40E_MAX_VSI_QP
) {
141 (I40E_VIRTCHNL_SUPPORTED_QTYPES
*
145 tempmap
= vecmap
->txq_map
;
146 for_each_set_bit(vsi_queue_id
, &tempmap
, I40E_MAX_VSI_QP
) {
148 (I40E_VIRTCHNL_SUPPORTED_QTYPES
* vsi_queue_id
152 next_q
= find_first_bit(&linklistmap
,
154 I40E_VIRTCHNL_SUPPORTED_QTYPES
));
155 vsi_queue_id
= next_q
/I40E_VIRTCHNL_SUPPORTED_QTYPES
;
156 qtype
= next_q
%I40E_VIRTCHNL_SUPPORTED_QTYPES
;
157 pf_queue_id
= i40e_vc_get_pf_queue_id(vf
, vsi_idx
, vsi_queue_id
);
158 reg
= ((qtype
<< I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT
) | pf_queue_id
);
160 wr32(hw
, reg_idx
, reg
);
162 while (next_q
< (I40E_MAX_VSI_QP
* I40E_VIRTCHNL_SUPPORTED_QTYPES
)) {
164 case I40E_QUEUE_TYPE_RX
:
165 reg_idx
= I40E_QINT_RQCTL(pf_queue_id
);
166 itr_idx
= vecmap
->rxitr_idx
;
168 case I40E_QUEUE_TYPE_TX
:
169 reg_idx
= I40E_QINT_TQCTL(pf_queue_id
);
170 itr_idx
= vecmap
->txitr_idx
;
176 next_q
= find_next_bit(&linklistmap
,
178 I40E_VIRTCHNL_SUPPORTED_QTYPES
),
180 if (next_q
< (I40E_MAX_VSI_QP
* I40E_VIRTCHNL_SUPPORTED_QTYPES
)) {
181 vsi_queue_id
= next_q
/ I40E_VIRTCHNL_SUPPORTED_QTYPES
;
182 qtype
= next_q
% I40E_VIRTCHNL_SUPPORTED_QTYPES
;
183 pf_queue_id
= i40e_vc_get_pf_queue_id(vf
, vsi_idx
,
186 pf_queue_id
= I40E_QUEUE_END_OF_LIST
;
190 /* format for the RQCTL & TQCTL regs is same */
192 (qtype
<< I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT
) |
193 (pf_queue_id
<< I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT
) |
194 (1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT
) |
195 (itr_idx
<< I40E_QINT_RQCTL_ITR_INDX_SHIFT
);
196 wr32(hw
, reg_idx
, reg
);
204 * i40e_config_vsi_tx_queue
205 * @vf: pointer to the vf info
206 * @vsi_idx: index of VSI in PF struct
207 * @vsi_queue_id: vsi relative queue index
208 * @info: config. info
212 static int i40e_config_vsi_tx_queue(struct i40e_vf
*vf
, u16 vsi_idx
,
214 struct i40e_virtchnl_txq_info
*info
)
216 struct i40e_pf
*pf
= vf
->pf
;
217 struct i40e_hw
*hw
= &pf
->hw
;
218 struct i40e_hmc_obj_txq tx_ctx
;
223 pf_queue_id
= i40e_vc_get_pf_queue_id(vf
, vsi_idx
, vsi_queue_id
);
225 /* clear the context structure first */
226 memset(&tx_ctx
, 0, sizeof(struct i40e_hmc_obj_txq
));
228 /* only set the required fields */
229 tx_ctx
.base
= info
->dma_ring_addr
/ 128;
230 tx_ctx
.qlen
= info
->ring_len
;
231 tx_ctx
.rdylist
= le16_to_cpu(pf
->vsi
[vsi_idx
]->info
.qs_handle
[0]);
232 tx_ctx
.rdylist_act
= 0;
234 /* clear the context in the HMC */
235 ret
= i40e_clear_lan_tx_queue_context(hw
, pf_queue_id
);
237 dev_err(&pf
->pdev
->dev
,
238 "Failed to clear VF LAN Tx queue context %d, error: %d\n",
244 /* set the context in the HMC */
245 ret
= i40e_set_lan_tx_queue_context(hw
, pf_queue_id
, &tx_ctx
);
247 dev_err(&pf
->pdev
->dev
,
248 "Failed to set VF LAN Tx queue context %d error: %d\n",
254 /* associate this queue with the PCI VF function */
255 qtx_ctl
= I40E_QTX_CTL_VF_QUEUE
;
256 qtx_ctl
|= ((hw
->pf_id
<< I40E_QTX_CTL_PF_INDX_SHIFT
)
257 & I40E_QTX_CTL_PF_INDX_MASK
);
258 qtx_ctl
|= (((vf
->vf_id
+ hw
->func_caps
.vf_base_id
)
259 << I40E_QTX_CTL_VFVM_INDX_SHIFT
)
260 & I40E_QTX_CTL_VFVM_INDX_MASK
);
261 wr32(hw
, I40E_QTX_CTL(pf_queue_id
), qtx_ctl
);
269 * i40e_config_vsi_rx_queue
270 * @vf: pointer to the vf info
271 * @vsi_idx: index of VSI in PF struct
272 * @vsi_queue_id: vsi relative queue index
273 * @info: config. info
277 static int i40e_config_vsi_rx_queue(struct i40e_vf
*vf
, u16 vsi_idx
,
279 struct i40e_virtchnl_rxq_info
*info
)
281 struct i40e_pf
*pf
= vf
->pf
;
282 struct i40e_hw
*hw
= &pf
->hw
;
283 struct i40e_hmc_obj_rxq rx_ctx
;
287 pf_queue_id
= i40e_vc_get_pf_queue_id(vf
, vsi_idx
, vsi_queue_id
);
289 /* clear the context structure first */
290 memset(&rx_ctx
, 0, sizeof(struct i40e_hmc_obj_rxq
));
292 /* only set the required fields */
293 rx_ctx
.base
= info
->dma_ring_addr
/ 128;
294 rx_ctx
.qlen
= info
->ring_len
;
296 if (info
->splithdr_enabled
) {
297 rx_ctx
.hsplit_0
= I40E_RX_SPLIT_L2
|
299 I40E_RX_SPLIT_TCP_UDP
|
301 /* header length validation */
302 if (info
->hdr_size
> ((2 * 1024) - 64)) {
306 rx_ctx
.hbuff
= info
->hdr_size
>> I40E_RXQ_CTX_HBUFF_SHIFT
;
308 /* set splitalways mode 10b */
312 /* databuffer length validation */
313 if (info
->databuffer_size
> ((16 * 1024) - 128)) {
317 rx_ctx
.dbuff
= info
->databuffer_size
>> I40E_RXQ_CTX_DBUFF_SHIFT
;
319 /* max pkt. length validation */
320 if (info
->max_pkt_size
>= (16 * 1024) || info
->max_pkt_size
< 64) {
324 rx_ctx
.rxmax
= info
->max_pkt_size
;
326 /* enable 32bytes desc always */
330 rx_ctx
.tphrdesc_ena
= 1;
331 rx_ctx
.tphwdesc_ena
= 1;
332 rx_ctx
.tphdata_ena
= 1;
333 rx_ctx
.tphhead_ena
= 1;
334 rx_ctx
.lrxqthresh
= 2;
337 /* clear the context in the HMC */
338 ret
= i40e_clear_lan_rx_queue_context(hw
, pf_queue_id
);
340 dev_err(&pf
->pdev
->dev
,
341 "Failed to clear VF LAN Rx queue context %d, error: %d\n",
347 /* set the context in the HMC */
348 ret
= i40e_set_lan_rx_queue_context(hw
, pf_queue_id
, &rx_ctx
);
350 dev_err(&pf
->pdev
->dev
,
351 "Failed to set VF LAN Rx queue context %d error: %d\n",
363 * @vf: pointer to the vf info
364 * @type: type of VSI to allocate
366 * alloc vf vsi context & resources
368 static int i40e_alloc_vsi_res(struct i40e_vf
*vf
, enum i40e_vsi_type type
)
370 struct i40e_mac_filter
*f
= NULL
;
371 struct i40e_pf
*pf
= vf
->pf
;
372 struct i40e_hw
*hw
= &pf
->hw
;
373 struct i40e_vsi
*vsi
;
376 vsi
= i40e_vsi_setup(pf
, type
, pf
->vsi
[pf
->lan_vsi
]->seid
, vf
->vf_id
);
379 dev_err(&pf
->pdev
->dev
,
380 "add vsi failed for vf %d, aq_err %d\n",
381 vf
->vf_id
, pf
->hw
.aq
.asq_last_status
);
383 goto error_alloc_vsi_res
;
385 if (type
== I40E_VSI_SRIOV
) {
386 vf
->lan_vsi_index
= vsi
->idx
;
387 vf
->lan_vsi_id
= vsi
->id
;
388 dev_info(&pf
->pdev
->dev
,
389 "LAN VSI index %d, VSI id %d\n",
391 f
= i40e_add_filter(vsi
, vf
->default_lan_addr
.addr
,
396 dev_err(&pf
->pdev
->dev
, "Unable to add ucast filter\n");
398 goto error_alloc_vsi_res
;
401 /* program mac filter */
402 ret
= i40e_sync_vsi_filters(vsi
);
404 dev_err(&pf
->pdev
->dev
, "Unable to program ucast filters\n");
405 goto error_alloc_vsi_res
;
408 /* accept bcast pkts. by default */
409 ret
= i40e_aq_set_vsi_broadcast(hw
, vsi
->seid
, true, NULL
);
411 dev_err(&pf
->pdev
->dev
,
412 "set vsi bcast failed for vf %d, vsi %d, aq_err %d\n",
413 vf
->vf_id
, vsi
->idx
, pf
->hw
.aq
.asq_last_status
);
422 * i40e_enable_vf_mappings
423 * @vf: pointer to the vf info
427 static void i40e_enable_vf_mappings(struct i40e_vf
*vf
)
429 struct i40e_pf
*pf
= vf
->pf
;
430 struct i40e_hw
*hw
= &pf
->hw
;
431 u32 reg
, total_queue_pairs
= 0;
434 /* Tell the hardware we're using noncontiguous mapping. HW requires
435 * that VF queues be mapped using this method, even when they are
436 * contiguous in real life
438 wr32(hw
, I40E_VSILAN_QBASE(vf
->lan_vsi_id
),
439 I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK
);
441 /* enable VF vplan_qtable mappings */
442 reg
= I40E_VPLAN_MAPENA_TXRX_ENA_MASK
;
443 wr32(hw
, I40E_VPLAN_MAPENA(vf
->vf_id
), reg
);
445 /* map PF queues to VF queues */
446 for (j
= 0; j
< pf
->vsi
[vf
->lan_vsi_index
]->num_queue_pairs
; j
++) {
447 u16 qid
= i40e_vc_get_pf_queue_id(vf
, vf
->lan_vsi_index
, j
);
448 reg
= (qid
& I40E_VPLAN_QTABLE_QINDEX_MASK
);
449 wr32(hw
, I40E_VPLAN_QTABLE(total_queue_pairs
, vf
->vf_id
), reg
);
453 /* map PF queues to VSI */
454 for (j
= 0; j
< 7; j
++) {
455 if (j
* 2 >= pf
->vsi
[vf
->lan_vsi_index
]->num_queue_pairs
) {
456 reg
= 0x07FF07FF; /* unused */
458 u16 qid
= i40e_vc_get_pf_queue_id(vf
, vf
->lan_vsi_index
,
461 qid
= i40e_vc_get_pf_queue_id(vf
, vf
->lan_vsi_index
,
465 wr32(hw
, I40E_VSILAN_QTABLE(j
, vf
->lan_vsi_id
), reg
);
472 * i40e_disable_vf_mappings
473 * @vf: pointer to the vf info
475 * disable vf mappings
477 static void i40e_disable_vf_mappings(struct i40e_vf
*vf
)
479 struct i40e_pf
*pf
= vf
->pf
;
480 struct i40e_hw
*hw
= &pf
->hw
;
483 /* disable qp mappings */
484 wr32(hw
, I40E_VPLAN_MAPENA(vf
->vf_id
), 0);
485 for (i
= 0; i
< I40E_MAX_VSI_QP
; i
++)
486 wr32(hw
, I40E_VPLAN_QTABLE(i
, vf
->vf_id
),
487 I40E_QUEUE_END_OF_LIST
);
493 * @vf: pointer to the vf info
497 static void i40e_free_vf_res(struct i40e_vf
*vf
)
499 struct i40e_pf
*pf
= vf
->pf
;
500 struct i40e_hw
*hw
= &pf
->hw
;
504 /* free vsi & disconnect it from the parent uplink */
505 if (vf
->lan_vsi_index
) {
506 i40e_vsi_release(pf
->vsi
[vf
->lan_vsi_index
]);
507 vf
->lan_vsi_index
= 0;
510 msix_vf
= pf
->hw
.func_caps
.num_msix_vectors_vf
+ 1;
511 /* disable interrupts so the VF starts in a known state */
512 for (i
= 0; i
< msix_vf
; i
++) {
513 /* format is same for both registers */
515 reg_idx
= I40E_VFINT_DYN_CTL0(vf
->vf_id
);
517 reg_idx
= I40E_VFINT_DYN_CTLN(((msix_vf
- 1) *
520 wr32(hw
, reg_idx
, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK
);
524 /* clear the irq settings */
525 for (i
= 0; i
< msix_vf
; i
++) {
526 /* format is same for both registers */
528 reg_idx
= I40E_VPINT_LNKLST0(vf
->vf_id
);
530 reg_idx
= I40E_VPINT_LNKLSTN(((msix_vf
- 1) *
533 reg
= (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK
|
534 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK
);
535 wr32(hw
, reg_idx
, reg
);
538 /* reset some of the state varibles keeping
539 * track of the resources
541 vf
->num_queue_pairs
= 0;
547 * @vf: pointer to the vf info
549 * allocate vf resources
551 static int i40e_alloc_vf_res(struct i40e_vf
*vf
)
553 struct i40e_pf
*pf
= vf
->pf
;
554 int total_queue_pairs
= 0;
557 /* allocate hw vsi context & associated resources */
558 ret
= i40e_alloc_vsi_res(vf
, I40E_VSI_SRIOV
);
561 total_queue_pairs
+= pf
->vsi
[vf
->lan_vsi_index
]->num_queue_pairs
;
562 set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE
, &vf
->vf_caps
);
564 /* store the total qps number for the runtime
567 vf
->num_queue_pairs
= total_queue_pairs
;
569 /* vf is now completely initialized */
570 set_bit(I40E_VF_STAT_INIT
, &vf
->vf_states
);
574 i40e_free_vf_res(vf
);
579 #define VF_DEVICE_STATUS 0xAA
580 #define VF_TRANS_PENDING_MASK 0x20
582 * i40e_quiesce_vf_pci
583 * @vf: pointer to the vf structure
585 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
586 * if the transactions never clear.
588 static int i40e_quiesce_vf_pci(struct i40e_vf
*vf
)
590 struct i40e_pf
*pf
= vf
->pf
;
591 struct i40e_hw
*hw
= &pf
->hw
;
595 reg
= rd32(hw
, I40E_PF_VT_PFALLOC
);
596 vf_abs_id
= vf
->vf_id
+ (reg
& I40E_PF_VT_PFALLOC_FIRSTVF_MASK
);
598 wr32(hw
, I40E_PF_PCI_CIAA
,
599 VF_DEVICE_STATUS
| (vf_abs_id
<< I40E_PF_PCI_CIAA_VF_NUM_SHIFT
));
600 for (i
= 0; i
< 100; i
++) {
601 reg
= rd32(hw
, I40E_PF_PCI_CIAD
);
602 if ((reg
& VF_TRANS_PENDING_MASK
) == 0)
611 * @vf: pointer to the vf structure
612 * @flr: VFLR was issued or not
616 void i40e_reset_vf(struct i40e_vf
*vf
, bool flr
)
618 struct i40e_pf
*pf
= vf
->pf
;
619 struct i40e_hw
*hw
= &pf
->hw
;
625 clear_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
);
627 /* In the case of a VFLR, the HW has already reset the VF and we
628 * just need to clean up, so don't hit the VFRTRIG register.
631 /* reset vf using VPGEN_VFRTRIG reg */
632 reg
= rd32(hw
, I40E_VPGEN_VFRTRIG(vf
->vf_id
));
633 reg
|= I40E_VPGEN_VFRTRIG_VFSWR_MASK
;
634 wr32(hw
, I40E_VPGEN_VFRTRIG(vf
->vf_id
), reg
);
638 if (i40e_quiesce_vf_pci(vf
))
639 dev_err(&pf
->pdev
->dev
, "VF %d PCI transactions stuck\n",
642 /* poll VPGEN_VFRSTAT reg to make sure
643 * that reset is complete
645 for (i
= 0; i
< 100; i
++) {
646 /* vf reset requires driver to first reset the
647 * vf & than poll the status register to make sure
648 * that the requested op was completed
652 reg
= rd32(hw
, I40E_VPGEN_VFRSTAT(vf
->vf_id
));
653 if (reg
& I40E_VPGEN_VFRSTAT_VFRD_MASK
) {
660 dev_err(&pf
->pdev
->dev
, "VF reset check timeout on VF %d\n",
662 wr32(hw
, I40E_VFGEN_RSTAT1(vf
->vf_id
), I40E_VFR_COMPLETED
);
663 /* clear the reset bit in the VPGEN_VFRTRIG reg */
664 reg
= rd32(hw
, I40E_VPGEN_VFRTRIG(vf
->vf_id
));
665 reg
&= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK
;
666 wr32(hw
, I40E_VPGEN_VFRTRIG(vf
->vf_id
), reg
);
668 /* On initial reset, we won't have any queues */
669 if (vf
->lan_vsi_index
== 0)
672 i40e_vsi_control_rings(pf
->vsi
[vf
->lan_vsi_index
], false);
674 /* reallocate vf resources to reset the VSI state */
675 i40e_free_vf_res(vf
);
677 i40e_alloc_vf_res(vf
);
678 i40e_enable_vf_mappings(vf
);
680 /* tell the VF the reset is done */
681 wr32(hw
, I40E_VFGEN_RSTAT1(vf
->vf_id
), I40E_VFR_VFACTIVE
);
686 * i40e_vfs_are_assigned
687 * @pf: pointer to the pf structure
689 * Determine if any VFs are assigned to VMs
691 static bool i40e_vfs_are_assigned(struct i40e_pf
*pf
)
693 struct pci_dev
*pdev
= pf
->pdev
;
694 struct pci_dev
*vfdev
;
696 /* loop through all the VFs to see if we own any that are assigned */
697 vfdev
= pci_get_device(PCI_VENDOR_ID_INTEL
, I40E_VF_DEVICE_ID
, NULL
);
699 /* if we don't own it we don't care */
700 if (vfdev
->is_virtfn
&& pci_physfn(vfdev
) == pdev
) {
701 /* if it is assigned we cannot release it */
702 if (vfdev
->dev_flags
& PCI_DEV_FLAGS_ASSIGNED
)
706 vfdev
= pci_get_device(PCI_VENDOR_ID_INTEL
,
716 * @pf: pointer to the pf structure
720 void i40e_free_vfs(struct i40e_pf
*pf
)
722 struct i40e_hw
*hw
= &pf
->hw
;
728 /* Disable interrupt 0 so we don't try to handle the VFLR. */
729 wr32(hw
, I40E_PFINT_DYN_CTL0
, 0);
732 /* free up vf resources */
733 for (i
= 0; i
< pf
->num_alloc_vfs
; i
++) {
734 if (test_bit(I40E_VF_STAT_INIT
, &pf
->vf
[i
].vf_states
))
735 i40e_free_vf_res(&pf
->vf
[i
]);
736 /* disable qp mappings */
737 i40e_disable_vf_mappings(&pf
->vf
[i
]);
742 pf
->num_alloc_vfs
= 0;
744 if (!i40e_vfs_are_assigned(pf
))
745 pci_disable_sriov(pf
->pdev
);
747 dev_warn(&pf
->pdev
->dev
,
748 "unable to disable SR-IOV because VFs are assigned.\n");
750 /* Re-enable interrupt 0. */
751 wr32(hw
, I40E_PFINT_DYN_CTL0
,
752 I40E_PFINT_DYN_CTL0_INTENA_MASK
|
753 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK
|
754 (I40E_ITR_NONE
<< I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT
));
758 #ifdef CONFIG_PCI_IOV
761 * @pf: pointer to the pf structure
762 * @num_alloc_vfs: number of vfs to allocate
764 * allocate vf resources
766 static int i40e_alloc_vfs(struct i40e_pf
*pf
, u16 num_alloc_vfs
)
771 ret
= pci_enable_sriov(pf
->pdev
, num_alloc_vfs
);
773 dev_err(&pf
->pdev
->dev
,
774 "pci_enable_sriov failed with error %d!\n", ret
);
775 pf
->num_alloc_vfs
= 0;
779 /* allocate memory */
780 vfs
= kzalloc(num_alloc_vfs
* sizeof(struct i40e_vf
), GFP_KERNEL
);
786 /* apply default profile */
787 for (i
= 0; i
< num_alloc_vfs
; i
++) {
789 vfs
[i
].parent_type
= I40E_SWITCH_ELEMENT_TYPE_VEB
;
792 /* assign default capabilities */
793 set_bit(I40E_VIRTCHNL_VF_CAP_L2
, &vfs
[i
].vf_caps
);
794 /* vf resources get allocated during reset */
795 i40e_reset_vf(&vfs
[i
], false);
797 /* enable vf vplan_qtable mappings */
798 i40e_enable_vf_mappings(&vfs
[i
]);
801 pf
->num_alloc_vfs
= num_alloc_vfs
;
812 * i40e_pci_sriov_enable
813 * @pdev: pointer to a pci_dev structure
814 * @num_vfs: number of vfs to allocate
816 * Enable or change the number of VFs
818 static int i40e_pci_sriov_enable(struct pci_dev
*pdev
, int num_vfs
)
820 #ifdef CONFIG_PCI_IOV
821 struct i40e_pf
*pf
= pci_get_drvdata(pdev
);
822 int pre_existing_vfs
= pci_num_vf(pdev
);
825 dev_info(&pdev
->dev
, "Allocating %d VFs.\n", num_vfs
);
826 if (pre_existing_vfs
&& pre_existing_vfs
!= num_vfs
)
828 else if (pre_existing_vfs
&& pre_existing_vfs
== num_vfs
)
831 if (num_vfs
> pf
->num_req_vfs
) {
836 err
= i40e_alloc_vfs(pf
, num_vfs
);
838 dev_warn(&pdev
->dev
, "Failed to enable SR-IOV: %d\n", err
);
/**
 * i40e_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of vfs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the number
 * of VFs in sysfs.
 **/
int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	/* a non-zero count (re)enables SR-IOV; zero tears it down */
	if (num_vfs)
		return i40e_pci_sriov_enable(pdev, num_vfs);

	i40e_free_vfs(pf);
	return 0;
}
870 /***********************virtual channel routines******************/
873 * i40e_vc_send_msg_to_vf
874 * @vf: pointer to the vf info
875 * @v_opcode: virtual channel opcode
876 * @v_retval: virtual channel return value
877 * @msg: pointer to the msg buffer
878 * @msglen: msg length
882 static int i40e_vc_send_msg_to_vf(struct i40e_vf
*vf
, u32 v_opcode
,
883 u32 v_retval
, u8
*msg
, u16 msglen
)
885 struct i40e_pf
*pf
= vf
->pf
;
886 struct i40e_hw
*hw
= &pf
->hw
;
889 /* single place to detect unsuccessful return values */
891 vf
->num_invalid_msgs
++;
892 dev_err(&pf
->pdev
->dev
, "Failed opcode %d Error: %d\n",
894 if (vf
->num_invalid_msgs
>
895 I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED
) {
896 dev_err(&pf
->pdev
->dev
,
897 "Number of invalid messages exceeded for VF %d\n",
899 dev_err(&pf
->pdev
->dev
, "Use PF Control I/F to enable the VF\n");
900 set_bit(I40E_VF_STAT_DISABLED
, &vf
->vf_states
);
903 vf
->num_valid_msgs
++;
906 aq_ret
= i40e_aq_send_msg_to_vf(hw
, vf
->vf_id
, v_opcode
, v_retval
,
909 dev_err(&pf
->pdev
->dev
,
910 "Unable to send the message to VF %d aq_err %d\n",
911 vf
->vf_id
, pf
->hw
.aq
.asq_last_status
);
919 * i40e_vc_send_resp_to_vf
920 * @vf: pointer to the vf info
921 * @opcode: operation code
922 * @retval: return value
924 * send resp msg to vf
926 static int i40e_vc_send_resp_to_vf(struct i40e_vf
*vf
,
927 enum i40e_virtchnl_ops opcode
,
930 return i40e_vc_send_msg_to_vf(vf
, opcode
, retval
, NULL
, 0);
934 * i40e_vc_get_version_msg
935 * @vf: pointer to the vf info
937 * called from the vf to request the API version used by the PF
939 static int i40e_vc_get_version_msg(struct i40e_vf
*vf
)
941 struct i40e_virtchnl_version_info info
= {
942 I40E_VIRTCHNL_VERSION_MAJOR
, I40E_VIRTCHNL_VERSION_MINOR
945 return i40e_vc_send_msg_to_vf(vf
, I40E_VIRTCHNL_OP_VERSION
,
946 I40E_SUCCESS
, (u8
*)&info
,
948 i40e_virtchnl_version_info
));
952 * i40e_vc_get_vf_resources_msg
953 * @vf: pointer to the vf info
954 * @msg: pointer to the msg buffer
955 * @msglen: msg length
957 * called from the vf to request its resources
959 static int i40e_vc_get_vf_resources_msg(struct i40e_vf
*vf
)
961 struct i40e_virtchnl_vf_resource
*vfres
= NULL
;
962 struct i40e_pf
*pf
= vf
->pf
;
963 i40e_status aq_ret
= 0;
964 struct i40e_vsi
*vsi
;
969 if (!test_bit(I40E_VF_STAT_INIT
, &vf
->vf_states
)) {
970 aq_ret
= I40E_ERR_PARAM
;
974 len
= (sizeof(struct i40e_virtchnl_vf_resource
) +
975 sizeof(struct i40e_virtchnl_vsi_resource
) * num_vsis
);
977 vfres
= kzalloc(len
, GFP_KERNEL
);
979 aq_ret
= I40E_ERR_NO_MEMORY
;
984 vfres
->vf_offload_flags
= I40E_VIRTCHNL_VF_OFFLOAD_L2
;
985 vsi
= pf
->vsi
[vf
->lan_vsi_index
];
987 vfres
->vf_offload_flags
|= I40E_VIRTCHNL_VF_OFFLOAD_VLAN
;
989 vfres
->num_vsis
= num_vsis
;
990 vfres
->num_queue_pairs
= vf
->num_queue_pairs
;
991 vfres
->max_vectors
= pf
->hw
.func_caps
.num_msix_vectors_vf
;
992 if (vf
->lan_vsi_index
) {
993 vfres
->vsi_res
[i
].vsi_id
= vf
->lan_vsi_index
;
994 vfres
->vsi_res
[i
].vsi_type
= I40E_VSI_SRIOV
;
995 vfres
->vsi_res
[i
].num_queue_pairs
=
996 pf
->vsi
[vf
->lan_vsi_index
]->num_queue_pairs
;
997 memcpy(vfres
->vsi_res
[i
].default_mac_addr
,
998 vf
->default_lan_addr
.addr
, ETH_ALEN
);
1001 set_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
);
1004 /* send the response back to the vf */
1005 ret
= i40e_vc_send_msg_to_vf(vf
, I40E_VIRTCHNL_OP_GET_VF_RESOURCES
,
1006 aq_ret
, (u8
*)vfres
, len
);
1013 * i40e_vc_reset_vf_msg
1014 * @vf: pointer to the vf info
1015 * @msg: pointer to the msg buffer
1016 * @msglen: msg length
1018 * called from the vf to reset itself,
1019 * unlike other virtchnl messages, pf driver
1020 * doesn't send the response back to the vf
1022 static void i40e_vc_reset_vf_msg(struct i40e_vf
*vf
)
1024 if (test_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
))
1025 i40e_reset_vf(vf
, false);
1029 * i40e_vc_config_promiscuous_mode_msg
1030 * @vf: pointer to the vf info
1031 * @msg: pointer to the msg buffer
1032 * @msglen: msg length
1034 * called from the vf to configure the promiscuous mode of
1037 static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf
*vf
,
1038 u8
*msg
, u16 msglen
)
1040 struct i40e_virtchnl_promisc_info
*info
=
1041 (struct i40e_virtchnl_promisc_info
*)msg
;
1042 struct i40e_pf
*pf
= vf
->pf
;
1043 struct i40e_hw
*hw
= &pf
->hw
;
1044 bool allmulti
= false;
1045 bool promisc
= false;
1048 if (!test_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
) ||
1049 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE
, &vf
->vf_caps
) ||
1050 !i40e_vc_isvalid_vsi_id(vf
, info
->vsi_id
) ||
1051 (pf
->vsi
[info
->vsi_id
]->type
!= I40E_VSI_FCOE
)) {
1052 aq_ret
= I40E_ERR_PARAM
;
1056 if (info
->flags
& I40E_FLAG_VF_UNICAST_PROMISC
)
1058 aq_ret
= i40e_aq_set_vsi_unicast_promiscuous(hw
, info
->vsi_id
,
1063 if (info
->flags
& I40E_FLAG_VF_MULTICAST_PROMISC
)
1065 aq_ret
= i40e_aq_set_vsi_multicast_promiscuous(hw
, info
->vsi_id
,
1069 /* send the response to the vf */
1070 return i40e_vc_send_resp_to_vf(vf
,
1071 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
,
1076 * i40e_vc_config_queues_msg
1077 * @vf: pointer to the vf info
1078 * @msg: pointer to the msg buffer
1079 * @msglen: msg length
1081 * called from the vf to configure the rx/tx
1084 static int i40e_vc_config_queues_msg(struct i40e_vf
*vf
, u8
*msg
, u16 msglen
)
1086 struct i40e_virtchnl_vsi_queue_config_info
*qci
=
1087 (struct i40e_virtchnl_vsi_queue_config_info
*)msg
;
1088 struct i40e_virtchnl_queue_pair_info
*qpi
;
1089 u16 vsi_id
, vsi_queue_id
;
1090 i40e_status aq_ret
= 0;
1093 if (!test_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
)) {
1094 aq_ret
= I40E_ERR_PARAM
;
1098 vsi_id
= qci
->vsi_id
;
1099 if (!i40e_vc_isvalid_vsi_id(vf
, vsi_id
)) {
1100 aq_ret
= I40E_ERR_PARAM
;
1103 for (i
= 0; i
< qci
->num_queue_pairs
; i
++) {
1104 qpi
= &qci
->qpair
[i
];
1105 vsi_queue_id
= qpi
->txq
.queue_id
;
1106 if ((qpi
->txq
.vsi_id
!= vsi_id
) ||
1107 (qpi
->rxq
.vsi_id
!= vsi_id
) ||
1108 (qpi
->rxq
.queue_id
!= vsi_queue_id
) ||
1109 !i40e_vc_isvalid_queue_id(vf
, vsi_id
, vsi_queue_id
)) {
1110 aq_ret
= I40E_ERR_PARAM
;
1114 if (i40e_config_vsi_rx_queue(vf
, vsi_id
, vsi_queue_id
,
1116 i40e_config_vsi_tx_queue(vf
, vsi_id
, vsi_queue_id
,
1118 aq_ret
= I40E_ERR_PARAM
;
1124 /* send the response to the vf */
1125 return i40e_vc_send_resp_to_vf(vf
, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
,
1130 * i40e_vc_config_irq_map_msg
1131 * @vf: pointer to the vf info
1132 * @msg: pointer to the msg buffer
1133 * @msglen: msg length
1135 * called from the vf to configure the irq to
1138 static int i40e_vc_config_irq_map_msg(struct i40e_vf
*vf
, u8
*msg
, u16 msglen
)
1140 struct i40e_virtchnl_irq_map_info
*irqmap_info
=
1141 (struct i40e_virtchnl_irq_map_info
*)msg
;
1142 struct i40e_virtchnl_vector_map
*map
;
1143 u16 vsi_id
, vsi_queue_id
, vector_id
;
1144 i40e_status aq_ret
= 0;
1145 unsigned long tempmap
;
1148 if (!test_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
)) {
1149 aq_ret
= I40E_ERR_PARAM
;
1153 for (i
= 0; i
< irqmap_info
->num_vectors
; i
++) {
1154 map
= &irqmap_info
->vecmap
[i
];
1156 vector_id
= map
->vector_id
;
1157 vsi_id
= map
->vsi_id
;
1158 /* validate msg params */
1159 if (!i40e_vc_isvalid_vector_id(vf
, vector_id
) ||
1160 !i40e_vc_isvalid_vsi_id(vf
, vsi_id
)) {
1161 aq_ret
= I40E_ERR_PARAM
;
1165 /* lookout for the invalid queue index */
1166 tempmap
= map
->rxq_map
;
1167 for_each_set_bit(vsi_queue_id
, &tempmap
, I40E_MAX_VSI_QP
) {
1168 if (!i40e_vc_isvalid_queue_id(vf
, vsi_id
,
1170 aq_ret
= I40E_ERR_PARAM
;
1175 tempmap
= map
->txq_map
;
1176 for_each_set_bit(vsi_queue_id
, &tempmap
, I40E_MAX_VSI_QP
) {
1177 if (!i40e_vc_isvalid_queue_id(vf
, vsi_id
,
1179 aq_ret
= I40E_ERR_PARAM
;
1184 i40e_config_irq_link_list(vf
, vsi_id
, map
);
1187 /* send the response to the vf */
1188 return i40e_vc_send_resp_to_vf(vf
, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
,
1193 * i40e_vc_enable_queues_msg
1194 * @vf: pointer to the vf info
1195 * @msg: pointer to the msg buffer
1196 * @msglen: msg length
1198 * called from the vf to enable all or specific queue(s)
1200 static int i40e_vc_enable_queues_msg(struct i40e_vf
*vf
, u8
*msg
, u16 msglen
)
1202 struct i40e_virtchnl_queue_select
*vqs
=
1203 (struct i40e_virtchnl_queue_select
*)msg
;
1204 struct i40e_pf
*pf
= vf
->pf
;
1205 u16 vsi_id
= vqs
->vsi_id
;
1206 i40e_status aq_ret
= 0;
1208 if (!test_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
)) {
1209 aq_ret
= I40E_ERR_PARAM
;
1213 if (!i40e_vc_isvalid_vsi_id(vf
, vsi_id
)) {
1214 aq_ret
= I40E_ERR_PARAM
;
1218 if ((0 == vqs
->rx_queues
) && (0 == vqs
->tx_queues
)) {
1219 aq_ret
= I40E_ERR_PARAM
;
1222 if (i40e_vsi_control_rings(pf
->vsi
[vsi_id
], true))
1223 aq_ret
= I40E_ERR_TIMEOUT
;
1225 /* send the response to the vf */
1226 return i40e_vc_send_resp_to_vf(vf
, I40E_VIRTCHNL_OP_ENABLE_QUEUES
,
1231 * i40e_vc_disable_queues_msg
1232 * @vf: pointer to the vf info
1233 * @msg: pointer to the msg buffer
1234 * @msglen: msg length
1236 * called from the vf to disable all or specific
1239 static int i40e_vc_disable_queues_msg(struct i40e_vf
*vf
, u8
*msg
, u16 msglen
)
1241 struct i40e_virtchnl_queue_select
*vqs
=
1242 (struct i40e_virtchnl_queue_select
*)msg
;
1243 struct i40e_pf
*pf
= vf
->pf
;
1244 u16 vsi_id
= vqs
->vsi_id
;
1245 i40e_status aq_ret
= 0;
1247 if (!test_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
)) {
1248 aq_ret
= I40E_ERR_PARAM
;
1252 if (!i40e_vc_isvalid_vsi_id(vf
, vqs
->vsi_id
)) {
1253 aq_ret
= I40E_ERR_PARAM
;
1257 if ((0 == vqs
->rx_queues
) && (0 == vqs
->tx_queues
)) {
1258 aq_ret
= I40E_ERR_PARAM
;
1261 if (i40e_vsi_control_rings(pf
->vsi
[vsi_id
], false))
1262 aq_ret
= I40E_ERR_TIMEOUT
;
1265 /* send the response to the vf */
1266 return i40e_vc_send_resp_to_vf(vf
, I40E_VIRTCHNL_OP_DISABLE_QUEUES
,
1271 * i40e_vc_get_stats_msg
1272 * @vf: pointer to the vf info
1273 * @msg: pointer to the msg buffer
1274 * @msglen: msg length
1276 * called from the vf to get vsi stats
1278 static int i40e_vc_get_stats_msg(struct i40e_vf
*vf
, u8
*msg
, u16 msglen
)
1280 struct i40e_virtchnl_queue_select
*vqs
=
1281 (struct i40e_virtchnl_queue_select
*)msg
;
1282 struct i40e_pf
*pf
= vf
->pf
;
1283 struct i40e_eth_stats stats
;
1284 i40e_status aq_ret
= 0;
1285 struct i40e_vsi
*vsi
;
1287 memset(&stats
, 0, sizeof(struct i40e_eth_stats
));
1289 if (!test_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
)) {
1290 aq_ret
= I40E_ERR_PARAM
;
1294 if (!i40e_vc_isvalid_vsi_id(vf
, vqs
->vsi_id
)) {
1295 aq_ret
= I40E_ERR_PARAM
;
1299 vsi
= pf
->vsi
[vqs
->vsi_id
];
1301 aq_ret
= I40E_ERR_PARAM
;
1304 i40e_update_eth_stats(vsi
);
1305 memcpy(&stats
, &vsi
->eth_stats
, sizeof(struct i40e_eth_stats
));
1308 /* send the response back to the vf */
1309 return i40e_vc_send_msg_to_vf(vf
, I40E_VIRTCHNL_OP_GET_STATS
, aq_ret
,
1310 (u8
*)&stats
, sizeof(stats
));
1314 * i40e_vc_add_mac_addr_msg
1315 * @vf: pointer to the vf info
1316 * @msg: pointer to the msg buffer
1317 * @msglen: msg length
1319 * add guest mac address filter
1321 static int i40e_vc_add_mac_addr_msg(struct i40e_vf
*vf
, u8
*msg
, u16 msglen
)
1323 struct i40e_virtchnl_ether_addr_list
*al
=
1324 (struct i40e_virtchnl_ether_addr_list
*)msg
;
1325 struct i40e_pf
*pf
= vf
->pf
;
1326 struct i40e_vsi
*vsi
= NULL
;
1327 u16 vsi_id
= al
->vsi_id
;
1328 i40e_status aq_ret
= 0;
1331 if (!test_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
) ||
1332 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE
, &vf
->vf_caps
) ||
1333 !i40e_vc_isvalid_vsi_id(vf
, vsi_id
)) {
1334 aq_ret
= I40E_ERR_PARAM
;
1338 for (i
= 0; i
< al
->num_elements
; i
++) {
1339 if (is_broadcast_ether_addr(al
->list
[i
].addr
) ||
1340 is_zero_ether_addr(al
->list
[i
].addr
)) {
1341 dev_err(&pf
->pdev
->dev
, "invalid VF MAC addr %pMAC\n",
1343 aq_ret
= I40E_ERR_PARAM
;
1347 vsi
= pf
->vsi
[vsi_id
];
1349 /* add new addresses to the list */
1350 for (i
= 0; i
< al
->num_elements
; i
++) {
1351 struct i40e_mac_filter
*f
;
1353 f
= i40e_find_mac(vsi
, al
->list
[i
].addr
, true, false);
1355 if (i40e_is_vsi_in_vlan(vsi
))
1356 f
= i40e_put_mac_in_vlan(vsi
, al
->list
[i
].addr
,
1359 f
= i40e_add_filter(vsi
, al
->list
[i
].addr
, -1,
1364 dev_err(&pf
->pdev
->dev
,
1365 "Unable to add VF MAC filter\n");
1366 aq_ret
= I40E_ERR_PARAM
;
1371 /* program the updated filter list */
1372 if (i40e_sync_vsi_filters(vsi
))
1373 dev_err(&pf
->pdev
->dev
, "Unable to program VF MAC filters\n");
1376 /* send the response to the vf */
1377 return i40e_vc_send_resp_to_vf(vf
, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
,
1382 * i40e_vc_del_mac_addr_msg
1383 * @vf: pointer to the vf info
1384 * @msg: pointer to the msg buffer
1385 * @msglen: msg length
1387 * remove guest mac address filter
1389 static int i40e_vc_del_mac_addr_msg(struct i40e_vf
*vf
, u8
*msg
, u16 msglen
)
1391 struct i40e_virtchnl_ether_addr_list
*al
=
1392 (struct i40e_virtchnl_ether_addr_list
*)msg
;
1393 struct i40e_pf
*pf
= vf
->pf
;
1394 struct i40e_vsi
*vsi
= NULL
;
1395 u16 vsi_id
= al
->vsi_id
;
1396 i40e_status aq_ret
= 0;
1399 if (!test_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
) ||
1400 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE
, &vf
->vf_caps
) ||
1401 !i40e_vc_isvalid_vsi_id(vf
, vsi_id
)) {
1402 aq_ret
= I40E_ERR_PARAM
;
1405 vsi
= pf
->vsi
[vsi_id
];
1407 /* delete addresses from the list */
1408 for (i
= 0; i
< al
->num_elements
; i
++)
1409 i40e_del_filter(vsi
, al
->list
[i
].addr
,
1410 I40E_VLAN_ANY
, true, false);
1412 /* program the updated filter list */
1413 if (i40e_sync_vsi_filters(vsi
))
1414 dev_err(&pf
->pdev
->dev
, "Unable to program VF MAC filters\n");
1417 /* send the response to the vf */
1418 return i40e_vc_send_resp_to_vf(vf
, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
,
1423 * i40e_vc_add_vlan_msg
1424 * @vf: pointer to the vf info
1425 * @msg: pointer to the msg buffer
1426 * @msglen: msg length
1428 * program guest vlan id
1430 static int i40e_vc_add_vlan_msg(struct i40e_vf
*vf
, u8
*msg
, u16 msglen
)
1432 struct i40e_virtchnl_vlan_filter_list
*vfl
=
1433 (struct i40e_virtchnl_vlan_filter_list
*)msg
;
1434 struct i40e_pf
*pf
= vf
->pf
;
1435 struct i40e_vsi
*vsi
= NULL
;
1436 u16 vsi_id
= vfl
->vsi_id
;
1437 i40e_status aq_ret
= 0;
1440 if (!test_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
) ||
1441 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE
, &vf
->vf_caps
) ||
1442 !i40e_vc_isvalid_vsi_id(vf
, vsi_id
)) {
1443 aq_ret
= I40E_ERR_PARAM
;
1447 for (i
= 0; i
< vfl
->num_elements
; i
++) {
1448 if (vfl
->vlan_id
[i
] > I40E_MAX_VLANID
) {
1449 aq_ret
= I40E_ERR_PARAM
;
1450 dev_err(&pf
->pdev
->dev
,
1451 "invalid VF VLAN id %d\n", vfl
->vlan_id
[i
]);
1455 vsi
= pf
->vsi
[vsi_id
];
1456 if (vsi
->info
.pvid
) {
1457 aq_ret
= I40E_ERR_PARAM
;
1461 i40e_vlan_stripping_enable(vsi
);
1462 for (i
= 0; i
< vfl
->num_elements
; i
++) {
1463 /* add new VLAN filter */
1464 int ret
= i40e_vsi_add_vlan(vsi
, vfl
->vlan_id
[i
]);
1466 dev_err(&pf
->pdev
->dev
,
1467 "Unable to add VF vlan filter %d, error %d\n",
1468 vfl
->vlan_id
[i
], ret
);
1472 /* send the response to the vf */
1473 return i40e_vc_send_resp_to_vf(vf
, I40E_VIRTCHNL_OP_ADD_VLAN
, aq_ret
);
1477 * i40e_vc_remove_vlan_msg
1478 * @vf: pointer to the vf info
1479 * @msg: pointer to the msg buffer
1480 * @msglen: msg length
1482 * remove programmed guest vlan id
1484 static int i40e_vc_remove_vlan_msg(struct i40e_vf
*vf
, u8
*msg
, u16 msglen
)
1486 struct i40e_virtchnl_vlan_filter_list
*vfl
=
1487 (struct i40e_virtchnl_vlan_filter_list
*)msg
;
1488 struct i40e_pf
*pf
= vf
->pf
;
1489 struct i40e_vsi
*vsi
= NULL
;
1490 u16 vsi_id
= vfl
->vsi_id
;
1491 i40e_status aq_ret
= 0;
1494 if (!test_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
) ||
1495 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE
, &vf
->vf_caps
) ||
1496 !i40e_vc_isvalid_vsi_id(vf
, vsi_id
)) {
1497 aq_ret
= I40E_ERR_PARAM
;
1501 for (i
= 0; i
< vfl
->num_elements
; i
++) {
1502 if (vfl
->vlan_id
[i
] > I40E_MAX_VLANID
) {
1503 aq_ret
= I40E_ERR_PARAM
;
1508 vsi
= pf
->vsi
[vsi_id
];
1509 if (vsi
->info
.pvid
) {
1510 aq_ret
= I40E_ERR_PARAM
;
1514 for (i
= 0; i
< vfl
->num_elements
; i
++) {
1515 int ret
= i40e_vsi_kill_vlan(vsi
, vfl
->vlan_id
[i
]);
1517 dev_err(&pf
->pdev
->dev
,
1518 "Unable to delete VF vlan filter %d, error %d\n",
1519 vfl
->vlan_id
[i
], ret
);
1523 /* send the response to the vf */
1524 return i40e_vc_send_resp_to_vf(vf
, I40E_VIRTCHNL_OP_DEL_VLAN
, aq_ret
);
1528 * i40e_vc_validate_vf_msg
1529 * @vf: pointer to the vf info
1530 * @msg: pointer to the msg buffer
1531 * @msglen: msg length
1532 * @msghndl: msg handle
1536 static int i40e_vc_validate_vf_msg(struct i40e_vf
*vf
, u32 v_opcode
,
1537 u32 v_retval
, u8
*msg
, u16 msglen
)
1539 bool err_msg_format
= false;
1542 /* Check if VF is disabled. */
1543 if (test_bit(I40E_VF_STAT_DISABLED
, &vf
->vf_states
))
1544 return I40E_ERR_PARAM
;
1546 /* Validate message length. */
1548 case I40E_VIRTCHNL_OP_VERSION
:
1549 valid_len
= sizeof(struct i40e_virtchnl_version_info
);
1551 case I40E_VIRTCHNL_OP_RESET_VF
:
1552 case I40E_VIRTCHNL_OP_GET_VF_RESOURCES
:
1555 case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE
:
1556 valid_len
= sizeof(struct i40e_virtchnl_txq_info
);
1558 case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE
:
1559 valid_len
= sizeof(struct i40e_virtchnl_rxq_info
);
1561 case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
:
1562 valid_len
= sizeof(struct i40e_virtchnl_vsi_queue_config_info
);
1563 if (msglen
>= valid_len
) {
1564 struct i40e_virtchnl_vsi_queue_config_info
*vqc
=
1565 (struct i40e_virtchnl_vsi_queue_config_info
*)msg
;
1566 valid_len
+= (vqc
->num_queue_pairs
*
1568 i40e_virtchnl_queue_pair_info
));
1569 if (vqc
->num_queue_pairs
== 0)
1570 err_msg_format
= true;
1573 case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
:
1574 valid_len
= sizeof(struct i40e_virtchnl_irq_map_info
);
1575 if (msglen
>= valid_len
) {
1576 struct i40e_virtchnl_irq_map_info
*vimi
=
1577 (struct i40e_virtchnl_irq_map_info
*)msg
;
1578 valid_len
+= (vimi
->num_vectors
*
1579 sizeof(struct i40e_virtchnl_vector_map
));
1580 if (vimi
->num_vectors
== 0)
1581 err_msg_format
= true;
1584 case I40E_VIRTCHNL_OP_ENABLE_QUEUES
:
1585 case I40E_VIRTCHNL_OP_DISABLE_QUEUES
:
1586 valid_len
= sizeof(struct i40e_virtchnl_queue_select
);
1588 case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
:
1589 case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
:
1590 valid_len
= sizeof(struct i40e_virtchnl_ether_addr_list
);
1591 if (msglen
>= valid_len
) {
1592 struct i40e_virtchnl_ether_addr_list
*veal
=
1593 (struct i40e_virtchnl_ether_addr_list
*)msg
;
1594 valid_len
+= veal
->num_elements
*
1595 sizeof(struct i40e_virtchnl_ether_addr
);
1596 if (veal
->num_elements
== 0)
1597 err_msg_format
= true;
1600 case I40E_VIRTCHNL_OP_ADD_VLAN
:
1601 case I40E_VIRTCHNL_OP_DEL_VLAN
:
1602 valid_len
= sizeof(struct i40e_virtchnl_vlan_filter_list
);
1603 if (msglen
>= valid_len
) {
1604 struct i40e_virtchnl_vlan_filter_list
*vfl
=
1605 (struct i40e_virtchnl_vlan_filter_list
*)msg
;
1606 valid_len
+= vfl
->num_elements
* sizeof(u16
);
1607 if (vfl
->num_elements
== 0)
1608 err_msg_format
= true;
1611 case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
:
1612 valid_len
= sizeof(struct i40e_virtchnl_promisc_info
);
1614 case I40E_VIRTCHNL_OP_GET_STATS
:
1615 valid_len
= sizeof(struct i40e_virtchnl_queue_select
);
1617 /* These are always errors coming from the VF. */
1618 case I40E_VIRTCHNL_OP_EVENT
:
1619 case I40E_VIRTCHNL_OP_UNKNOWN
:
1624 /* few more checks */
1625 if ((valid_len
!= msglen
) || (err_msg_format
)) {
1626 i40e_vc_send_resp_to_vf(vf
, v_opcode
, I40E_ERR_PARAM
);
1634 * i40e_vc_process_vf_msg
1635 * @pf: pointer to the pf structure
1636 * @vf_id: source vf id
1637 * @msg: pointer to the msg buffer
1638 * @msglen: msg length
1639 * @msghndl: msg handle
1641 * called from the common aeq/arq handler to
1642 * process request from vf
1644 int i40e_vc_process_vf_msg(struct i40e_pf
*pf
, u16 vf_id
, u32 v_opcode
,
1645 u32 v_retval
, u8
*msg
, u16 msglen
)
1647 struct i40e_vf
*vf
= &(pf
->vf
[vf_id
]);
1648 struct i40e_hw
*hw
= &pf
->hw
;
1651 pf
->vf_aq_requests
++;
1652 /* perform basic checks on the msg */
1653 ret
= i40e_vc_validate_vf_msg(vf
, v_opcode
, v_retval
, msg
, msglen
);
1656 dev_err(&pf
->pdev
->dev
, "invalid message from vf %d\n", vf_id
);
1659 wr32(hw
, I40E_VFGEN_RSTAT1(vf_id
), I40E_VFR_VFACTIVE
);
1661 case I40E_VIRTCHNL_OP_VERSION
:
1662 ret
= i40e_vc_get_version_msg(vf
);
1664 case I40E_VIRTCHNL_OP_GET_VF_RESOURCES
:
1665 ret
= i40e_vc_get_vf_resources_msg(vf
);
1667 case I40E_VIRTCHNL_OP_RESET_VF
:
1668 i40e_vc_reset_vf_msg(vf
);
1671 case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
:
1672 ret
= i40e_vc_config_promiscuous_mode_msg(vf
, msg
, msglen
);
1674 case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
:
1675 ret
= i40e_vc_config_queues_msg(vf
, msg
, msglen
);
1677 case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
:
1678 ret
= i40e_vc_config_irq_map_msg(vf
, msg
, msglen
);
1680 case I40E_VIRTCHNL_OP_ENABLE_QUEUES
:
1681 ret
= i40e_vc_enable_queues_msg(vf
, msg
, msglen
);
1683 case I40E_VIRTCHNL_OP_DISABLE_QUEUES
:
1684 ret
= i40e_vc_disable_queues_msg(vf
, msg
, msglen
);
1686 case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
:
1687 ret
= i40e_vc_add_mac_addr_msg(vf
, msg
, msglen
);
1689 case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
:
1690 ret
= i40e_vc_del_mac_addr_msg(vf
, msg
, msglen
);
1692 case I40E_VIRTCHNL_OP_ADD_VLAN
:
1693 ret
= i40e_vc_add_vlan_msg(vf
, msg
, msglen
);
1695 case I40E_VIRTCHNL_OP_DEL_VLAN
:
1696 ret
= i40e_vc_remove_vlan_msg(vf
, msg
, msglen
);
1698 case I40E_VIRTCHNL_OP_GET_STATS
:
1699 ret
= i40e_vc_get_stats_msg(vf
, msg
, msglen
);
1701 case I40E_VIRTCHNL_OP_UNKNOWN
:
1703 dev_err(&pf
->pdev
->dev
,
1704 "Unsupported opcode %d from vf %d\n", v_opcode
, vf_id
);
1705 ret
= i40e_vc_send_resp_to_vf(vf
, v_opcode
,
1706 I40E_ERR_NOT_IMPLEMENTED
);
1714 * i40e_vc_process_vflr_event
1715 * @pf: pointer to the pf structure
1717 * called from the vlfr irq handler to
1718 * free up vf resources and state variables
1720 int i40e_vc_process_vflr_event(struct i40e_pf
*pf
)
1722 u32 reg
, reg_idx
, bit_idx
, vf_id
;
1723 struct i40e_hw
*hw
= &pf
->hw
;
1726 if (!test_bit(__I40E_VFLR_EVENT_PENDING
, &pf
->state
))
1729 clear_bit(__I40E_VFLR_EVENT_PENDING
, &pf
->state
);
1730 for (vf_id
= 0; vf_id
< pf
->num_alloc_vfs
; vf_id
++) {
1731 reg_idx
= (hw
->func_caps
.vf_base_id
+ vf_id
) / 32;
1732 bit_idx
= (hw
->func_caps
.vf_base_id
+ vf_id
) % 32;
1733 /* read GLGEN_VFLRSTAT register to find out the flr vfs */
1734 vf
= &pf
->vf
[vf_id
];
1735 reg
= rd32(hw
, I40E_GLGEN_VFLRSTAT(reg_idx
));
1736 if (reg
& (1 << bit_idx
)) {
1737 /* clear the bit in GLGEN_VFLRSTAT */
1738 wr32(hw
, I40E_GLGEN_VFLRSTAT(reg_idx
), (1 << bit_idx
));
1740 i40e_reset_vf(vf
, true);
1744 /* re-enable vflr interrupt cause */
1745 reg
= rd32(hw
, I40E_PFINT_ICR0_ENA
);
1746 reg
|= I40E_PFINT_ICR0_ENA_VFLR_MASK
;
1747 wr32(hw
, I40E_PFINT_ICR0_ENA
, reg
);
1754 * i40e_vc_vf_broadcast
1755 * @pf: pointer to the pf structure
1756 * @opcode: operation code
1757 * @retval: return value
1758 * @msg: pointer to the msg buffer
1759 * @msglen: msg length
1761 * send a message to all VFs on a given PF
1763 static void i40e_vc_vf_broadcast(struct i40e_pf
*pf
,
1764 enum i40e_virtchnl_ops v_opcode
,
1765 i40e_status v_retval
, u8
*msg
,
1768 struct i40e_hw
*hw
= &pf
->hw
;
1769 struct i40e_vf
*vf
= pf
->vf
;
1772 for (i
= 0; i
< pf
->num_alloc_vfs
; i
++) {
1773 /* Ignore return value on purpose - a given VF may fail, but
1774 * we need to keep going and send to all of them
1776 i40e_aq_send_msg_to_vf(hw
, vf
->vf_id
, v_opcode
, v_retval
,
1783 * i40e_vc_notify_link_state
1784 * @pf: pointer to the pf structure
1786 * send a link status message to all VFs on a given PF
1788 void i40e_vc_notify_link_state(struct i40e_pf
*pf
)
1790 struct i40e_virtchnl_pf_event pfe
;
1792 pfe
.event
= I40E_VIRTCHNL_EVENT_LINK_CHANGE
;
1793 pfe
.severity
= I40E_PF_EVENT_SEVERITY_INFO
;
1794 pfe
.event_data
.link_event
.link_status
=
1795 pf
->hw
.phy
.link_info
.link_info
& I40E_AQ_LINK_UP
;
1796 pfe
.event_data
.link_event
.link_speed
= pf
->hw
.phy
.link_info
.link_speed
;
1798 i40e_vc_vf_broadcast(pf
, I40E_VIRTCHNL_OP_EVENT
, I40E_SUCCESS
,
1799 (u8
*)&pfe
, sizeof(struct i40e_virtchnl_pf_event
));
1803 * i40e_vc_notify_reset
1804 * @pf: pointer to the pf structure
1806 * indicate a pending reset to all VFs on a given PF
1808 void i40e_vc_notify_reset(struct i40e_pf
*pf
)
1810 struct i40e_virtchnl_pf_event pfe
;
1812 pfe
.event
= I40E_VIRTCHNL_EVENT_RESET_IMPENDING
;
1813 pfe
.severity
= I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM
;
1814 i40e_vc_vf_broadcast(pf
, I40E_VIRTCHNL_OP_EVENT
, I40E_SUCCESS
,
1815 (u8
*)&pfe
, sizeof(struct i40e_virtchnl_pf_event
));
1819 * i40e_vc_notify_vf_reset
1820 * @vf: pointer to the vf structure
1822 * indicate a pending reset to the given VF
1824 void i40e_vc_notify_vf_reset(struct i40e_vf
*vf
)
1826 struct i40e_virtchnl_pf_event pfe
;
1828 pfe
.event
= I40E_VIRTCHNL_EVENT_RESET_IMPENDING
;
1829 pfe
.severity
= I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM
;
1830 i40e_aq_send_msg_to_vf(&vf
->pf
->hw
, vf
->vf_id
, I40E_VIRTCHNL_OP_EVENT
,
1831 I40E_SUCCESS
, (u8
*)&pfe
,
1832 sizeof(struct i40e_virtchnl_pf_event
), NULL
);
1836 * i40e_ndo_set_vf_mac
1837 * @netdev: network interface device structure
1838 * @vf_id: vf identifier
1841 * program vf mac address
1843 int i40e_ndo_set_vf_mac(struct net_device
*netdev
, int vf_id
, u8
*mac
)
1845 struct i40e_netdev_priv
*np
= netdev_priv(netdev
);
1846 struct i40e_vsi
*vsi
= np
->vsi
;
1847 struct i40e_pf
*pf
= vsi
->back
;
1848 struct i40e_mac_filter
*f
;
1852 /* validate the request */
1853 if (vf_id
>= pf
->num_alloc_vfs
) {
1854 dev_err(&pf
->pdev
->dev
,
1855 "Invalid VF Identifier %d\n", vf_id
);
1860 vf
= &(pf
->vf
[vf_id
]);
1861 vsi
= pf
->vsi
[vf
->lan_vsi_index
];
1862 if (!test_bit(I40E_VF_STAT_INIT
, &vf
->vf_states
)) {
1863 dev_err(&pf
->pdev
->dev
,
1864 "Uninitialized VF %d\n", vf_id
);
1869 if (!is_valid_ether_addr(mac
)) {
1870 dev_err(&pf
->pdev
->dev
,
1871 "Invalid VF ethernet address\n");
1876 /* delete the temporary mac address */
1877 i40e_del_filter(vsi
, vf
->default_lan_addr
.addr
, 0, true, false);
1879 /* add the new mac address */
1880 f
= i40e_add_filter(vsi
, mac
, 0, true, false);
1882 dev_err(&pf
->pdev
->dev
,
1883 "Unable to add VF ucast filter\n");
1888 dev_info(&pf
->pdev
->dev
, "Setting MAC %pM on VF %d\n", mac
, vf_id
);
1889 /* program mac filter */
1890 if (i40e_sync_vsi_filters(vsi
)) {
1891 dev_err(&pf
->pdev
->dev
, "Unable to program ucast filters\n");
1895 memcpy(vf
->default_lan_addr
.addr
, mac
, ETH_ALEN
);
1896 dev_info(&pf
->pdev
->dev
, "Reload the VF driver to make this change effective.\n");
1904 * i40e_ndo_set_vf_port_vlan
1905 * @netdev: network interface device structure
1906 * @vf_id: vf identifier
1907 * @vlan_id: mac address
1908 * @qos: priority setting
1910 * program vf vlan id and/or qos
1912 int i40e_ndo_set_vf_port_vlan(struct net_device
*netdev
,
1913 int vf_id
, u16 vlan_id
, u8 qos
)
1915 struct i40e_netdev_priv
*np
= netdev_priv(netdev
);
1916 struct i40e_pf
*pf
= np
->vsi
->back
;
1917 struct i40e_vsi
*vsi
;
1921 /* validate the request */
1922 if (vf_id
>= pf
->num_alloc_vfs
) {
1923 dev_err(&pf
->pdev
->dev
, "Invalid VF Identifier %d\n", vf_id
);
1928 if ((vlan_id
> I40E_MAX_VLANID
) || (qos
> 7)) {
1929 dev_err(&pf
->pdev
->dev
, "Invalid VF Parameters\n");
1934 vf
= &(pf
->vf
[vf_id
]);
1935 vsi
= pf
->vsi
[vf
->lan_vsi_index
];
1936 if (!test_bit(I40E_VF_STAT_INIT
, &vf
->vf_states
)) {
1937 dev_err(&pf
->pdev
->dev
, "Uninitialized VF %d\n", vf_id
);
1942 if (vsi
->info
.pvid
) {
1944 ret
= i40e_vsi_kill_vlan(vsi
, (le16_to_cpu(vsi
->info
.pvid
) &
1947 dev_info(&vsi
->back
->pdev
->dev
,
1948 "remove VLAN failed, ret=%d, aq_err=%d\n",
1949 ret
, pf
->hw
.aq
.asq_last_status
);
1953 ret
= i40e_vsi_add_pvid(vsi
,
1954 vlan_id
| (qos
<< I40E_VLAN_PRIORITY_SHIFT
));
1956 i40e_vlan_stripping_disable(vsi
);
1959 dev_info(&pf
->pdev
->dev
, "Setting VLAN %d, QOS 0x%x on VF %d\n",
1960 vlan_id
, qos
, vf_id
);
1962 /* add new VLAN filter */
1963 ret
= i40e_vsi_add_vlan(vsi
, vlan_id
);
1965 dev_info(&vsi
->back
->pdev
->dev
,
1966 "add VF VLAN failed, ret=%d aq_err=%d\n", ret
,
1967 vsi
->back
->hw
.aq
.asq_last_status
);
1973 dev_err(&pf
->pdev
->dev
, "Unable to update VF vsi context\n");
1983 * i40e_ndo_set_vf_bw
1984 * @netdev: network interface device structure
1985 * @vf_id: vf identifier
1988 * configure vf tx rate
1990 int i40e_ndo_set_vf_bw(struct net_device
*netdev
, int vf_id
, int tx_rate
)
1996 * i40e_ndo_get_vf_config
1997 * @netdev: network interface device structure
1998 * @vf_id: vf identifier
1999 * @ivi: vf configuration structure
2001 * return vf configuration
2003 int i40e_ndo_get_vf_config(struct net_device
*netdev
,
2004 int vf_id
, struct ifla_vf_info
*ivi
)
2006 struct i40e_netdev_priv
*np
= netdev_priv(netdev
);
2007 struct i40e_mac_filter
*f
, *ftmp
;
2008 struct i40e_vsi
*vsi
= np
->vsi
;
2009 struct i40e_pf
*pf
= vsi
->back
;
2013 /* validate the request */
2014 if (vf_id
>= pf
->num_alloc_vfs
) {
2015 dev_err(&pf
->pdev
->dev
, "Invalid VF Identifier %d\n", vf_id
);
2020 vf
= &(pf
->vf
[vf_id
]);
2021 /* first vsi is always the LAN vsi */
2022 vsi
= pf
->vsi
[vf
->lan_vsi_index
];
2023 if (!test_bit(I40E_VF_STAT_INIT
, &vf
->vf_states
)) {
2024 dev_err(&pf
->pdev
->dev
, "Uninitialized VF %d\n", vf_id
);
2031 /* first entry of the list is the default ethernet address */
2032 list_for_each_entry_safe(f
, ftmp
, &vsi
->mac_filter_list
, list
) {
2033 memcpy(&ivi
->mac
, f
->macaddr
, I40E_ETH_LENGTH_OF_ADDRESS
);
2038 ivi
->vlan
= le16_to_cpu(vsi
->info
.pvid
) & I40E_VLAN_MASK
;
2039 ivi
->qos
= (le16_to_cpu(vsi
->info
.pvid
) & I40E_PRIORITY_MASK
) >>
2040 I40E_VLAN_PRIORITY_SHIFT
;