/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/qed/qed_iov_if.h>
#include "qed_cxt.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"
static int qed_sp_vf_start(struct qed_hwfn *p_hwfn,
			   u32 concrete_vfid, u16 opaque_vfid)
{
	struct vf_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_vfid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_VF_START,
				 PROTOCOLID_COMMON, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_start;

	p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
	p_ramrod->opaque_fid = cpu_to_le16(opaque_vfid);

	p_ramrod->personality = PERSONALITY_ETH;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
			  u32 concrete_vfid, u16 opaque_vfid)
{
	struct vf_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_vfid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_VF_STOP,
				 PROTOCOLID_COMMON, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_stop;

	p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
			   int rel_vf_id, bool b_enabled_only)
{
	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
		return false;
	}

	if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) ||
	    (rel_vf_id < 0))
		return false;

	if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
	    b_enabled_only)
		return false;

	return true;
}
static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
					       u16 relative_vf_id,
					       bool b_enabled_only)
{
	struct qed_vf_info *vf = NULL;

	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
		return NULL;
	}

	if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only))
		vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
	else
		DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
		       relative_vf_id);

	return vf;
}
int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
			     int vfid, struct qed_ptt *p_ptt)
{
	struct qed_bulletin_content *p_bulletin;
	int crc_size = sizeof(p_bulletin->crc);
	struct qed_dmae_params params;
	struct qed_vf_info *p_vf;

	p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!p_vf)
		return -EINVAL;

	if (!p_vf->vf_bulletin)
		return -EINVAL;

	p_bulletin = p_vf->bulletin.p_virt;

	/* Increment bulletin board version and compute crc */
	p_bulletin->version++;
	p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size,
				p_vf->bulletin.size - crc_size);

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
		   p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);

	/* propagate bulletin board via dmae to vm memory */
	memset(&params, 0, sizeof(params));
	params.flags = QED_DMAE_FLAG_VF_DST;
	params.dst_vfid = p_vf->abs_vf_id;
	return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
				  p_vf->vf_bulletin, p_vf->bulletin.size / 4,
				  &params);
}
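/* Note on the scheme above: the CRC deliberately skips the first 'crc_size'
 * bytes of the bulletin (the crc field itself) and covers the rest of the
 * board. The version bump plus CRC presumably let the VF side both notice
 * that a new snapshot arrived and validate its integrity before consuming it.
 */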
static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
{
	struct qed_hw_sriov_info *iov = cdev->p_iov_info;
	int pos = iov->pos;

	DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs);

	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
	if (iov->num_vfs) {
		DP_VERBOSE(cdev,
			   QED_MSG_IOV,
			   "Number of VFs are already set to non-zero value. Ignoring PCI configuration value\n");
		iov->num_vfs = 0;
	}

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_OFFSET, &iov->offset);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_STRIDE, &iov->stride);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);

	pci_read_config_dword(cdev->pdev,
			      pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);

	pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap);

	pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	DP_VERBOSE(cdev,
		   QED_MSG_IOV,
		   "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
		   iov->nres,
		   iov->cap,
		   iov->ctrl,
		   iov->total_vfs,
		   iov->initial_vfs,
		   iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	/* Some sanity checks */
	if (iov->num_vfs > NUM_OF_VFS(cdev) ||
	    iov->total_vfs > NUM_OF_VFS(cdev)) {
		/* This can happen only due to a bug. In this case we set
		 * num_vfs to zero to avoid memory corruption in the code that
		 * assumes max number of vfs
		 */
		DP_NOTICE(cdev,
			  "IOV: Unexpected number of vfs set: %d setting num_vf to zero\n",
			  iov->num_vfs);

		iov->num_vfs = 0;
		iov->total_vfs = 0;
	}

	return 0;
}
static void qed_iov_clear_vf_igu_blocks(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt)
{
	struct qed_igu_block *p_sb;
	u16 sb_id;
	u32 val;

	if (!p_hwfn->hw_info.p_igu_info) {
		DP_ERR(p_hwfn,
		       "qed_iov_clear_vf_igu_blocks IGU Info not initialized\n");
		return;
	}

	for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
	     sb_id++) {
		p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
		if ((p_sb->status & QED_IGU_STATUS_FREE) &&
		    !(p_sb->status & QED_IGU_STATUS_PF)) {
			val = qed_rd(p_hwfn, p_ptt,
				     IGU_REG_MAPPING_MEMORY + sb_id * 4);
			SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
			qed_wr(p_hwfn, p_ptt,
			       IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
		}
	}
}
static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	struct qed_bulletin_content *p_bulletin_virt;
	dma_addr_t req_p, rply_p, bulletin_p;
	union pfvf_tlvs *p_reply_virt_addr;
	union vfpf_tlvs *p_req_virt_addr;
	u8 idx = 0;

	memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));

	p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
	req_p = p_iov_info->mbx_msg_phys_addr;
	p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
	rply_p = p_iov_info->mbx_reply_phys_addr;
	p_bulletin_virt = p_iov_info->p_bulletins;
	bulletin_p = p_iov_info->bulletins_phys;
	if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
		DP_ERR(p_hwfn,
		       "qed_iov_setup_vfdb called without allocating mem first\n");
		return;
	}

	for (idx = 0; idx < p_iov->total_vfs; idx++) {
		struct qed_vf_info *vf = &p_iov_info->vfs_array[idx];
		u32 concrete;

		vf->vf_mbx.req_virt = p_req_virt_addr + idx;
		vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
		vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
		vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);

		vf->state = VF_STOPPED;

		vf->bulletin.phys = idx *
				    sizeof(struct qed_bulletin_content) +
				    bulletin_p;
		vf->bulletin.p_virt = p_bulletin_virt + idx;
		vf->bulletin.size = sizeof(struct qed_bulletin_content);

		vf->relative_vf_id = idx;
		vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
		concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
		vf->concrete_fid = concrete;
		vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
				 (vf->abs_vf_id << 8);
		vf->vport_id = idx + 1;
	}
}
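/* From the assignments above: a VF's opaque FID keeps the PF's opaque FID in
 * its low byte and carries the absolute VF id in the high byte, while vport
 * ids start at 1 - vport 0 is, presumably, reserved for the PF itself.
 */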
static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	void **p_v_addr;
	u16 num_vfs = 0;

	num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);

	/* Allocate PF Mailbox buffer (per-VF) */
	p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_msg_virt_addr;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->mbx_msg_size,
				       &p_iov_info->mbx_msg_phys_addr,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	/* Allocate PF Mailbox Reply buffer (per-VF) */
	p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_reply_virt_addr;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->mbx_reply_size,
				       &p_iov_info->mbx_reply_phys_addr,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) *
				     num_vfs;
	p_v_addr = &p_iov_info->p_bulletins;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->bulletins_size,
				       &p_iov_info->bulletins_phys,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
		   p_iov_info->mbx_msg_virt_addr,
		   (u64) p_iov_info->mbx_msg_phys_addr,
		   p_iov_info->mbx_reply_virt_addr,
		   (u64) p_iov_info->mbx_reply_phys_addr,
		   p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);

	return 0;
}
static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;

	if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->mbx_msg_size,
				  p_iov_info->mbx_msg_virt_addr,
				  p_iov_info->mbx_msg_phys_addr);

	if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->mbx_reply_size,
				  p_iov_info->mbx_reply_virt_addr,
				  p_iov_info->mbx_reply_phys_addr);

	if (p_iov_info->p_bulletins)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->bulletins_size,
				  p_iov_info->p_bulletins,
				  p_iov_info->bulletins_phys);
}
int qed_iov_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_sriov;

	if (!IS_PF_SRIOV(p_hwfn)) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "No SR-IOV - no need for IOV db\n");
		return 0;
	}

	p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
	if (!p_sriov) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sriov'\n");
		return -ENOMEM;
	}

	p_hwfn->pf_iov_info = p_sriov;

	return qed_iov_allocate_vfdb(p_hwfn);
}
void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
		return;

	qed_iov_setup_vfdb(p_hwfn);
	qed_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
}
void qed_iov_free(struct qed_hwfn *p_hwfn)
{
	if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
		qed_iov_free_vfdb(p_hwfn);
		kfree(p_hwfn->pf_iov_info);
	}
}
void qed_iov_free_hw_info(struct qed_dev *cdev)
{
	kfree(cdev->p_iov_info);
	cdev->p_iov_info = NULL;
}
int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	int pos;
	int rc;

	if (IS_VF(p_hwfn->cdev))
		return 0;

	/* Learn the PCI configuration */
	pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
				      PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");
		return 0;
	}

	/* Allocate a new struct for IOV information */
	cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
	if (!cdev->p_iov_info) {
		DP_NOTICE(p_hwfn, "Can't support IOV due to lack of memory\n");
		return -ENOMEM;
	}
	cdev->p_iov_info->pos = pos;

	rc = qed_iov_pci_cfg_info(cdev);
	if (rc)
		return rc;

	/* We want PF IOV to be synonymous with the existence of p_iov_info;
	 * In case the capability is published but there are no VFs, simply
	 * de-allocate the struct.
	 */
	if (!cdev->p_iov_info->total_vfs) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "IOV capabilities, but no VFs are published\n");
		kfree(cdev->p_iov_info);
		cdev->p_iov_info = NULL;
		return 0;
	}

	/* Calculate the first VF index - this is a bit tricky; Basically,
	 * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
	 * after the first engine's VFs.
	 */
	cdev->p_iov_info->first_vf_in_pf = p_hwfn->cdev->p_iov_info->offset +
					   p_hwfn->abs_pf_id - 16;
	if (QED_PATH_ID(p_hwfn))
		cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "First VF in hwfn 0x%08x\n",
		   cdev->p_iov_info->first_vf_in_pf);

	return 0;
}
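/* Worked example for the first_vf_in_pf math above (illustrative only): on
 * engine 0, a PF with abs_pf_id 2 and a PCI VF offset of 16 gets
 * first_vf_in_pf = 16 + 2 - 16 = 2. On the second engine, MAX_NUM_VFS_BB is
 * subtracted, presumably so that engine-local VF numbering starts at 0 again.
 */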
static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
{
	/* Check PF supports sriov */
	if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
	    !IS_PF_SRIOV_ALLOC(p_hwfn))
		return false;

	/* Check VF validity */
	if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true))
		return false;

	return true;
}
static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
				      u16 rel_vf_id, u8 to_disable)
{
	struct qed_vf_info *vf;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
		if (!vf)
			continue;

		vf->to_disable = to_disable;
	}
}
void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
{
	u16 i;

	if (!IS_QED_SRIOV(cdev))
		return;

	for (i = 0; i < cdev->p_iov_info->total_vfs; i++)
		qed_iov_set_vf_to_disable(cdev, i, to_disable);
}
static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, u8 abs_vfid)
{
	qed_wr(p_hwfn, p_ptt,
	       PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
	       1 << (abs_vfid & 0x1f));
}
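/* Each PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR register covers a window of 32 VFs,
 * so (abs_vfid >> 5) selects the register and (abs_vfid & 0x1f) selects the
 * bit to clear within it.
 */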
static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
{
	int i;

	/* Set VF masks and configuration - pretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf->num_sbs; i++)
		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						vf->igu_sbs[i],
						vf->opaque_fid, true);
}
static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf, bool enable)
{
	u32 igu_vf_conf;

	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);

	if (enable)
		igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
	else
		igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;

	qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
}
static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_vf_info *vf)
{
	u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
	int rc;

	if (vf->to_disable)
		return 0;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "Enable internal access for vf %x [abs %x]\n",
		   vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf));

	qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf));

	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs);
	if (rc)
		return rc;

	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
	STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);

	qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
		     p_hwfn->hw_info.hw_mode);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

	if (vf->state != VF_STOPPED) {
		DP_NOTICE(p_hwfn, "VF[%02x] is already started\n",
			  vf->abs_vf_id);
		return -EINVAL;
	}

	/* Start VF */
	rc = qed_sp_vf_start(p_hwfn, vf->concrete_fid, vf->opaque_fid);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);
		return rc;
	}

	vf->state = VF_FREE;

	return rc;
}
/**
 * @brief qed_iov_config_perm_table - configure the permission zone table.
 *
 * In E4, queue zone permission table size is 320x9. There
 * are 320 VF queues for single engine device (256 for dual
 * engine device), and each entry has the following format:
 * {Valid, VF[7:0]}
 *
 * @param p_hwfn
 * @param p_ptt
 * @param vf
 * @param enable
 */
static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf, u8 enable)
{
	u32 reg_addr, val;
	u16 qzone_id = 0;
	int qid;

	for (qid = 0; qid < vf->num_rxqs; qid++) {
		qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
				&qzone_id);

		reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
		val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
		qed_wr(p_hwfn, p_ptt, reg_addr, val);
	}
}
static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf)
{
	/* Reset vf in IGU - interrupts are still disabled */
	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);

	/* Permission Table */
	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
}
static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf, u16 num_rx_queues)
{
	struct qed_igu_block *igu_blocks;
	int qid = 0, igu_id = 0;
	u32 val = 0;

	igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;

	if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
		num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
	p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;

	SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
	SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
	SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);

	while ((qid < num_rx_queues) &&
	       (igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev))) {
		if (igu_blocks[igu_id].status & QED_IGU_STATUS_FREE) {
			struct cau_sb_entry sb_entry;

			vf->igu_sbs[qid] = (u16)igu_id;
			igu_blocks[igu_id].status &= ~QED_IGU_STATUS_FREE;

			SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);

			qed_wr(p_hwfn, p_ptt,
			       IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
			       val);

			/* Configure igu sb in CAU which were marked valid */
			qed_init_cau_sb_entry(p_hwfn, &sb_entry,
					      p_hwfn->rel_pf_id,
					      vf->abs_vf_id, 1);
			qed_dmae_host2grc(p_hwfn, p_ptt,
					  (u64)(uintptr_t)&sb_entry,
					  CAU_REG_SB_VAR_MEMORY +
					  igu_id * sizeof(u64), 2, 0);
			qid++;
		}
		igu_id++;
	}

	vf->num_sbs = (u8) num_rx_queues;

	return vf->num_sbs;
}
static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_vf_info *vf)
{
	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	int idx, igu_id;
	u32 addr, val;

	/* Invalidate igu CAM lines and mark them as free */
	for (idx = 0; idx < vf->num_sbs; idx++) {
		igu_id = vf->igu_sbs[idx];
		addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;

		val = qed_rd(p_hwfn, p_ptt, addr);
		SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
		qed_wr(p_hwfn, p_ptt, addr, val);

		p_info->igu_map.igu_blocks[igu_id].status |=
		    QED_IGU_STATUS_FREE;

		p_hwfn->hw_info.p_igu_info->free_blks++;
	}

	vf->num_sbs = 0;
}
static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  u16 rel_vf_id, u16 num_rx_queues)
{
	u8 num_of_vf_avaiable_chains = 0;
	struct qed_vf_info *vf = NULL;
	int rc = 0;
	u32 cids;
	u8 i;

	vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
	if (!vf) {
		DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n");
		return -EINVAL;
	}

	if (vf->b_init) {
		DP_NOTICE(p_hwfn, "VF[%d] is already active.\n", rel_vf_id);
		return -EINVAL;
	}

	/* Limit number of queues according to number of CIDs */
	qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
		   vf->relative_vf_id, num_rx_queues, (u16) cids);
	num_rx_queues = min_t(u16, num_rx_queues, ((u16) cids));

	num_of_vf_avaiable_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn,
							     p_ptt,
							     vf,
							     num_rx_queues);
	if (!num_of_vf_avaiable_chains) {
		DP_ERR(p_hwfn, "no available igu sbs\n");
		return -ENOMEM;
	}

	/* Choose queue number and index ranges */
	vf->num_rxqs = num_of_vf_avaiable_chains;
	vf->num_txqs = num_of_vf_avaiable_chains;

	for (i = 0; i < vf->num_rxqs; i++) {
		u16 queue_id = qed_int_queue_id_from_sb_id(p_hwfn,
							   vf->igu_sbs[i]);

		if (queue_id > RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
			DP_NOTICE(p_hwfn,
				  "VF[%d] will require utilizing of out-of-bounds queues - %04x\n",
				  vf->relative_vf_id, queue_id);
			return -EINVAL;
		}

		/* CIDs are per-VF, so no problem having them 0-based. */
		vf->vf_queues[i].fw_rx_qid = queue_id;
		vf->vf_queues[i].fw_tx_qid = queue_id;
		vf->vf_queues[i].fw_cid = i;

		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\n",
			   vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i);
	}

	rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);
	if (!rc) {
		vf->b_init = true;

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->cdev->p_iov_info->num_vfs++;
	}

	return rc;
}
static void qed_iov_set_link(struct qed_hwfn *p_hwfn,
			     u16 vfid,
			     struct qed_mcp_link_params *params,
			     struct qed_mcp_link_state *link,
			     struct qed_mcp_link_capabilities *p_caps)
{
	struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
						       vfid,
						       false);
	struct qed_bulletin_content *p_bulletin;

	if (!p_vf)
		return;

	p_bulletin = p_vf->bulletin.p_virt;
	p_bulletin->req_autoneg = params->speed.autoneg;
	p_bulletin->req_adv_speed = params->speed.advertised_speeds;
	p_bulletin->req_forced_speed = params->speed.forced_speed;
	p_bulletin->req_autoneg_pause = params->pause.autoneg;
	p_bulletin->req_forced_rx = params->pause.forced_rx;
	p_bulletin->req_forced_tx = params->pause.forced_tx;
	p_bulletin->req_loopback = params->loopback_mode;

	p_bulletin->link_up = link->link_up;
	p_bulletin->speed = link->speed;
	p_bulletin->full_duplex = link->full_duplex;
	p_bulletin->autoneg = link->an;
	p_bulletin->autoneg_complete = link->an_complete;
	p_bulletin->parallel_detection = link->parallel_detection;
	p_bulletin->pfc_enabled = link->pfc_enabled;
	p_bulletin->partner_adv_speed = link->partner_adv_speed;
	p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
	p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
	p_bulletin->partner_adv_pause = link->partner_adv_pause;
	p_bulletin->sfp_tx_fault = link->sfp_tx_fault;

	p_bulletin->capability_speed = p_caps->speed_capabilities;
}
static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt, u16 rel_vf_id)
{
	struct qed_mcp_link_capabilities caps;
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	struct qed_vf_info *vf = NULL;
	int rc = 0;

	vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!vf) {
		DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n");
		return -EINVAL;
	}

	if (vf->bulletin.p_virt)
		memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt));

	memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));

	/* Get the link configuration back in bulletin so
	 * that when VFs are re-enabled they get the actual
	 * link configuration.
	 */
	memcpy(&params, qed_mcp_get_link_params(p_hwfn), sizeof(params));
	memcpy(&link, qed_mcp_get_link_state(p_hwfn), sizeof(link));
	memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
	qed_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);

	if (vf->state != VF_STOPPED) {
		/* Stopping the VF */
		rc = qed_sp_vf_stop(p_hwfn, vf->concrete_fid, vf->opaque_fid);
		if (rc != 0) {
			DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n",
			       rc);
			return rc;
		}

		vf->state = VF_STOPPED;
	}

	/* disabling interrupts and resetting permission table was done during
	 * vf-close, however, we could get here without going through vf_close
	 */
	/* Disable Interrupts for VF */
	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

	/* Reset Permission table */
	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

	vf->num_rxqs = 0;
	vf->num_txqs = 0;
	qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);

	if (vf->b_init) {
		vf->b_init = false;

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->cdev->p_iov_info->num_vfs--;
	}

	return 0;
}
static bool qed_iov_tlv_supported(u16 tlvtype)
{
	return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
}
/* place a given tlv on the tlv buffer, continuing current tlv list */
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length)
{
	struct channel_tlv *tl = (struct channel_tlv *)*offset;

	tl->type = type;
	tl->length = length;

	/* Offset should keep pointing to next TLV (the end of the last) */
	*offset += length;

	/* Return a pointer to the start of the added tlv */
	return *offset - length;
}
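/* Typical usage, mirroring qed_iov_prepare_resp() below: reset the offset
 * to the start of the reply buffer, add the response TLV, then terminate
 * the list:
 *
 *	mbx->offset = (u8 *)mbx->reply_virt;
 *	qed_add_tlv(p_hwfn, &mbx->offset, type, length);
 *	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
 *		    sizeof(struct channel_list_end_tlv));
 */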
/* list the types and lengths of the tlvs on the buffer */
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list)
{
	u16 i = 1, total_length = 0;
	struct channel_tlv *tlv;

	do {
		tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);

		/* output tlv */
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "TLV number %d: type %d, length %d\n",
			   i, tlv->type, tlv->length);

		if (tlv->type == CHANNEL_TLV_LIST_END)
			return;

		/* Validate entry - protect against malicious VFs */
		if (!tlv->length) {
			DP_NOTICE(p_hwfn, "TLV of length 0 found\n");
			return;
		}

		total_length += tlv->length;

		if (total_length >= sizeof(struct tlv_buffer_size)) {
			DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n");
			return;
		}

		i++;
	} while (1);
}
static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_vf_info *p_vf,
				  u16 length, u8 status)
{
	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct qed_dmae_params params;
	u8 eng_vf_id;

	mbx->reply_virt->default_resp.hdr.status = status;

	qed_dp_tlv_list(p_hwfn, mbx->reply_virt);

	eng_vf_id = p_vf->abs_vf_id;

	memset(&params, 0, sizeof(struct qed_dmae_params));
	params.flags = QED_DMAE_FLAG_VF_DST;
	params.dst_vfid = eng_vf_id;

	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
			   mbx->req_virt->first_tlv.reply_address +
			   sizeof(u64),
			   (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
			   &params);

	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
			   mbx->req_virt->first_tlv.reply_address,
			   sizeof(u64) / 4, &params);

	REG_WR(p_hwfn,
	       GTT_BAR0_MAP_REG_USDM_RAM +
	       USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
}
static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn,
				enum qed_iov_vport_update_flag flag)
{
	switch (flag) {
	case QED_IOV_VP_UPDATE_ACTIVATE:
		return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	case QED_IOV_VP_UPDATE_VLAN_STRIP:
		return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
	case QED_IOV_VP_UPDATE_TX_SWITCH:
		return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
	case QED_IOV_VP_UPDATE_MCAST:
		return CHANNEL_TLV_VPORT_UPDATE_MCAST;
	case QED_IOV_VP_UPDATE_ACCEPT_PARAM:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
	case QED_IOV_VP_UPDATE_RSS:
		return CHANNEL_TLV_VPORT_UPDATE_RSS;
	case QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
	case QED_IOV_VP_UPDATE_SGE_TPA:
		return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
	default:
		return 0;
	}
}
static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn,
					    struct qed_vf_info *p_vf,
					    struct qed_iov_vf_mbx *p_mbx,
					    u8 status,
					    u16 tlvs_mask, u16 tlvs_accepted)
{
	struct pfvf_def_resp_tlv *resp;
	u16 size, total_len, i;

	memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
	p_mbx->offset = (u8 *)p_mbx->reply_virt;
	size = sizeof(struct pfvf_def_resp_tlv);
	total_len = size;

	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);

	/* Prepare response for all extended tlvs if they are found by PF */
	for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) {
		if (!(tlvs_mask & (1 << i)))
			continue;

		resp = qed_add_tlv(p_hwfn, &p_mbx->offset,
				   qed_iov_vport_to_tlv(p_hwfn, i), size);

		if (tlvs_accepted & (1 << i))
			resp->hdr.status = status;
		else
			resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;

		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[%d] - vport_update response: TLV %d, status %02x\n",
			   p_vf->relative_vf_id,
			   qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);

		total_len += size;
	}

	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	return total_len;
}
static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_vf_info *vf_info,
				 u16 type, u16 length, u8 status)
{
	struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx;

	mbx->offset = (u8 *)mbx->reply_virt;

	qed_add_tlv(p_hwfn, &mbx->offset, type, length);
	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
}
struct qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
						      u16 relative_vf_id,
						      bool b_enabled_only)
{
	struct qed_vf_info *vf = NULL;

	vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
	if (!vf)
		return NULL;

	return &vf->p_vf_info;
}
void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
{
	struct qed_public_vf_info *vf_info;

	vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false);
	if (!vf_info)
		return;

	/* Clear the VF mac */
	memset(vf_info->mac, 0, ETH_ALEN);
}
static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
			       struct qed_vf_info *p_vf)
{
	u32 i;

	p_vf->vf_bulletin = 0;
	p_vf->vport_instance = 0;
	p_vf->num_mac_filters = 0;
	p_vf->num_vlan_filters = 0;
	p_vf->configured_features = 0;

	/* If VF previously requested less resources, go back to default */
	p_vf->num_rxqs = p_vf->num_sbs;
	p_vf->num_txqs = p_vf->num_sbs;

	p_vf->num_active_rxqs = 0;

	for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++)
		p_vf->vf_queues[i].rxq_active = 0;

	memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
	qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
}
static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf)
{
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
	u8 i, vfpf_status = PFVF_STATUS_SUCCESS;
	struct pf_vf_resc *resc = &resp->resc;

	/* Validate FW compatibility */
	if (req->vfdev_info.fw_major != FW_MAJOR_VERSION ||
	    req->vfdev_info.fw_minor != FW_MINOR_VERSION ||
	    req->vfdev_info.fw_revision != FW_REVISION_VERSION ||
	    req->vfdev_info.fw_engineering != FW_ENGINEERING_VERSION) {
		DP_INFO(p_hwfn,
			"VF[%d] is running an incompatible driver [VF needs FW %02x:%02x:%02x:%02x but Hypervisor is using %02x:%02x:%02x:%02x]\n",
			vf->abs_vf_id,
			req->vfdev_info.fw_major,
			req->vfdev_info.fw_minor,
			req->vfdev_info.fw_revision,
			req->vfdev_info.fw_engineering,
			FW_MAJOR_VERSION,
			FW_MINOR_VERSION,
			FW_REVISION_VERSION, FW_ENGINEERING_VERSION);
		vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}

	/* On 100g PFs, prevent old VFs from loading */
	if ((p_hwfn->cdev->num_hwfns > 1) &&
	    !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
		DP_INFO(p_hwfn,
			"VF[%d] is running an old driver that doesn't support 100g\n",
			vf->abs_vf_id);
		vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}

	memset(resp, 0, sizeof(*resp));

	/* Fill in vf info stuff */
	vf->opaque_fid = req->vfdev_info.opaque_fid;
	vf->num_mac_filters = 1;
	vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;

	vf->vf_bulletin = req->bulletin_addr;
	vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
			    vf->bulletin.size : req->bulletin_size;

	/* fill in pfdev info */
	pfdev_info->chip_num = p_hwfn->cdev->chip_num;
	pfdev_info->db_size = 0;
	pfdev_info->indices_per_sb = PIS_PER_SB;

	pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
				   PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
	if (p_hwfn->cdev->num_hwfns > 1)
		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;

	pfdev_info->stats_info.mstats.address =
	    PXP_VF_BAR0_START_MSDM_ZONE_B +
	    offsetof(struct mstorm_vf_zone, non_trigger.eth_queue_stat);
	pfdev_info->stats_info.mstats.len =
	    sizeof(struct eth_mstorm_per_queue_stat);

	pfdev_info->stats_info.ustats.address =
	    PXP_VF_BAR0_START_USDM_ZONE_B +
	    offsetof(struct ustorm_vf_zone, non_trigger.eth_queue_stat);
	pfdev_info->stats_info.ustats.len =
	    sizeof(struct eth_ustorm_per_queue_stat);

	pfdev_info->stats_info.pstats.address =
	    PXP_VF_BAR0_START_PSDM_ZONE_B +
	    offsetof(struct pstorm_vf_zone, non_trigger.eth_queue_stat);
	pfdev_info->stats_info.pstats.len =
	    sizeof(struct eth_pstorm_per_queue_stat);

	pfdev_info->stats_info.tstats.address = 0;
	pfdev_info->stats_info.tstats.len = 0;

	memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);

	pfdev_info->fw_major = FW_MAJOR_VERSION;
	pfdev_info->fw_minor = FW_MINOR_VERSION;
	pfdev_info->fw_rev = FW_REVISION_VERSION;
	pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
	pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
	qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);

	pfdev_info->dev_type = p_hwfn->cdev->type;
	pfdev_info->chip_rev = p_hwfn->cdev->chip_rev;

	resc->num_rxqs = vf->num_rxqs;
	resc->num_txqs = vf->num_txqs;
	resc->num_sbs = vf->num_sbs;
	for (i = 0; i < resc->num_sbs; i++) {
		resc->hw_sbs[i].hw_sb_id = vf->igu_sbs[i];
		resc->hw_sbs[i].sb_qid = 0;
	}

	for (i = 0; i < resc->num_rxqs; i++) {
		qed_fw_l2_queue(p_hwfn, vf->vf_queues[i].fw_rx_qid,
				(u16 *)&resc->hw_qid[i]);
		resc->cid[i] = vf->vf_queues[i].fw_cid;
	}

	resc->num_mac_filters = min_t(u8, vf->num_mac_filters,
				      req->resc_request.num_mac_filters);
	resc->num_vlan_filters = min_t(u8, vf->num_vlan_filters,
				       req->resc_request.num_vlan_filters);

	/* This isn't really required as VF isn't limited, but some VFs might
	 * actually test this value, so need to provide it.
	 */
	resc->num_mc_filters = req->resc_request.num_mc_filters;

	/* Fill agreed size of bulletin board in response */
	resp->bulletin_size = vf->bulletin.size;
	qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n"
		   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n",
		   vf->abs_vf_id,
		   resp->pfdev_info.chip_num,
		   resp->pfdev_info.db_size,
		   resp->pfdev_info.indices_per_sb,
		   resp->pfdev_info.capabilities,
		   resc->num_rxqs,
		   resc->num_txqs,
		   resc->num_sbs,
		   resc->num_mac_filters,
		   resc->num_vlan_filters);
	vf->state = VF_ACQUIRED;

	/* Prepare Response */
out:
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
			     sizeof(struct pfvf_acquire_resp_tlv),
			     vfpf_status);
}
static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn,
				  struct qed_vf_info *p_vf, bool val)
{
	struct qed_sp_vport_update_params params;
	int rc;

	if (val == p_vf->spoof_chk) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Spoofchk value[%d] is already configured\n", val);
		return 0;
	}

	memset(&params, 0, sizeof(struct qed_sp_vport_update_params));
	params.opaque_fid = p_vf->opaque_fid;
	params.vport_id = p_vf->vport_id;
	params.update_anti_spoofing_en_flg = 1;
	params.anti_spoofing_en = val;

	rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
	if (!rc) {
		p_vf->spoof_chk = val;
		p_vf->req_spoofchk_val = p_vf->spoof_chk;
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Spoofchk val[%d] configured\n", val);
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Spoofchk configuration[val:%d] failed for VF[%d]\n",
			   val, p_vf->relative_vf_id);
	}

	return rc;
}
static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn,
					    struct qed_vf_info *p_vf)
{
	struct qed_filter_ucast filter;
	int rc = 0;
	int i;

	memset(&filter, 0, sizeof(filter));
	filter.is_rx_filter = 1;
	filter.is_tx_filter = 1;
	filter.vport_to_add_to = p_vf->vport_id;
	filter.opcode = QED_FILTER_ADD;

	/* Reconfigure vlans */
	for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
		if (!p_vf->shadow_config.vlans[i].used)
			continue;

		filter.type = QED_FILTER_VLAN;
		filter.vlan = p_vf->shadow_config.vlans[i].vid;
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
			   filter.vlan, p_vf->relative_vf_id);
		rc = qed_sp_eth_filter_ucast(p_hwfn,
					     p_vf->opaque_fid,
					     &filter,
					     QED_SPQ_MODE_CB, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Failed to configure VLAN [%04x] to VF [%04x]\n",
				  filter.vlan, p_vf->relative_vf_id);
			break;
		}
	}

	return rc;
}
static int
qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn,
				   struct qed_vf_info *p_vf, u64 events)
{
	int rc = 0;

	if ((events & (1 << VLAN_ADDR_FORCED)) &&
	    !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
		rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);

	return rc;
}
static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
					  struct qed_vf_info *p_vf, u64 events)
{
	struct qed_filter_ucast filter;
	int rc = 0;

	if (!p_vf->vport_instance)
		return -EINVAL;

	if (events & (1 << MAC_ADDR_FORCED)) {
		/* Since there's no way [currently] of removing the MAC,
		 * we can always assume this means we need to force it.
		 */
		memset(&filter, 0, sizeof(filter));
		filter.type = QED_FILTER_MAC;
		filter.opcode = QED_FILTER_REPLACE;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac);

		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					     &filter, QED_SPQ_MODE_CB, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "PF failed to configure MAC for VF\n");
			return rc;
		}

		p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
	}

	if (events & (1 << VLAN_ADDR_FORCED)) {
		struct qed_sp_vport_update_params vport_update;
		u8 removal;
		int i;

		memset(&filter, 0, sizeof(filter));
		filter.type = QED_FILTER_VLAN;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		filter.vlan = p_vf->bulletin.p_virt->pvid;
		filter.opcode = filter.vlan ? QED_FILTER_REPLACE :
					      QED_FILTER_FLUSH;

		/* Send the ramrod */
		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					     &filter, QED_SPQ_MODE_CB, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "PF failed to configure VLAN for VF\n");
			return rc;
		}

		/* Update the default-vlan & silent vlan stripping */
		memset(&vport_update, 0, sizeof(vport_update));
		vport_update.opaque_fid = p_vf->opaque_fid;
		vport_update.vport_id = p_vf->vport_id;
		vport_update.update_default_vlan_enable_flg = 1;
		vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
		vport_update.update_default_vlan_flg = 1;
		vport_update.default_vlan = filter.vlan;

		vport_update.update_inner_vlan_removal_flg = 1;
		removal = filter.vlan ? 1
				      : p_vf->shadow_config.inner_vlan_removal;
		vport_update.inner_vlan_removal_flg = removal;
		vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
		rc = qed_sp_vport_update(p_hwfn,
					 &vport_update,
					 QED_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "PF failed to configure VF vport for vlan\n");
			return rc;
		}

		/* Update all the Rx queues */
		for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
			u16 qid;

			if (!p_vf->vf_queues[i].rxq_active)
				continue;

			qid = p_vf->vf_queues[i].fw_rx_qid;

			rc = qed_sp_eth_rx_queues_update(p_hwfn, qid,
							 1, 0, 1,
							 QED_SPQ_MODE_EBLOCK,
							 NULL);
			if (rc) {
				DP_NOTICE(p_hwfn,
					  "Failed to send Rx update for queue[0x%04x]\n",
					  qid);
				return rc;
			}
		}

		if (filter.vlan)
			p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
		else
			p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED);
	}

	/* If forced features are terminated, we need to configure the shadow
	 * configuration back again.
	 */
	if (events)
		qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);

	return rc;
}
static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_vf_info *vf)
{
	struct qed_sp_vport_start_params params = { 0 };
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_vport_start_tlv *start;
	u8 status = PFVF_STATUS_SUCCESS;
	struct qed_vf_info *vf_info;
	u64 *p_bitmap;
	int sb_id;
	int rc;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->cdev,
			  "Failed to get VF info, invalid vfid [%d]\n",
			  vf->relative_vf_id);
		return;
	}

	vf->state = VF_ENABLED;
	start = &mbx->req_virt->start_vport;

	/* Initialize Status block in CAU */
	for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
		if (!start->sb_addr[sb_id]) {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF[%d] did not fill the address of SB %d\n",
				   vf->relative_vf_id, sb_id);
			break;
		}

		qed_int_cau_conf_sb(p_hwfn, p_ptt,
				    start->sb_addr[sb_id],
				    vf->igu_sbs[sb_id],
				    vf->abs_vf_id, 1);
	}
	qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);

	vf->mtu = start->mtu;
	vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;

	/* Take into consideration configuration forced by hypervisor;
	 * If none is configured, use the supplied VF values [for old
	 * vfs that would still be fine, since they passed '0' as padding].
	 */
	p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
	if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
		u8 vf_req = start->only_untagged;

		vf_info->bulletin.p_virt->default_only_untagged = vf_req;
		*p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
	}

	params.tpa_mode = start->tpa_mode;
	params.remove_inner_vlan = start->inner_vlan_removal;
	params.tx_switching = true;

	params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
	params.drop_ttl0 = false;
	params.concrete_fid = vf->concrete_fid;
	params.opaque_fid = vf->opaque_fid;
	params.vport_id = vf->vport_id;
	params.max_buffers_per_cqe = start->max_buffers_per_cqe;
	params.mtu = vf->mtu;

	rc = qed_sp_eth_vport_start(p_hwfn, &params);
	if (rc != 0) {
		DP_ERR(p_hwfn,
		       "qed_iov_vf_mbx_start_vport returned error %d\n", rc);
		status = PFVF_STATUS_FAILURE;
	} else {
		vf->vport_instance++;

		/* Force configuration if needed on the newly opened vport */
		qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);

		__qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
	}
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
			     sizeof(struct pfvf_def_resp_tlv), status);
}
static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf)
{
	u8 status = PFVF_STATUS_SUCCESS;
	int rc;

	vf->vport_instance--;
	vf->spoof_chk = false;

	rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
	if (rc != 0) {
		DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
		       rc);
		status = PFVF_STATUS_FAILURE;
	}

	/* Forget the configuration on the vport */
	vf->configured_features = 0;
	memset(&vf->shadow_config, 0, sizeof(vf->shadow_config));

	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
			     sizeof(struct pfvf_def_resp_tlv), status);
}
#define TSTORM_QZONE_START	PXP_VF_BAR0_START_SDM_ZONE_A
#define MSTORM_QZONE_START(dev)	(TSTORM_QZONE_START + \
				 (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
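/* MSTORM_QZONE_START() above is the base used by
 * qed_iov_vf_mbx_start_rxq_resp() below: the producer address returned to
 * the VF is this base plus hw_qid * MSTORM_QZONE_SIZE plus the offset of
 * the rx producers within the queue zone.
 */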
static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt,
					  struct qed_vf_info *vf, u8 status)
{
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct pfvf_start_queue_resp_tlv *p_tlv;
	struct vfpf_start_rxq_tlv *req;

	mbx->offset = (u8 *)mbx->reply_virt;

	p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
			    sizeof(*p_tlv));
	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	/* Update the TLV with the response */
	if (status == PFVF_STATUS_SUCCESS) {
		u16 hw_qid = 0;

		req = &mbx->req_virt->start_rxq;
		qed_fw_l2_queue(p_hwfn, vf->vf_queues[req->rx_qid].fw_rx_qid,
				&hw_qid);

		p_tlv->offset = MSTORM_QZONE_START(p_hwfn->cdev) +
				hw_qid * MSTORM_QZONE_SIZE +
				offsetof(struct mstorm_eth_queue_zone,
					 rx_producers);
	}

	qed_iov_send_response(p_hwfn, p_ptt, vf, sizeof(*p_tlv), status);
}
static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_vf_info *vf)
{
	struct qed_queue_start_common_params params;
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	u8 status = PFVF_STATUS_SUCCESS;
	struct vfpf_start_rxq_tlv *req;
	int rc;

	memset(&params, 0, sizeof(params));
	req = &mbx->req_virt->start_rxq;
	params.queue_id = vf->vf_queues[req->rx_qid].fw_rx_qid;
	params.vport_id = vf->vport_id;
	params.sb = req->hw_sb;
	params.sb_idx = req->sb_index;

	rc = qed_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,
					 vf->vf_queues[req->rx_qid].fw_cid,
					 &params,
					 vf->abs_vf_id + 0x10,
					 req->bd_max_bytes,
					 req->rxq_addr,
					 req->cqe_pbl_addr, req->cqe_pbl_size);

	if (rc) {
		status = PFVF_STATUS_FAILURE;
	} else {
		vf->vf_queues[req->rx_qid].rxq_active = true;
		vf->num_active_rxqs++;
	}

	qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status);
}
static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_vf_info *vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	struct qed_queue_start_common_params params;
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	union qed_qm_pq_params pq_params;
	u8 status = PFVF_STATUS_SUCCESS;
	struct vfpf_start_txq_tlv *req;
	int rc;

	/* Prepare the parameters which would choose the right PQ */
	memset(&pq_params, 0, sizeof(pq_params));
	pq_params.eth.is_vf = 1;
	pq_params.eth.vf_id = vf->relative_vf_id;

	memset(&params, 0, sizeof(params));
	req = &mbx->req_virt->start_txq;
	params.queue_id = vf->vf_queues[req->tx_qid].fw_tx_qid;
	params.vport_id = vf->vport_id;
	params.sb = req->hw_sb;
	params.sb_idx = req->sb_index;

	rc = qed_sp_eth_txq_start_ramrod(p_hwfn,
					 vf->opaque_fid,
					 vf->vf_queues[req->tx_qid].fw_cid,
					 &params,
					 vf->abs_vf_id + 0x10,
					 req->pbl_addr,
					 req->pbl_size, &pq_params);

	if (rc)
		status = PFVF_STATUS_FAILURE;
	else
		vf->vf_queues[req->tx_qid].txq_active = true;

	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_START_TXQ,
			     length, status);
}
static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
				struct qed_vf_info *vf,
				u16 rxq_id, u8 num_rxqs, bool cqe_completion)
{
	int rc = 0;
	int qid;

	if (rxq_id + num_rxqs > ARRAY_SIZE(vf->vf_queues))
		return -EINVAL;

	for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
		if (vf->vf_queues[qid].rxq_active) {
			rc = qed_sp_eth_rx_queue_stop(p_hwfn,
						      vf->vf_queues[qid].fw_rx_qid,
						      false, cqe_completion);
			if (rc)
				return rc;
		}
		vf->vf_queues[qid].rxq_active = false;
		vf->num_active_rxqs--;
	}

	return rc;
}
static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
				struct qed_vf_info *vf, u16 txq_id, u8 num_txqs)
{
	int rc = 0;
	int qid;

	if (txq_id + num_txqs > ARRAY_SIZE(vf->vf_queues))
		return -EINVAL;

	for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
		if (vf->vf_queues[qid].txq_active) {
			rc = qed_sp_eth_tx_queue_stop(p_hwfn,
						      vf->vf_queues[qid].fw_tx_qid);
			if (rc)
				return rc;
		}
		vf->vf_queues[qid].txq_active = false;
	}

	return rc;
}
static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_vf_info *vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	u8 status = PFVF_STATUS_SUCCESS;
	struct vfpf_stop_rxqs_tlv *req;
	int rc;

	/* We give the option of starting from qid != 0, in this case we
	 * need to make sure that qid + num_qs doesn't exceed the actual
	 * amount of queues that exist.
	 */
	req = &mbx->req_virt->stop_rxqs;
	rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
				  req->num_rxqs, req->cqe_completion);
	if (rc)
		status = PFVF_STATUS_FAILURE;

	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
			     length, status);
}
static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_vf_info *vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	u8 status = PFVF_STATUS_SUCCESS;
	struct vfpf_stop_txqs_tlv *req;
	int rc;

	/* We give the option of starting from qid != 0, in this case we
	 * need to make sure that qid + num_qs doesn't exceed the actual
	 * amount of queues that exist.
	 */
	req = &mbx->req_virt->stop_txqs;
	rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs);
	if (rc)
		status = PFVF_STATUS_FAILURE;

	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
			     length, status);
}
static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_vf_info *vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_update_rxq_tlv *req;
	u8 status = PFVF_STATUS_SUCCESS;
	u8 complete_event_flg;
	u8 complete_cqe_flg;
	u16 qid;
	int rc;
	u8 i;

	req = &mbx->req_virt->update_rxq;
	complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
	complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);

	for (i = 0; i < req->num_rxqs; i++) {
		qid = req->rx_qid + i;

		if (!vf->vf_queues[qid].rxq_active) {
			DP_NOTICE(p_hwfn, "VF rx_qid = %d isn't active!\n",
				  qid);
			status = PFVF_STATUS_FAILURE;
			break;
		}

		rc = qed_sp_eth_rx_queues_update(p_hwfn,
						 vf->vf_queues[qid].fw_rx_qid,
						 1,
						 complete_cqe_flg,
						 complete_event_flg,
						 QED_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			status = PFVF_STATUS_FAILURE;
			break;
		}
	}

	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
			     length, status);
}
void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
			       void *p_tlvs_list, u16 req_type)
{
	struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
	int len = 0;

	do {
		if (!p_tlv->length) {
			DP_NOTICE(p_hwfn, "Zero length TLV found\n");
			return NULL;
		}

		if (p_tlv->type == req_type) {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "Extended tlv type %d, length %d found\n",
				   p_tlv->type, p_tlv->length);
			return p_tlv;
		}

		len += p_tlv->length;
		p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);

		if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
			DP_NOTICE(p_hwfn, "TLVs has overrun the buffer size\n");
			return NULL;
		}
	} while (p_tlv->type != CHANNEL_TLV_LIST_END);

	return NULL;
}
static void
qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn,
			    struct qed_sp_vport_update_params *p_data,
			    struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_activate_tlv *p_act_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;

	p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
		    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_act_tlv)
		return;

	p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
	p_data->vport_active_rx_flg = p_act_tlv->active_rx;
	p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
	p_data->vport_active_tx_flg = p_act_tlv->active_tx;
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE;
}
static void
qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn,
			     struct qed_sp_vport_update_params *p_data,
			     struct qed_vf_info *p_vf,
			     struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;

	p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
		     qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_vlan_tlv)
		return;

	p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;

	/* Ignore the VF request if we're forcing a vlan */
	if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) {
		p_data->update_inner_vlan_removal_flg = 1;
		p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
	}

	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP;
}
static void
qed_iov_vp_update_tx_switch(struct qed_hwfn *p_hwfn,
			    struct qed_sp_vport_update_params *p_data,
			    struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;

	p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
			  qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
						   tlv);
	if (!p_tx_switch_tlv)
		return;

	p_data->update_tx_switching_flg = 1;
	p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_TX_SWITCH;
}
static void
qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data,
				  struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;

	p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
		      qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_mcast_tlv)
		return;

	p_data->update_approx_mcast_flg = 1;
	memcpy(p_data->bins, p_mcast_tlv->bins,
	       sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
}
static void
qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn,
			      struct qed_sp_vport_update_params *p_data,
			      struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct qed_filter_accept_flags *p_flags = &p_data->accept_flags;
	struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;

	p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
		       qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_accept_tlv)
		return;

	p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
	p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
	p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
	p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM;
}
static void
qed_iov_vp_update_accept_any_vlan(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data,
				  struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;

	p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
			    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
						     tlv);
	if (!p_accept_any_vlan)
		return;

	p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
	p_data->update_accept_any_vlan_flg =
		    p_accept_any_vlan->update_accept_any_vlan_flg;
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
}
static void
qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
			    struct qed_vf_info *vf,
			    struct qed_sp_vport_update_params *p_data,
			    struct qed_rss_params *p_rss,
			    struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_rss_tlv *p_rss_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
	u16 i, q_idx, max_q_idx;
	u16 table_size;

	p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
		    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_rss_tlv) {
		p_data->rss_params = NULL;
		return;
	}

	memset(p_rss, 0, sizeof(struct qed_rss_params));

	p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags &
				      VFPF_UPDATE_RSS_CONFIG_FLAG);
	p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags &
					    VFPF_UPDATE_RSS_CAPS_FLAG);
	p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags &
					 VFPF_UPDATE_RSS_IND_TABLE_FLAG);
	p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags &
				   VFPF_UPDATE_RSS_KEY_FLAG);

	p_rss->rss_enable = p_rss_tlv->rss_enable;
	p_rss->rss_eng_id = vf->relative_vf_id + 1;
	p_rss->rss_caps = p_rss_tlv->rss_caps;
	p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
	memcpy(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table,
	       sizeof(p_rss->rss_ind_table));
	memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key));

	table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table),
			   (1 << p_rss_tlv->rss_table_size_log));

	max_q_idx = ARRAY_SIZE(vf->vf_queues);

	for (i = 0; i < table_size; i++) {
		u16 index = vf->vf_queues[0].fw_rx_qid;

		q_idx = p_rss->rss_ind_table[i];
		if (q_idx >= max_q_idx)
			DP_NOTICE(p_hwfn,
				  "rss_ind_table[%d] = %d, rxq is out of range\n",
				  i, q_idx);
		else if (!vf->vf_queues[q_idx].rxq_active)
			DP_NOTICE(p_hwfn,
				  "rss_ind_table[%d] = %d, rxq is not active\n",
				  i, q_idx);
		else
			index = vf->vf_queues[q_idx].fw_rx_qid;
		p_rss->rss_ind_table[i] = index;
	}

	p_data->rss_params = p_rss;
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS;
}
static void
qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn,
				struct qed_vf_info *vf,
				struct qed_sp_vport_update_params *p_data,
				struct qed_sge_tpa_params *p_sge_tpa,
				struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;

	p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
	    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);

	if (!p_sge_tpa_tlv) {
		p_data->sge_tpa_params = NULL;
		return;
	}

	memset(p_sge_tpa, 0, sizeof(struct qed_sge_tpa_params));

	p_sge_tpa->update_tpa_en_flg =
	    !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
	p_sge_tpa->update_tpa_param_flg =
	    !!(p_sge_tpa_tlv->update_sge_tpa_flags &
	       VFPF_UPDATE_TPA_PARAM_FLAG);

	p_sge_tpa->tpa_ipv4_en_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
	p_sge_tpa->tpa_ipv6_en_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
	p_sge_tpa->tpa_pkt_split_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
	p_sge_tpa->tpa_hdr_data_split_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
	p_sge_tpa->tpa_gro_consistent_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);

	p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
	p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
	p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
	p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
	p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;

	p_data->sge_tpa_params = p_sge_tpa;

	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA;
}

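/* Handler for the VPORT_UPDATE mailbox request. The request carries an
 * arbitrary set of extended TLVs; each qed_iov_vp_update_*() helper below
 * consumes its own TLV (if present) and sets the matching bit in tlvs_mask,
 * so a single ramrod aggregates all requested changes.
 */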
static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					struct qed_vf_info *vf)
{
	struct qed_sp_vport_update_params params;
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct qed_sge_tpa_params sge_tpa_params;
	struct qed_rss_params rss_params;
	u8 status = PFVF_STATUS_SUCCESS;
	u16 tlvs_mask = 0;
	u16 length;
	int rc;

	memset(&params, 0, sizeof(params));
	params.opaque_fid = vf->opaque_fid;
	params.vport_id = vf->vport_id;
	params.rss_params = NULL;

	/* Search for extended tlvs list and update values
	 * from VF in struct qed_sp_vport_update_params.
	 */
	qed_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
	qed_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_rss_param(p_hwfn, vf, &params, &rss_params,
				    mbx, &tlvs_mask);
	qed_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
					&sge_tpa_params, mbx, &tlvs_mask);

	/* Just log a message if no single extended TLV was found in the
	 * buffer. Once the VF requests all vport-update features as
	 * extended TLVs, an error can be returned in the response instead
	 * when no extended TLV is present.
	 */
	if (!tlvs_mask) {
		DP_NOTICE(p_hwfn,
			  "No feature tlvs found for vport update\n");
		status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}

	rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);

	if (rc)
		status = PFVF_STATUS_FAILURE;

out:
	length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
						  tlvs_mask, tlvs_mask);
	qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
}

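/* Maintain the PF's shadow copy of the VF's vlan filter configuration,
 * mirroring add/remove/replace/flush requests. Returns -EINVAL when the
 * VF removes a non-existing vlan or exceeds its filter quota.
 */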
static int qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
					    struct qed_vf_info *p_vf,
					    struct qed_filter_ucast *p_params)
{
	int i;

	if (p_params->type == QED_FILTER_MAC)
		return 0;

	/* First remove entries and then add new ones */
	if (p_params->opcode == QED_FILTER_REMOVE) {
		for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
			if (p_vf->shadow_config.vlans[i].used &&
			    p_vf->shadow_config.vlans[i].vid ==
			    p_params->vlan) {
				p_vf->shadow_config.vlans[i].used = false;
				break;
			}
		if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF [%d] - Tries to remove a non-existing vlan\n",
				   p_vf->relative_vf_id);
			return -EINVAL;
		}
	} else if (p_params->opcode == QED_FILTER_REPLACE ||
		   p_params->opcode == QED_FILTER_FLUSH) {
		for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
			p_vf->shadow_config.vlans[i].used = false;
	}

	/* In forced mode, we're willing to remove entries - but we don't add
	 * new ones.
	 */
	if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
		return 0;

	if (p_params->opcode == QED_FILTER_ADD ||
	    p_params->opcode == QED_FILTER_REPLACE) {
		for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
			if (p_vf->shadow_config.vlans[i].used)
				continue;

			p_vf->shadow_config.vlans[i].used = true;
			p_vf->shadow_config.vlans[i].vid = p_params->vlan;
			break;
		}

		if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF [%d] - Tries to configure more than %d vlan filters\n",
				   p_vf->relative_vf_id,
				   QED_ETH_VF_NUM_VLAN_FILTERS + 1);
			return -EINVAL;
		}
	}

	return 0;
}

int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
		      int vfid, struct qed_filter_ucast *params)
{
	struct qed_public_vf_info *vf;

	vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
	if (!vf)
		return -EINVAL;

	/* No real decision to make; Store the configured MAC */
	if (params->type == QED_FILTER_MAC ||
	    params->type == QED_FILTER_MAC_VLAN)
		ether_addr_copy(vf->mac, params->mac);

	return 0;
}

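/* Handler for the UCAST_FILTER mailbox request - validates the filter
 * against forced MAC/VLAN settings published in the bulletin board before
 * configuring it via a ramrod.
 */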
static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					struct qed_vf_info *vf)
{
	struct qed_bulletin_content *p_bulletin = vf->bulletin.p_virt;
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_ucast_filter_tlv *req;
	u8 status = PFVF_STATUS_SUCCESS;
	struct qed_filter_ucast params;
	int rc;

	/* Prepare the unicast filter params */
	memset(&params, 0, sizeof(struct qed_filter_ucast));
	req = &mbx->req_virt->ucast_filter;
	params.opcode = (enum qed_filter_opcode)req->opcode;
	params.type = (enum qed_filter_ucast_type)req->type;

	params.is_rx_filter = 1;
	params.is_tx_filter = 1;
	params.vport_to_remove_from = vf->vport_id;
	params.vport_to_add_to = vf->vport_id;
	memcpy(params.mac, req->mac, ETH_ALEN);
	params.vlan = req->vlan;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
		   vf->abs_vf_id, params.opcode, params.type,
		   params.is_rx_filter ? "RX" : "",
		   params.is_tx_filter ? "TX" : "",
		   params.vport_to_add_to,
		   params.mac[0], params.mac[1],
		   params.mac[2], params.mac[3],
		   params.mac[4], params.mac[5], params.vlan);

	if (!vf->vport_instance) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "No VPORT instance available for VF[%d], failing ucast MAC configuration\n",
			   vf->abs_vf_id);
		status = PFVF_STATUS_FAILURE;
		goto out;
	}

	/* Update shadow copy of the VF configuration */
	if (qed_iov_vf_update_unicast_shadow(p_hwfn, vf, &params)) {
		status = PFVF_STATUS_FAILURE;
		goto out;
	}

	/* Determine if the unicast filtering is acceptable by PF */
	if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
	    (params.type == QED_FILTER_VLAN ||
	     params.type == QED_FILTER_MAC_VLAN)) {
		/* Once VLAN is forced or PVID is set, do not allow
		 * to add/replace any further VLANs.
		 */
		if (params.opcode == QED_FILTER_ADD ||
		    params.opcode == QED_FILTER_REPLACE)
			status = PFVF_STATUS_FORCED;
		goto out;
	}

	if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&
	    (params.type == QED_FILTER_MAC ||
	     params.type == QED_FILTER_MAC_VLAN)) {
		if (!ether_addr_equal(p_bulletin->mac, params.mac) ||
		    (params.opcode != QED_FILTER_ADD &&
		     params.opcode != QED_FILTER_REPLACE))
			status = PFVF_STATUS_FORCED;
		goto out;
	}

	rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, &params);
	if (rc) {
		status = PFVF_STATUS_FAILURE;
		goto out;
	}

	rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
				     QED_SPQ_MODE_CB, NULL);
	if (rc)
		status = PFVF_STATUS_FAILURE;

out:
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
			     sizeof(struct pfvf_def_resp_tlv), status);
}

static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_vf_info *vf)
{
	int i;

	/* Reset the SBs */
	for (i = 0; i < vf->num_sbs; i++)
		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						vf->igu_sbs[i],
						vf->opaque_fid, false);

	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
			     sizeof(struct pfvf_def_resp_tlv),
			     PFVF_STATUS_SUCCESS);
}

static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	u8 status = PFVF_STATUS_SUCCESS;

	/* Disable Interrupts for VF */
	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

	/* Reset Permission table */
	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
			     length, status);
}

static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *p_vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);

	qed_iov_vf_cleanup(p_hwfn, p_vf);

	qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
			     length, PFVF_STATUS_SUCCESS);
}

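/* Poll the DORQ usage counter of the VF (while pretending to its fid)
 * until all its doorbells are drained, as part of FLR cleanup.
 */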
static int
qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
			 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
{
	int cnt;
	u32 val;

	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid);

	for (cnt = 0; cnt < 50; cnt++) {
		val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
		if (!val)
			break;
		msleep(20);
	}
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

	if (cnt == 50) {
		DP_ERR(p_hwfn,
		       "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
		       p_vf->abs_vf_id, val);
		return -EBUSY;
	}

	return 0;
}

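/* Poll the PBF per-VOQ block counters until the consumers advance past
 * the producer values sampled at entry, i.e. until traffic that was
 * in-flight when FLR began has left the TX path.
 */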
static int
qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
			struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
{
	u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
	int i, cnt;

	/* Read initial consumers & producers */
	for (i = 0; i < MAX_NUM_VOQS; i++) {
		u32 prod;

		cons[i] = qed_rd(p_hwfn, p_ptt,
				 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
				 i * 0x40);
		prod = qed_rd(p_hwfn, p_ptt,
			      PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
			      i * 0x40);
		distance[i] = prod - cons[i];
	}

	/* Wait for consumers to pass the producers */
	i = 0;
	for (cnt = 0; cnt < 50; cnt++) {
		for (; i < MAX_NUM_VOQS; i++) {
			u32 tmp;

			tmp = qed_rd(p_hwfn, p_ptt,
				     PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
				     i * 0x40);
			if (distance[i] > tmp - cons[i])
				break;
		}

		if (i == MAX_NUM_VOQS)
			break;

		msleep(20);
	}

	if (cnt == 50) {
		DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
		       p_vf->abs_vf_id, i);
		return -EBUSY;
	}

	return 0;
}

static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn,
			       struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
{
	int rc;

	rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
	if (rc)
		return rc;

	rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
	if (rc)
		return rc;

	return 0;
}

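/* Perform the FLR cleanup flow for a single VF: SW cleanup, HW polling,
 * final cleanup ramrod, and re-enabling VF access; the VF is then marked
 * for FLR ack towards the MFW.
 */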
static int
qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt,
			       u16 rel_vf_id, u32 *ack_vfs)
{
	struct qed_vf_info *p_vf;
	int rc = 0;

	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
	if (!p_vf)
		return 0;

	if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
	    (1ULL << (rel_vf_id % 64))) {
		u16 vfid = p_vf->abs_vf_id;

		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%d] - Handling FLR\n", vfid);

		qed_iov_vf_cleanup(p_hwfn, p_vf);

		/* If VF isn't active, no need for anything but SW */
		if (!p_vf->b_init)
			goto cleanup;

		rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
		if (rc)
			goto cleanup;

		rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true);
		if (rc) {
			DP_ERR(p_hwfn, "Failed handle FLR of VF[%d]\n", vfid);
			return rc;
		}

		/* VF_STOPPED has to be set only after final cleanup
		 * but prior to re-enabling the VF.
		 */
		p_vf->state = VF_STOPPED;

		rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
		if (rc) {
			DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
			       vfid);
			return rc;
		}
cleanup:
		/* Mark VF for ack and clean pending state */
		if (p_vf->state == VF_RESET)
			p_vf->state = VF_STOPPED;
		ack_vfs[vfid / 32] |= (1 << (vfid % 32));
		p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
		    ~(1ULL << (rel_vf_id % 64));
		p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
		    ~(1ULL << (rel_vf_id % 64));
	}

	return rc;
}

int qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 ack_vfs[VF_MAX_STATIC / 32];
	int rc = 0;
	u16 i;

	memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));

	/* Since BRB <-> PRS interface can't be tested as part of the flr
	 * polling due to HW limitations, simply sleep a bit. And since
	 * there's no need to wait per-vf, do it before looping.
	 */
	msleep(100);

	for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++)
		qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);

	rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
	return rc;
}

int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
{
	u16 i, found = 0;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n");
	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "[%08x,...,%08x]: %08x\n",
			   i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);

	if (!p_hwfn->cdev->p_iov_info) {
		DP_NOTICE(p_hwfn, "VF flr but no IOV\n");
		return 0;
	}

	/* Mark VFs */
	for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) {
		struct qed_vf_info *p_vf;
		u8 vfid;

		p_vf = qed_iov_get_vf_info(p_hwfn, i, false);
		if (!p_vf)
			continue;

		vfid = p_vf->abs_vf_id;
		if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
			u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
			u16 rel_vf_id = p_vf->relative_vf_id;

			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF[%d] [rel %d] got FLR-ed\n",
				   vfid, rel_vf_id);

			p_vf->state = VF_RESET;

			/* No need to lock here, since pending_flr should
			 * only change here and before ACKing MFw. Since
			 * MFW will not trigger an additional attention for
			 * VF flr until ACKs, we're safe.
			 */
			p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
			found = 1;
		}
	}

	return found;
}

static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
			     u16 vfid,
			     struct qed_mcp_link_params *p_params,
			     struct qed_mcp_link_state *p_link,
			     struct qed_mcp_link_capabilities *p_caps)
{
	struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
						       vfid,
						       false);
	struct qed_bulletin_content *p_bulletin;

	if (!p_vf)
		return;

	p_bulletin = p_vf->bulletin.p_virt;

	if (p_params)
		__qed_vf_get_link_params(p_hwfn, p_params, p_bulletin);
	if (p_link)
		__qed_vf_get_link_state(p_hwfn, p_link, p_bulletin);
	if (p_caps)
		__qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
}

static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt, int vfid)
{
	struct qed_iov_vf_mbx *mbx;
	struct qed_vf_info *p_vf;
	int i;

	p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!p_vf)
		return;

	mbx = &p_vf->vf_mbx;

	/* qed_iov_process_mbx_request */
	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "qed_iov_process_mbx_req vfid %d\n", p_vf->abs_vf_id);

	mbx->first_tlv = mbx->req_virt->first_tlv;

	/* check if tlv type is known */
	if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
		switch (mbx->first_tlv.tl.type) {
		case CHANNEL_TLV_ACQUIRE:
			qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_VPORT_START:
			qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_VPORT_TEARDOWN:
			qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_START_RXQ:
			qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_START_TXQ:
			qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_STOP_RXQS:
			qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_STOP_TXQS:
			qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_UPDATE_RXQ:
			qed_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_VPORT_UPDATE:
			qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_UCAST_FILTER:
			qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_CLOSE:
			qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_INT_CLEANUP:
			qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_RELEASE:
			qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
			break;
		}
	} else {
		/* unknown TLV - this may belong to a VF driver from the future
		 * - a version written after this PF driver was written, which
		 * supports features unknown as of yet. Too bad since we don't
		 * support them. Or this may be because someone wrote a crappy
		 * VF driver and is sending garbage over the channel.
		 */
		DP_ERR(p_hwfn,
		       "unknown TLV. type %d length %d. first 20 bytes of mailbox buffer:\n",
		       mbx->first_tlv.tl.type, mbx->first_tlv.tl.length);

		for (i = 0; i < 20; i++) {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "%x ",
				   mbx->req_virt->tlv_buf_size.tlv_buffer[i]);
		}
	}
}

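/* Pending VF messages are tracked in a bitmap of 64-bit words, one bit
 * per relative VF id.
 */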
void qed_iov_pf_add_pending_events(struct qed_hwfn *p_hwfn, u8 vfid)
{
	u64 add_bit = 1ULL << (vfid % 64);

	p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
}

static void qed_iov_pf_get_and_clear_pending_events(struct qed_hwfn *p_hwfn,
						    u64 *events)
{
	u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;

	memcpy(events, p_pending_events, sizeof(u64) * QED_VF_ARRAY_LENGTH);
	memset(p_pending_events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
}

static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
			      u16 abs_vfid, struct regpair *vf_msg)
{
	u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf;
	struct qed_vf_info *p_vf;

	if (!qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min)) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "Got a message from VF [abs 0x%08x] that cannot be handled by PF\n",
			   abs_vfid);
		return 0;
	}
	p_vf = &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];

	/* List the physical address of the request so that handler
	 * could later on copy the message from it.
	 */
	p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;

	/* Mark the event and schedule the workqueue */
	qed_iov_pf_add_pending_events(p_hwfn, p_vf->relative_vf_id);
	qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);

	return 0;
}

int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
			u8 opcode, __le16 echo, union event_ring_data *data)
{
	switch (opcode) {
	case COMMON_EVENT_VF_PF_CHANNEL:
		return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
					  &data->vf_pf_channel.msg_addr);
	default:
		DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
			opcode);
		return -EINVAL;
	}
}

u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
	u16 i;

	if (!p_iov)
		goto out;

	for (i = rel_vf_id; i < p_iov->total_vfs; i++)
		if (qed_iov_is_valid_vfid(p_hwfn, i, true))
			return i;

out:
	return MAX_NUM_VFS;
}

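/* DMA the VF's mailbox request from the address it published over the
 * PF/VF channel into the PF's per-VF request buffer.
 */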
static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
			       int vfid)
{
	struct qed_dmae_params params;
	struct qed_vf_info *vf_info;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!vf_info)
		return -EINVAL;

	memset(&params, 0, sizeof(struct qed_dmae_params));
	params.flags = QED_DMAE_FLAG_VF_SRC | QED_DMAE_FLAG_COMPLETION_DST;
	params.src_vfid = vf_info->abs_vf_id;

	if (qed_dmae_host2host(p_hwfn, ptt,
			       vf_info->vf_mbx.pending_req,
			       vf_info->vf_mbx.req_phys,
			       sizeof(union vfpf_tlvs) / 4, &params)) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Failed to copy message from VF 0x%02x\n", vfid);
		return -EIO;
	}

	return 0;
}

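/* Publish a forced MAC in the VF's bulletin board and have the vport
 * reconfigured accordingly.
 */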
static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn,
					    u8 *mac, int vfid)
{
	struct qed_vf_info *vf_info;
	u64 feature;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->cdev,
			  "Can not set forced MAC, invalid vfid [%d]\n", vfid);
		return;
	}

	feature = 1 << MAC_ADDR_FORCED;
	memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);

	vf_info->bulletin.p_virt->valid_bitmap |= feature;
	/* Forced MAC will disable MAC_ADDR */
	vf_info->bulletin.p_virt->valid_bitmap &=
	    ~(1 << VFPF_BULLETIN_MAC_ADDR);

	qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}

void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
				      u16 pvid, int vfid)
{
	struct qed_vf_info *vf_info;
	u64 feature;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->cdev,
			  "Can not set forced VLAN, invalid vfid [%d]\n", vfid);
		return;
	}

	feature = 1 << VLAN_ADDR_FORCED;
	vf_info->bulletin.p_virt->pvid = pvid;
	if (pvid)
		vf_info->bulletin.p_virt->valid_bitmap |= feature;
	else
		vf_info->bulletin.p_virt->valid_bitmap &= ~feature;

	qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}

static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
{
	struct qed_vf_info *p_vf_info;

	p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!p_vf_info)
		return false;

	return !!p_vf_info->vport_instance;
}

bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
{
	struct qed_vf_info *p_vf_info;

	p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!p_vf_info)
		return true;

	return p_vf_info->state == VF_STOPPED;
}

static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid)
{
	struct qed_vf_info *vf_info;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!vf_info)
		return false;

	return vf_info->spoof_chk;
}

int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
{
	struct qed_vf_info *vf;
	int rc = -EINVAL;

	if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
		DP_NOTICE(p_hwfn,
			  "SR-IOV sanity check failed, can't set spoofchk\n");
		goto out;
	}

	vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!vf)
		goto out;

	if (!qed_iov_vf_has_vport_instance(p_hwfn, vfid)) {
		/* After VF VPORT start PF will configure spoof check */
		vf->req_spoofchk_val = val;
		rc = 0;
		goto out;
	}

	rc = __qed_iov_spoofchk_set(p_hwfn, vf, val);

out:
	return rc;
}

static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn,
					   u16 rel_vf_id)
{
	struct qed_vf_info *p_vf;

	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return NULL;

	if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)))
		return NULL;

	return p_vf->bulletin.p_virt->mac;
}

u16 qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct qed_vf_info *p_vf;

	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return 0;

	if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
		return 0;

	return p_vf->bulletin.p_virt->pvid;
}

static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt, int vfid, int val)
{
	struct qed_vf_info *vf;
	u8 abs_vp_id = 0;
	int rc;

	vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf)
		return -EINVAL;

	rc = qed_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
	if (rc)
		return rc;

	return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
}

int qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
{
	struct qed_vf_info *vf;
	u8 vport_id;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
			DP_NOTICE(p_hwfn,
				  "SR-IOV sanity check failed, can't set min rate\n");
			return -EINVAL;
		}
	}

	vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true);
	vport_id = vf->vport_id;

	return qed_configure_vport_wfq(cdev, vport_id, rate);
}

static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid)
{
	struct qed_wfq_data *vf_vp_wfq;
	struct qed_vf_info *vf_info;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!vf_info)
		return 0;

	vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];

	if (vf_vp_wfq->configured)
		return vf_vp_wfq->min_speed;
	else
		return 0;
}

/**
 * qed_schedule_iov - schedules IOV task for VF and PF
 * @hwfn: hardware function pointer
 * @flag: IOV flag for VF/PF
 */
void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
{
	smp_mb__before_atomic();
	set_bit(flag, &hwfn->iov_task_flags);
	smp_mb__after_atomic();
	DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
	queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
}

void qed_vf_start_iov_wq(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i)
		queue_delayed_work(cdev->hwfns[i].iov_wq,
				   &cdev->hwfns[i].iov_task, 0);
}

int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
{
	int i, j;

	for_each_hwfn(cdev, i)
		if (cdev->hwfns[i].iov_wq)
			flush_workqueue(cdev->hwfns[i].iov_wq);

	/* Mark VFs for disablement */
	qed_iov_set_vfs_to_disable(cdev, true);

	if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled)
		pci_disable_sriov(cdev->pdev);

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

		/* Failure to acquire the ptt in 100g creates an odd error
		 * where the first engine has already released IOV.
		 */
		if (!ptt) {
			DP_ERR(hwfn, "Failed to acquire ptt\n");
			return -EBUSY;
		}

		/* Clean WFQ db and configure equal weight for all vports */
		qed_clean_wfq_db(hwfn, ptt);

		qed_for_each_vf(hwfn, j) {
			int k;

			if (!qed_iov_is_valid_vfid(hwfn, j, true))
				continue;

			/* Wait until VF is disabled before releasing */
			for (k = 0; k < 100; k++) {
				if (!qed_iov_is_vf_stopped(hwfn, j))
					msleep(20);
				else
					break;
			}

			if (k < 100)
				qed_iov_release_hw_for_vf(&cdev->hwfns[i],
							  ptt, j);
			else
				DP_ERR(hwfn,
				       "Timeout waiting for VF's FLR to end\n");
		}

		qed_ptt_release(hwfn, ptt);
	}

	qed_iov_set_vfs_to_disable(cdev, false);

	return 0;
}

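/* Enable num VFs: initialize the HW for each VF on every hw-function,
 * distributing the available status blocks evenly, and only then enable
 * the SR-IOV PCIe functions.
 */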
static int qed_sriov_enable(struct qed_dev *cdev, int num)
{
	struct qed_sb_cnt_info sb_cnt_info;
	int i, j, rc = 0;

	if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
		DP_NOTICE(cdev, "Can start at most %d VFs\n",
			  RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1);
		return -EINVAL;
	}

	/* Initialize HW for VF access */
	for_each_hwfn(cdev, j) {
		struct qed_hwfn *hwfn = &cdev->hwfns[j];
		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
		int num_sbs = 0, limit = 16;

		if (!ptt) {
			DP_ERR(hwfn, "Failed to acquire ptt\n");
			rc = -EBUSY;
			goto err;
		}

		if (IS_MF_DEFAULT(hwfn))
			limit = MAX_NUM_VFS_BB / hwfn->num_funcs_on_engine;

		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
		qed_int_get_num_sbs(hwfn, &sb_cnt_info);
		num_sbs = min_t(int, sb_cnt_info.sb_free_blk, limit);

		for (i = 0; i < num; i++) {
			if (!qed_iov_is_valid_vfid(hwfn, i, false))
				continue;

			rc = qed_iov_init_hw_for_vf(hwfn,
						    ptt, i, num_sbs / num);
			if (rc) {
				DP_ERR(cdev, "Failed to enable VF[%d]\n", i);
				qed_ptt_release(hwfn, ptt);
				goto err;
			}
		}

		qed_ptt_release(hwfn, ptt);
	}

	/* Enable SRIOV PCIe functions */
	rc = pci_enable_sriov(cdev->pdev, num);
	if (rc) {
		DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc);
		goto err;
	}

	return num;

err:
	qed_sriov_disable(cdev, false);
	return rc;
}

static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param)
{
	if (!IS_QED_SRIOV(cdev)) {
		DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n");
		return -EOPNOTSUPP;
	}

	if (num_vfs_param)
		return qed_sriov_enable(cdev, num_vfs_param);
	else
		return qed_sriov_disable(cdev, true);
}

static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid)
{
	int i;

	if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set a VF MAC; Sriov is not enabled\n");
		return -EINVAL;
	}

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
		return -EINVAL;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf_info;

		vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
		if (!vf_info)
			continue;

		/* Set the forced MAC, and schedule the IOV task */
		ether_addr_copy(vf_info->forced_mac, mac);
		qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
	}

	return 0;
}

static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid)
{
	int i;

	if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set a VF VLAN; Sriov is not enabled\n");
		return -EINVAL;
	}

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set VF[%d] VLAN (VF is not active)\n", vfid);
		return -EINVAL;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf_info;

		vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
		if (!vf_info)
			continue;

		/* Set the forced vlan, and schedule the IOV task */
		vf_info->forced_vlan = vid;
		qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
	}

	return 0;
}

static int qed_get_vf_config(struct qed_dev *cdev,
			     int vf_id, struct ifla_vf_info *ivi)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_public_vf_info *vf_info;
	struct qed_mcp_link_state link;
	u32 tx_rate;

	/* Sanitize request */
	if (IS_VF(cdev))
		return -EINVAL;

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "VF index [%d] isn't active\n", vf_id);
		return -EINVAL;
	}

	vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);

	qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);

	/* Fill information about VF */
	ivi->vf = vf_id;

	if (is_valid_ether_addr(vf_info->forced_mac))
		ether_addr_copy(ivi->mac, vf_info->forced_mac);
	else
		ether_addr_copy(ivi->mac, vf_info->mac);

	ivi->vlan = vf_info->forced_vlan;
	ivi->spoofchk = qed_iov_spoofchk_get(hwfn, vf_id);
	ivi->linkstate = vf_info->link_state;
	tx_rate = vf_info->tx_rate;
	ivi->max_tx_rate = tx_rate ? tx_rate : link.speed;
	ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id);

	return 0;
}

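/* Propagate the PF's current link configuration to all VF bulletin
 * boards, taking each VF's requested link state and tx rate into account.
 */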
void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
{
	struct qed_mcp_link_capabilities caps;
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	int i;

	if (!hwfn->pf_iov_info)
		return;

	/* Update bulletin of all future possible VFs with link configuration */
	for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) {
		struct qed_public_vf_info *vf_info;

		vf_info = qed_iov_get_public_vf_info(hwfn, i, false);
		if (!vf_info)
			continue;

		memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
		memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
		memcpy(&caps, qed_mcp_get_link_capabilities(hwfn),
		       sizeof(caps));

		/* Modify link according to the VF's configured link state */
		switch (vf_info->link_state) {
		case IFLA_VF_LINK_STATE_DISABLE:
			link.link_up = false;
			break;
		case IFLA_VF_LINK_STATE_ENABLE:
			link.link_up = true;
			/* Set speed according to the maximum supported by HW,
			 * i.e. 40G for regular devices and 100G for CMT-mode
			 * devices.
			 */
			link.speed = (hwfn->cdev->num_hwfns > 1) ?
				     100000 : 40000;
		default:
			/* In auto mode pass PF link image to VF */
			break;
		}

		if (link.link_up && vf_info->tx_rate) {
			struct qed_ptt *ptt;
			int rate;

			rate = min_t(int, vf_info->tx_rate, link.speed);

			ptt = qed_ptt_acquire(hwfn);
			if (!ptt) {
				DP_NOTICE(hwfn, "Failed to acquire PTT\n");
				return;
			}

			if (!qed_iov_configure_tx_rate(hwfn, ptt, i, rate)) {
				vf_info->tx_rate = rate;
				link.speed = rate;
			}

			qed_ptt_release(hwfn, ptt);
		}

		qed_iov_set_link(hwfn, i, &params, &link, &caps);
	}

	qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
}

static int qed_set_vf_link_state(struct qed_dev *cdev,
				 int vf_id, int link_state)
{
	int i;

	/* Sanitize request */
	if (IS_VF(cdev))
		return -EINVAL;

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "VF index [%d] isn't active\n", vf_id);
		return -EINVAL;
	}

	/* Handle configuration of link state */
	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf;

		vf = qed_iov_get_public_vf_info(hwfn, vf_id, true);
		if (!vf)
			continue;

		if (vf->link_state == link_state)
			continue;

		vf->link_state = link_state;
		qed_inform_vf_link_state(&cdev->hwfns[i]);
	}

	return 0;
}

static int qed_spoof_configure(struct qed_dev *cdev, int vfid, bool val)
{
	int i, rc = -EINVAL;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		rc = qed_iov_spoofchk_set(p_hwfn, vfid, val);
		if (rc)
			break;
	}

	return rc;
}

static int qed_configure_max_vf_rate(struct qed_dev *cdev, int vfid, int rate)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf;

		if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
			DP_NOTICE(p_hwfn,
				  "SR-IOV sanity check failed, can't set tx rate\n");
			return -EINVAL;
		}

		vf = qed_iov_get_public_vf_info(p_hwfn, vfid, true);

		vf->tx_rate = rate;

		qed_inform_vf_link_state(p_hwfn);
	}

	return 0;
}

static int qed_set_vf_rate(struct qed_dev *cdev,
			   int vfid, u32 min_rate, u32 max_rate)
{
	int rc_min = 0, rc_max = 0;

	if (max_rate)
		rc_max = qed_configure_max_vf_rate(cdev, vfid, max_rate);

	if (min_rate)
		rc_min = qed_iov_configure_min_tx_rate(cdev, vfid, min_rate);

	if (rc_max | rc_min)
		return -EINVAL;

	return 0;
}

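/* Workqueue handler for pending VF->PF channel messages: copy each
 * pending request into the PF buffer and process it.
 */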
static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
{
	u64 events[QED_VF_ARRAY_LENGTH];
	struct qed_ptt *ptt;
	int i;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt) {
		DP_VERBOSE(hwfn, QED_MSG_IOV,
			   "Can't acquire PTT; re-scheduling\n");
		qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);
		return;
	}

	qed_iov_pf_get_and_clear_pending_events(hwfn, events);

	DP_VERBOSE(hwfn, QED_MSG_IOV,
		   "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n",
		   events[0], events[1], events[2]);

	qed_for_each_vf(hwfn, i) {
		/* Skip VFs with no pending messages */
		if (!(events[i / 64] & (1ULL << (i % 64))))
			continue;

		DP_VERBOSE(hwfn, QED_MSG_IOV,
			   "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
			   i, hwfn->cdev->p_iov_info->first_vf_in_pf + i);

		/* Copy VF's message to PF's request buffer for that VF */
		if (qed_iov_copy_vf_msg(hwfn, ptt, i))
			continue;

		qed_iov_process_mbx_req(hwfn, ptt, i);
	}

	qed_ptt_release(hwfn, ptt);
}

static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn)
{
	int i;

	qed_for_each_vf(hwfn, i) {
		struct qed_public_vf_info *info;
		bool update = false;
		u8 *mac;

		info = qed_iov_get_public_vf_info(hwfn, i, true);
		if (!info)
			continue;

		/* Update data on bulletin board */
		mac = qed_iov_bulletin_get_forced_mac(hwfn, i);
		if (is_valid_ether_addr(info->forced_mac) &&
		    (!mac || !ether_addr_equal(mac, info->forced_mac))) {
			DP_VERBOSE(hwfn,
				   QED_MSG_IOV,
				   "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n",
				   i,
				   hwfn->cdev->p_iov_info->first_vf_in_pf + i);

			/* Update bulletin board with forced MAC */
			qed_iov_bulletin_set_forced_mac(hwfn,
							info->forced_mac, i);
			update = true;
		}

		if (qed_iov_bulletin_get_forced_vlan(hwfn, i) ^
		    info->forced_vlan) {
			DP_VERBOSE(hwfn,
				   QED_MSG_IOV,
				   "Handling PF setting of pvid [0x%04x] to VF 0x%02x [Abs 0x%02x]\n",
				   info->forced_vlan,
				   i,
				   hwfn->cdev->p_iov_info->first_vf_in_pf + i);
			qed_iov_bulletin_set_forced_vlan(hwfn,
							 info->forced_vlan, i);
			update = true;
		}

		if (update)
			qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
	}
}

static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
{
	struct qed_ptt *ptt;
	int i;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt) {
		DP_NOTICE(hwfn, "Failed allocating a ptt entry\n");
		qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
		return;
	}

	qed_for_each_vf(hwfn, i)
		qed_iov_post_vf_bulletin(hwfn, i, ptt);

	qed_ptt_release(hwfn, ptt);
}

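/* The IOV workqueue task - dispatches FLR cleanup, VF messages, unicast
 * filter updates and bulletin posting according to the flags set by
 * qed_schedule_iov().
 */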
void qed_iov_pf_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     iov_task.work);
	int rc;

	if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
		return;

	if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) {
		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

		if (!ptt) {
			qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
			goto out;
		}

		rc = qed_iov_vf_flr_cleanup(hwfn, ptt);
		if (rc)
			qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);

		qed_ptt_release(hwfn, ptt);
	}

out:
	if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags))
		qed_handle_vf_msg(hwfn);

	if (test_and_clear_bit(QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
			       &hwfn->iov_task_flags))
		qed_handle_pf_set_vf_unicast(hwfn);

	if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
			       &hwfn->iov_task_flags))
		qed_handle_bulletin_post(hwfn);
}

void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
{
	int i;

	for_each_hwfn(cdev, i) {
		if (!cdev->hwfns[i].iov_wq)
			continue;

		if (schedule_first) {
			qed_schedule_iov(&cdev->hwfns[i],
					 QED_IOV_WQ_STOP_WQ_FLAG);
			cancel_delayed_work_sync(&cdev->hwfns[i].iov_task);
		}

		flush_workqueue(cdev->hwfns[i].iov_wq);
		destroy_workqueue(cdev->hwfns[i].iov_wq);
	}
}

int qed_iov_wq_start(struct qed_dev *cdev)
{
	char name[NAME_SIZE];
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		/* PFs needs a dedicated workqueue only if they support IOV.
		 * VFs always require one.
		 */
		if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn))
			continue;

		snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x",
			 cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id);

		p_hwfn->iov_wq = create_singlethread_workqueue(name);
		if (!p_hwfn->iov_wq) {
			DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n");
			return -ENOMEM;
		}

		if (IS_PF(cdev))
			INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task);
		else
			INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task);
	}

	return 0;
}

const struct qed_iov_hv_ops qed_iov_ops_pass = {
	.configure = &qed_sriov_configure,
	.set_mac = &qed_sriov_pf_set_mac,
	.set_vlan = &qed_sriov_pf_set_vlan,
	.get_config = &qed_get_vf_config,
	.set_link_state = &qed_set_vf_link_state,
	.set_spoof = &qed_spoof_configure,
	.set_rate = &qed_set_vf_rate,
};