/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/qed/qed_iov_if.h>
#include "qed_init_ops.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"

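/* VF_START/VF_STOP below are "ramrods": slow-path configuration commands
 * posted on the PF's slow-path queue (SPQ) and executed by the device
 * firmware on behalf of a VF.
 */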
static int qed_sp_vf_start(struct qed_hwfn *p_hwfn,
			   u32 concrete_vfid, u16 opaque_vfid)
{
	struct vf_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_vfid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_VF_START,
				 PROTOCOLID_COMMON, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_start;

	p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
	p_ramrod->opaque_fid = cpu_to_le16(opaque_vfid);

	p_ramrod->personality = PERSONALITY_ETH;
	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
			  u32 concrete_vfid, u16 opaque_vfid)
{
	struct vf_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_vfid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_VF_STOP,
				 PROTOCOLID_COMMON, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_stop;

	p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
			   int rel_vf_id, bool b_enabled_only)
{
	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
		return false;
	}

	if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) ||
	    (rel_vf_id < 0))
		return false;

	if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
	    b_enabled_only)
		return false;

	return true;
}

static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
					       u16 relative_vf_id,
					       bool b_enabled_only)
{
	struct qed_vf_info *vf = NULL;

	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
		return NULL;
	}

	if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only))
		vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
	else
		DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
		       relative_vf_id);

	return vf;
}

int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
			     int vfid, struct qed_ptt *p_ptt)
{
	struct qed_bulletin_content *p_bulletin;
	int crc_size = sizeof(p_bulletin->crc);
	struct qed_dmae_params params;
	struct qed_vf_info *p_vf;

	p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!p_vf)
		return -EINVAL;

	if (!p_vf->vf_bulletin)
		return -EINVAL;

	p_bulletin = p_vf->bulletin.p_virt;

	/* Increment bulletin board version and compute crc */
	p_bulletin->version++;
	p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size,
				p_vf->bulletin.size - crc_size);

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
		   p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);

	/* propagate bulletin board via dmae to vm memory */
	memset(&params, 0, sizeof(params));
	params.flags = QED_DMAE_FLAG_VF_DST;
	params.dst_vfid = p_vf->abs_vf_id;
	return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
				  p_vf->vf_bulletin, p_vf->bulletin.size / 4,
				  &params);
}

static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
{
	struct qed_hw_sriov_info *iov = cdev->p_iov_info;
	int pos = iov->pos;

	DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs);

	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
	if (iov->num_vfs) {
		DP_VERBOSE(cdev,
			   QED_MSG_IOV,
			   "Number of VFs are already set to non-zero value. Ignoring PCI configuration value\n");
		iov->num_vfs = 0;
	}

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_OFFSET, &iov->offset);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_STRIDE, &iov->stride);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);

	pci_read_config_dword(cdev->pdev,
			      pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);

	pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap);

	pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	DP_VERBOSE(cdev,
		   QED_MSG_IOV,
		   "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
		   iov->nres,
		   iov->cap,
		   iov->ctrl,
		   iov->total_vfs,
		   iov->initial_vfs,
		   iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	/* Some sanity checks */
	if (iov->num_vfs > NUM_OF_VFS(cdev) ||
	    iov->total_vfs > NUM_OF_VFS(cdev)) {
		/* This can happen only due to a bug. In this case we set
		 * num_vfs to zero to avoid memory corruption in the code that
		 * assumes max number of vfs
		 */
		DP_NOTICE(cdev,
			  "IOV: Unexpected number of vfs set: %d setting num_vf to zero\n",
			  iov->num_vfs);

		iov->num_vfs = 0;
		iov->total_vfs = 0;
	}

	return 0;
}

static void qed_iov_clear_vf_igu_blocks(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt)
{
	struct qed_igu_block *p_sb;
	int sb_id;
	u32 val;

	if (!p_hwfn->hw_info.p_igu_info) {
		DP_ERR(p_hwfn,
		       "qed_iov_clear_vf_igu_blocks IGU Info not initialized\n");
		return;
	}

	for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
	     sb_id++) {
		p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
		if ((p_sb->status & QED_IGU_STATUS_FREE) &&
		    !(p_sb->status & QED_IGU_STATUS_PF)) {
			val = qed_rd(p_hwfn, p_ptt,
				     IGU_REG_MAPPING_MEMORY + sb_id * 4);
			SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
			qed_wr(p_hwfn, p_ptt,
			       IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
		}
	}
}

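/* Carve the contiguous mailbox request/reply and bulletin DMA buffers into
 * per-VF slices, and derive each VF's identifiers: relative id (index within
 * this PF), absolute id (engine wide) and the concrete/opaque FIDs used when
 * addressing hardware on the VF's behalf.
 */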
static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	struct qed_bulletin_content *p_bulletin_virt;
	dma_addr_t req_p, rply_p, bulletin_p;
	union pfvf_tlvs *p_reply_virt_addr;
	union vfpf_tlvs *p_req_virt_addr;
	u8 idx = 0;

	memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));

	p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
	req_p = p_iov_info->mbx_msg_phys_addr;
	p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
	rply_p = p_iov_info->mbx_reply_phys_addr;
	p_bulletin_virt = p_iov_info->p_bulletins;
	bulletin_p = p_iov_info->bulletins_phys;
	if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
		DP_ERR(p_hwfn,
		       "qed_iov_setup_vfdb called without allocating mem first\n");
		return;
	}

	for (idx = 0; idx < p_iov->total_vfs; idx++) {
		struct qed_vf_info *vf = &p_iov_info->vfs_array[idx];
		u32 concrete;

		vf->vf_mbx.req_virt = p_req_virt_addr + idx;
		vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
		vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
		vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);

		vf->state = VF_STOPPED;

		vf->bulletin.phys = idx *
				    sizeof(struct qed_bulletin_content) +
				    bulletin_p;
		vf->bulletin.p_virt = p_bulletin_virt + idx;
		vf->bulletin.size = sizeof(struct qed_bulletin_content);

		vf->relative_vf_id = idx;
		vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
		concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
		vf->concrete_fid = concrete;
		vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
				 (vf->abs_vf_id << 8);
		vf->vport_id = idx + 1;
	}
}

static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	void **p_v_addr;
	u16 num_vfs = 0;

	num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);

	/* Allocate PF Mailbox buffer (per-VF) */
	p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_msg_virt_addr;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->mbx_msg_size,
				       &p_iov_info->mbx_msg_phys_addr,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	/* Allocate PF Mailbox Reply buffer (per-VF) */
	p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_reply_virt_addr;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->mbx_reply_size,
				       &p_iov_info->mbx_reply_phys_addr,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) *
				     num_vfs;
	p_v_addr = &p_iov_info->p_bulletins;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->bulletins_size,
				       &p_iov_info->bulletins_phys,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
		   p_iov_info->mbx_msg_virt_addr,
		   (u64) p_iov_info->mbx_msg_phys_addr,
		   p_iov_info->mbx_reply_virt_addr,
		   (u64) p_iov_info->mbx_reply_phys_addr,
		   p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);

	return 0;
}

static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;

	if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->mbx_msg_size,
				  p_iov_info->mbx_msg_virt_addr,
				  p_iov_info->mbx_msg_phys_addr);

	if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->mbx_reply_size,
				  p_iov_info->mbx_reply_virt_addr,
				  p_iov_info->mbx_reply_phys_addr);

	if (p_iov_info->p_bulletins)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->bulletins_size,
				  p_iov_info->p_bulletins,
				  p_iov_info->bulletins_phys);
}

int qed_iov_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_sriov;

	if (!IS_PF_SRIOV(p_hwfn)) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "No SR-IOV - no need for IOV db\n");
		return 0;
	}

	p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
	if (!p_sriov) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sriov'\n");
		return -ENOMEM;
	}

	p_hwfn->pf_iov_info = p_sriov;

	return qed_iov_allocate_vfdb(p_hwfn);
}

void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
		return;

	qed_iov_setup_vfdb(p_hwfn);
	qed_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
}

void qed_iov_free(struct qed_hwfn *p_hwfn)
{
	if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
		qed_iov_free_vfdb(p_hwfn);
		kfree(p_hwfn->pf_iov_info);
	}
}

void qed_iov_free_hw_info(struct qed_dev *cdev)
{
	kfree(cdev->p_iov_info);
	cdev->p_iov_info = NULL;
}

int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	int pos;
	int rc;

	if (IS_VF(p_hwfn->cdev))
		return 0;

	/* Learn the PCI configuration */
	pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
				      PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");
		return 0;
	}

	/* Allocate a new struct for IOV information */
	cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
	if (!cdev->p_iov_info) {
		DP_NOTICE(p_hwfn, "Can't support IOV due to lack of memory\n");
		return -ENOMEM;
	}
	cdev->p_iov_info->pos = pos;

	rc = qed_iov_pci_cfg_info(cdev);
	if (rc)
		return rc;

	/* We want PF IOV to be synonymous with the existence of p_iov_info;
	 * In case the capability is published but there are no VFs, simply
	 * de-allocate the struct.
	 */
	if (!cdev->p_iov_info->total_vfs) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "IOV capabilities, but no VFs are published\n");
		kfree(cdev->p_iov_info);
		cdev->p_iov_info = NULL;
		return 0;
	}

	/* Calculate the first VF index - this is a bit tricky; Basically,
	 * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
	 * after the first engine's VFs.
	 */
	cdev->p_iov_info->first_vf_in_pf = p_hwfn->cdev->p_iov_info->offset +
					   p_hwfn->abs_pf_id - 16;
	if (QED_PATH_ID(p_hwfn))
		cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "First VF in hwfn 0x%08x\n",
		   cdev->p_iov_info->first_vf_in_pf);

	return 0;
}

static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
{
	/* Check PF supports sriov */
	if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
	    !IS_PF_SRIOV_ALLOC(p_hwfn))
		return false;

	/* Check VF validity */
	if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true))
		return false;

	return true;
}

static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
				      u16 rel_vf_id, u8 to_disable)
{
	struct qed_vf_info *vf;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
		if (!vf)
			continue;

		vf->to_disable = to_disable;
	}
}

void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
{
	u16 i;

	if (!IS_QED_SRIOV(cdev))
		return;

	for (i = 0; i < cdev->p_iov_info->total_vfs; i++)
		qed_iov_set_vf_to_disable(cdev, i, to_disable);
}

static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, u8 abs_vfid)
{
	qed_wr(p_hwfn, p_ptt,
	       PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
	       1 << (abs_vfid & 0x1f));
}

static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
{
	int i;

	/* Set VF masks and configuration - pretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf->num_sbs; i++)
		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						vf->igu_sbs[i],
						vf->opaque_fid, true);
}

static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf, bool enable)
{
	u32 igu_vf_conf;

	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);

	if (enable)
		igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
	else
		igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;

	qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
}

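/* Bringing a VF up: clear stale PGLUE error indications, reset the VF's IGU
 * state, have the management FW configure the VF's MSI-X table, run the
 * per-VF init phase while pretending to the VF's concrete FID, and finally
 * post the VF_START ramrod.
 */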
static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_vf_info *vf)
{
	u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
	int rc;

	if (vf->to_disable)
		return 0;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "Enable internal access for vf %x [abs %x]\n",
		   vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf));

	qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf));

	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs);
	if (rc)
		return rc;

	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
	STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);

	qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
		     p_hwfn->hw_info.hw_mode);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

	if (vf->state != VF_STOPPED) {
		DP_NOTICE(p_hwfn, "VF[%02x] is already started\n",
			  vf->abs_vf_id);
		return -EINVAL;
	}

	/* Start VF */
	rc = qed_sp_vf_start(p_hwfn, vf->concrete_fid, vf->opaque_fid);
	if (rc)
		DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);

	return rc;
}

/**
 * @brief qed_iov_config_perm_table - configure the permission
 *      zone table.
 *      In E4, queue zone permission table size is 320x9. There
 *      are 320 VF queues for single engine device (256 for dual
 *      engine device), and each entry has the following format:
 *      {Valid, VF[7:0]}
 *
 * @param p_hwfn
 * @param p_ptt
 * @param vf
 * @param enable
 */
static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf, u8 enable)
{
	u32 reg_addr, val;
	u16 qzone_id = 0;
	int qid;

	for (qid = 0; qid < vf->num_rxqs; qid++) {
		qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
				&qzone_id);

		reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
		val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
		qed_wr(p_hwfn, p_ptt, reg_addr, val);
	}
}

static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf)
{
	/* Reset vf in IGU - interrupts are still disabled */
	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);

	/* Permission Table */
	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
}

static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf, u16 num_rx_queues)
{
	struct qed_igu_block *igu_blocks;
	int qid = 0, igu_id = 0;
	u32 val = 0;

	igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;

	if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
		num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
	p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;

	SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
	SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
	SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);

	while ((qid < num_rx_queues) &&
	       (igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev))) {
		if (igu_blocks[igu_id].status & QED_IGU_STATUS_FREE) {
			struct cau_sb_entry sb_entry;

			vf->igu_sbs[qid] = (u16)igu_id;
			igu_blocks[igu_id].status &= ~QED_IGU_STATUS_FREE;

			SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);

			qed_wr(p_hwfn, p_ptt,
			       IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
			       val);

			/* Configure igu sb in CAU which were marked valid */
			qed_init_cau_sb_entry(p_hwfn, &sb_entry,
					      p_hwfn->rel_pf_id,
					      vf->abs_vf_id, 1);
			qed_dmae_host2grc(p_hwfn, p_ptt,
					  (u64)(uintptr_t)&sb_entry,
					  CAU_REG_SB_VAR_MEMORY +
					  igu_id * sizeof(u64), 2, 0);
			qid++;
		}
		igu_id++;
	}

	vf->num_sbs = (u8) num_rx_queues;

	return vf->num_sbs;
}

static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_vf_info *vf)
{
	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	int idx, igu_id;
	u32 addr, val;

	/* Invalidate igu CAM lines and mark them as free */
	for (idx = 0; idx < vf->num_sbs; idx++) {
		igu_id = vf->igu_sbs[idx];
		addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;

		val = qed_rd(p_hwfn, p_ptt, addr);
		SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
		qed_wr(p_hwfn, p_ptt, addr, val);

		p_info->igu_map.igu_blocks[igu_id].status |=
		    QED_IGU_STATUS_FREE;

		p_hwfn->hw_info.p_igu_info->free_blks++;
	}

	vf->num_sbs = 0;
}

static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  u16 rel_vf_id, u16 num_rx_queues)
{
	u8 num_of_vf_avaiable_chains = 0;
	struct qed_vf_info *vf = NULL;
	int rc = 0;
	u32 cids;
	u8 i;

	vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
	if (!vf) {
		DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n");
		return -EINVAL;
	}

	if (vf->b_init) {
		DP_NOTICE(p_hwfn, "VF[%d] is already active.\n", rel_vf_id);
		return -EINVAL;
	}

	/* Limit number of queues according to number of CIDs */
	qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
		   vf->relative_vf_id, num_rx_queues, (u16) cids);
	num_rx_queues = min_t(u16, num_rx_queues, ((u16) cids));

	num_of_vf_avaiable_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn,
							     p_ptt,
							     vf,
							     num_rx_queues);
	if (!num_of_vf_avaiable_chains) {
		DP_ERR(p_hwfn, "no available igu sbs\n");
		return -ENOMEM;
	}

	/* Choose queue number and index ranges */
	vf->num_rxqs = num_of_vf_avaiable_chains;
	vf->num_txqs = num_of_vf_avaiable_chains;

	for (i = 0; i < vf->num_rxqs; i++) {
		u16 queue_id = qed_int_queue_id_from_sb_id(p_hwfn,
							   vf->igu_sbs[i]);

		if (queue_id > RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
			DP_NOTICE(p_hwfn,
				  "VF[%d] will require utilizing of out-of-bounds queues - %04x\n",
				  vf->relative_vf_id, queue_id);
			return -EINVAL;
		}

		/* CIDs are per-VF, so no problem having them 0-based. */
		vf->vf_queues[i].fw_rx_qid = queue_id;
		vf->vf_queues[i].fw_tx_qid = queue_id;
		vf->vf_queues[i].fw_cid = i;

		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\n",
			   vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i);
	}
	rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);
	if (!rc) {
		vf->b_init = true;

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->cdev->p_iov_info->num_vfs++;
	}

	return rc;
}

static void qed_iov_set_link(struct qed_hwfn *p_hwfn,
			     u16 vfid,
			     struct qed_mcp_link_params *params,
			     struct qed_mcp_link_state *link,
			     struct qed_mcp_link_capabilities *p_caps)
{
	struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
						       vfid,
						       false);
	struct qed_bulletin_content *p_bulletin;

	if (!p_vf)
		return;

	p_bulletin = p_vf->bulletin.p_virt;
	p_bulletin->req_autoneg = params->speed.autoneg;
	p_bulletin->req_adv_speed = params->speed.advertised_speeds;
	p_bulletin->req_forced_speed = params->speed.forced_speed;
	p_bulletin->req_autoneg_pause = params->pause.autoneg;
	p_bulletin->req_forced_rx = params->pause.forced_rx;
	p_bulletin->req_forced_tx = params->pause.forced_tx;
	p_bulletin->req_loopback = params->loopback_mode;

	p_bulletin->link_up = link->link_up;
	p_bulletin->speed = link->speed;
	p_bulletin->full_duplex = link->full_duplex;
	p_bulletin->autoneg = link->an;
	p_bulletin->autoneg_complete = link->an_complete;
	p_bulletin->parallel_detection = link->parallel_detection;
	p_bulletin->pfc_enabled = link->pfc_enabled;
	p_bulletin->partner_adv_speed = link->partner_adv_speed;
	p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
	p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
	p_bulletin->partner_adv_pause = link->partner_adv_pause;
	p_bulletin->sfp_tx_fault = link->sfp_tx_fault;

	p_bulletin->capability_speed = p_caps->speed_capabilities;
}

static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt, u16 rel_vf_id)
{
	struct qed_mcp_link_capabilities caps;
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	struct qed_vf_info *vf = NULL;
	int rc = 0;

	vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!vf) {
		DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n");
		return -EINVAL;
	}

	if (vf->bulletin.p_virt)
		memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt));

	memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));

	/* Get the link configuration back in bulletin so
	 * that when VFs are re-enabled they get the actual
	 * link configuration.
	 */
	memcpy(&params, qed_mcp_get_link_params(p_hwfn), sizeof(params));
	memcpy(&link, qed_mcp_get_link_state(p_hwfn), sizeof(link));
	memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
	qed_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);

	if (vf->state != VF_STOPPED) {
		/* Stopping the VF */
		rc = qed_sp_vf_stop(p_hwfn, vf->concrete_fid, vf->opaque_fid);

		if (rc != 0) {
			DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n",
			       rc);
			return rc;
		}

		vf->state = VF_STOPPED;
	}

	/* disabling interrupts and resetting permission table was done during
	 * vf-close, however, we could get here without going through vf_close
	 */
	/* Disable Interrupts for VF */
	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

	/* Reset Permission table */
	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

	vf->num_rxqs = 0;
	vf->num_txqs = 0;
	qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);

	if (vf->b_init) {
		vf->b_init = false;

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->cdev->p_iov_info->num_vfs--;
	}

	return 0;
}

static bool qed_iov_tlv_supported(u16 tlvtype)
{
	return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
}

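/* A mailbox message is a chain of TLVs laid out back to back, e.g.:
 *
 *   [ request tlv | extended tlv | ... | CHANNEL_TLV_LIST_END ]
 *
 * Each entry starts with a struct channel_tlv {type, length} header, so the
 * chain can be walked without knowing the concrete tlv types.
 */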
/* place a given tlv on the tlv buffer, continuing current tlv list */
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length)
{
	struct channel_tlv *tl = (struct channel_tlv *)*offset;

	tl->type = type;
	tl->length = length;

	/* Offset should keep pointing to next TLV (the end of the last) */
	*offset += length;

	/* Return a pointer to the start of the added tlv */
	return *offset - length;
}

/* list the types and lengths of the tlvs on the buffer */
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list)
{
	u16 i = 1, total_length = 0;
	struct channel_tlv *tlv;

	do {
		tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);

		/* output tlv */
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "TLV number %d: type %d, length %d\n",
			   i, tlv->type, tlv->length);

		if (tlv->type == CHANNEL_TLV_LIST_END)
			return;

		/* Validate entry - protect against malicious VFs */
		if (!tlv->length) {
			DP_NOTICE(p_hwfn, "TLV of length 0 found\n");
			return;
		}

		total_length += tlv->length;

		if (total_length >= sizeof(struct tlv_buffer_size)) {
			DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n");
			return;
		}

		i++;
	} while (1);
}

static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_vf_info *p_vf,
				  u16 length, u8 status)
{
	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct qed_dmae_params params;
	u8 eng_vf_id;

	mbx->reply_virt->default_resp.hdr.status = status;

	qed_dp_tlv_list(p_hwfn, mbx->reply_virt);

	eng_vf_id = p_vf->abs_vf_id;

	memset(&params, 0, sizeof(struct qed_dmae_params));
	params.flags = QED_DMAE_FLAG_VF_DST;
	params.dst_vfid = eng_vf_id;

	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
			   mbx->req_virt->first_tlv.reply_address +
			   sizeof(u64),
			   (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
			   &params);

	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
			   mbx->req_virt->first_tlv.reply_address,
			   sizeof(u64) / 4, &params);

	REG_WR(p_hwfn,
	       GTT_BAR0_MAP_REG_USDM_RAM +
	       USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
}

static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn,
				enum qed_iov_vport_update_flag flag)
{
	switch (flag) {
	case QED_IOV_VP_UPDATE_ACTIVATE:
		return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	case QED_IOV_VP_UPDATE_VLAN_STRIP:
		return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
	case QED_IOV_VP_UPDATE_TX_SWITCH:
		return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
	case QED_IOV_VP_UPDATE_MCAST:
		return CHANNEL_TLV_VPORT_UPDATE_MCAST;
	case QED_IOV_VP_UPDATE_ACCEPT_PARAM:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
	case QED_IOV_VP_UPDATE_RSS:
		return CHANNEL_TLV_VPORT_UPDATE_RSS;
	case QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
	case QED_IOV_VP_UPDATE_SGE_TPA:
		return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
	default:
		return 0;
	}
}

static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn,
					    struct qed_vf_info *p_vf,
					    struct qed_iov_vf_mbx *p_mbx,
					    u8 status,
					    u16 tlvs_mask, u16 tlvs_accepted)
{
	struct pfvf_def_resp_tlv *resp;
	u16 size, total_len, i;

	memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
	p_mbx->offset = (u8 *)p_mbx->reply_virt;
	size = sizeof(struct pfvf_def_resp_tlv);
	total_len = size;

	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);

	/* Prepare response for all extended tlvs if they are found by PF */
	for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) {
		if (!(tlvs_mask & (1 << i)))
			continue;

		resp = qed_add_tlv(p_hwfn, &p_mbx->offset,
				   qed_iov_vport_to_tlv(p_hwfn, i), size);

		if (tlvs_accepted & (1 << i))
			resp->hdr.status = status;
		else
			resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;

		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[%d] - vport_update response: TLV %d, status %02x\n",
			   p_vf->relative_vf_id,
			   qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);

		total_len += size;
	}

	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	return total_len;
}

static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_vf_info *vf_info,
				 u16 type, u16 length, u8 status)
{
	struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx;

	mbx->offset = (u8 *)mbx->reply_virt;

	qed_add_tlv(p_hwfn, &mbx->offset, type, length);
	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
}

struct qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
						      u16 relative_vf_id,
						      bool b_enabled_only)
{
	struct qed_vf_info *vf = NULL;

	vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
	if (!vf)
		return NULL;

	return &vf->p_vf_info;
}

void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
{
	struct qed_public_vf_info *vf_info;

	vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false);

	if (!vf_info)
		return;

	/* Clear the VF mac */
	memset(vf_info->mac, 0, ETH_ALEN);
}

static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
			       struct qed_vf_info *p_vf)
{
	u32 i;

	p_vf->vf_bulletin = 0;
	p_vf->vport_instance = 0;
	p_vf->num_mac_filters = 0;
	p_vf->num_vlan_filters = 0;
	p_vf->configured_features = 0;

	/* If VF previously requested less resources, go back to default */
	p_vf->num_rxqs = p_vf->num_sbs;
	p_vf->num_txqs = p_vf->num_sbs;

	p_vf->num_active_rxqs = 0;

	for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++)
		p_vf->vf_queues[i].rxq_active = 0;

	memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
	qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
}

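/* ACQUIRE is the first message a VF sends: the PF validates the VF driver's
 * FW compatibility, records the requested resources and the VF's bulletin
 * address, and replies with device information and the resource quotas the
 * VF may actually use.
 */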
static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf)
{
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
	u8 i, vfpf_status = PFVF_STATUS_SUCCESS;
	struct pf_vf_resc *resc = &resp->resc;

	/* Validate FW compatibility */
	if (req->vfdev_info.fw_major != FW_MAJOR_VERSION ||
	    req->vfdev_info.fw_minor != FW_MINOR_VERSION ||
	    req->vfdev_info.fw_revision != FW_REVISION_VERSION ||
	    req->vfdev_info.fw_engineering != FW_ENGINEERING_VERSION) {
		DP_INFO(p_hwfn,
			"VF[%d] is running an incompatible driver [VF needs FW %02x:%02x:%02x:%02x but Hypervisor is using %02x:%02x:%02x:%02x]\n",
			vf->abs_vf_id,
			req->vfdev_info.fw_major,
			req->vfdev_info.fw_minor,
			req->vfdev_info.fw_revision,
			req->vfdev_info.fw_engineering,
			FW_MAJOR_VERSION,
			FW_MINOR_VERSION,
			FW_REVISION_VERSION, FW_ENGINEERING_VERSION);
		vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}

	/* On 100g PFs, prevent old VFs from loading */
	if ((p_hwfn->cdev->num_hwfns > 1) &&
	    !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
		DP_INFO(p_hwfn,
			"VF[%d] is running an old driver that doesn't support 100g\n",
			vf->abs_vf_id);
		vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}

	memset(resp, 0, sizeof(*resp));

	/* Fill in vf info stuff */
	vf->opaque_fid = req->vfdev_info.opaque_fid;
	vf->num_mac_filters = 1;
	vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;

	vf->vf_bulletin = req->bulletin_addr;
	vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
			    vf->bulletin.size : req->bulletin_size;

	/* fill in pfdev info */
	pfdev_info->chip_num = p_hwfn->cdev->chip_num;
	pfdev_info->db_size = 0;
	pfdev_info->indices_per_sb = PIS_PER_SB;

	pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
				   PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
	if (p_hwfn->cdev->num_hwfns > 1)
		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;

	pfdev_info->stats_info.mstats.address =
	    PXP_VF_BAR0_START_MSDM_ZONE_B +
	    offsetof(struct mstorm_vf_zone, non_trigger.eth_queue_stat);
	pfdev_info->stats_info.mstats.len =
	    sizeof(struct eth_mstorm_per_queue_stat);

	pfdev_info->stats_info.ustats.address =
	    PXP_VF_BAR0_START_USDM_ZONE_B +
	    offsetof(struct ustorm_vf_zone, non_trigger.eth_queue_stat);
	pfdev_info->stats_info.ustats.len =
	    sizeof(struct eth_ustorm_per_queue_stat);

	pfdev_info->stats_info.pstats.address =
	    PXP_VF_BAR0_START_PSDM_ZONE_B +
	    offsetof(struct pstorm_vf_zone, non_trigger.eth_queue_stat);
	pfdev_info->stats_info.pstats.len =
	    sizeof(struct eth_pstorm_per_queue_stat);

	pfdev_info->stats_info.tstats.address = 0;
	pfdev_info->stats_info.tstats.len = 0;

	memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);

	pfdev_info->fw_major = FW_MAJOR_VERSION;
	pfdev_info->fw_minor = FW_MINOR_VERSION;
	pfdev_info->fw_rev = FW_REVISION_VERSION;
	pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
	pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
	qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);

	pfdev_info->dev_type = p_hwfn->cdev->type;
	pfdev_info->chip_rev = p_hwfn->cdev->chip_rev;

	resc->num_rxqs = vf->num_rxqs;
	resc->num_txqs = vf->num_txqs;
	resc->num_sbs = vf->num_sbs;
	for (i = 0; i < resc->num_sbs; i++) {
		resc->hw_sbs[i].hw_sb_id = vf->igu_sbs[i];
		resc->hw_sbs[i].sb_qid = 0;
	}

	for (i = 0; i < resc->num_rxqs; i++) {
		qed_fw_l2_queue(p_hwfn, vf->vf_queues[i].fw_rx_qid,
				(u16 *)&resc->hw_qid[i]);
		resc->cid[i] = vf->vf_queues[i].fw_cid;
	}

	resc->num_mac_filters = min_t(u8, vf->num_mac_filters,
				      req->resc_request.num_mac_filters);
	resc->num_vlan_filters = min_t(u8, vf->num_vlan_filters,
				       req->resc_request.num_vlan_filters);

	/* This isn't really required as VF isn't limited, but some VFs might
	 * actually test this value, so need to provide it.
	 */
	resc->num_mc_filters = req->resc_request.num_mc_filters;

	/* Fill agreed size of bulletin board in response */
	resp->bulletin_size = vf->bulletin.size;
	qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n"
		   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n",
		   vf->abs_vf_id,
		   resp->pfdev_info.chip_num,
		   resp->pfdev_info.db_size,
		   resp->pfdev_info.indices_per_sb,
		   resp->pfdev_info.capabilities,
		   resc->num_rxqs,
		   resc->num_txqs,
		   resc->num_sbs,
		   resc->num_mac_filters,
		   resc->num_vlan_filters);
	vf->state = VF_ACQUIRED;

	/* Prepare Response */
out:
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
			     sizeof(struct pfvf_acquire_resp_tlv), vfpf_status);
}

static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn,
				  struct qed_vf_info *p_vf, bool val)
{
	struct qed_sp_vport_update_params params;
	int rc;

	if (val == p_vf->spoof_chk) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Spoofchk value[%d] is already configured\n", val);
		return 0;
	}

	memset(&params, 0, sizeof(struct qed_sp_vport_update_params));
	params.opaque_fid = p_vf->opaque_fid;
	params.vport_id = p_vf->vport_id;
	params.update_anti_spoofing_en_flg = 1;
	params.anti_spoofing_en = val;

	rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
	if (!rc) {
		p_vf->spoof_chk = val;
		p_vf->req_spoofchk_val = p_vf->spoof_chk;
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Spoofchk val[%d] configured\n", val);
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Spoofchk configuration[val:%d] failed for VF[%d]\n",
			   val, p_vf->relative_vf_id);
	}

	return rc;
}

static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn,
					    struct qed_vf_info *p_vf)
{
	struct qed_filter_ucast filter;
	int rc = 0;
	int i;

	memset(&filter, 0, sizeof(filter));
	filter.is_rx_filter = 1;
	filter.is_tx_filter = 1;
	filter.vport_to_add_to = p_vf->vport_id;
	filter.opcode = QED_FILTER_ADD;

	/* Reconfigure vlans */
	for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
		if (!p_vf->shadow_config.vlans[i].used)
			continue;

		filter.type = QED_FILTER_VLAN;
		filter.vlan = p_vf->shadow_config.vlans[i].vid;
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
			   filter.vlan, p_vf->relative_vf_id);
		rc = qed_sp_eth_filter_ucast(p_hwfn,
					     p_vf->opaque_fid,
					     &filter,
					     QED_SPQ_MODE_CB, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Failed to configure VLAN [%04x] to VF [%04x]\n",
				  filter.vlan, p_vf->relative_vf_id);
			break;
		}
	}

	return rc;
}

static int
qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn,
				   struct qed_vf_info *p_vf, u64 events)
{
	int rc = 0;

	if ((events & (1 << VLAN_ADDR_FORCED)) &&
	    !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
		rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);

	return rc;
}

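/* Forced-feature handling: when the hypervisor forces a MAC or a pvid via
 * the bulletin board, the PF configures the filters itself and masks the
 * VF's own requests for that feature; once a forced feature is removed, the
 * shadow (VF-requested) configuration is re-applied.
 */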
static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
					  struct qed_vf_info *p_vf, u64 events)
{
	struct qed_filter_ucast filter;
	int rc = 0;

	if (!p_vf->vport_instance)
		return -EINVAL;

	if (events & (1 << MAC_ADDR_FORCED)) {
		/* Since there's no way [currently] of removing the MAC,
		 * we can always assume this means we need to force it.
		 */
		memset(&filter, 0, sizeof(filter));
		filter.type = QED_FILTER_MAC;
		filter.opcode = QED_FILTER_REPLACE;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac);

		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					     &filter, QED_SPQ_MODE_CB, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "PF failed to configure MAC for VF\n");
			return rc;
		}

		p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
	}

	if (events & (1 << VLAN_ADDR_FORCED)) {
		struct qed_sp_vport_update_params vport_update;
		u8 removal;
		int i;

		memset(&filter, 0, sizeof(filter));
		filter.type = QED_FILTER_VLAN;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		filter.vlan = p_vf->bulletin.p_virt->pvid;
		filter.opcode = filter.vlan ? QED_FILTER_REPLACE :
					      QED_FILTER_FLUSH;

		/* Send the ramrod */
		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					     &filter, QED_SPQ_MODE_CB, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "PF failed to configure VLAN for VF\n");
			return rc;
		}

		/* Update the default-vlan & silent vlan stripping */
		memset(&vport_update, 0, sizeof(vport_update));
		vport_update.opaque_fid = p_vf->opaque_fid;
		vport_update.vport_id = p_vf->vport_id;
		vport_update.update_default_vlan_enable_flg = 1;
		vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
		vport_update.update_default_vlan_flg = 1;
		vport_update.default_vlan = filter.vlan;

		vport_update.update_inner_vlan_removal_flg = 1;
		removal = filter.vlan ? 1
				      : p_vf->shadow_config.inner_vlan_removal;
		vport_update.inner_vlan_removal_flg = removal;
		vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
		rc = qed_sp_vport_update(p_hwfn,
					 &vport_update,
					 QED_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "PF failed to configure VF vport for vlan\n");
			return rc;
		}

		/* Update all the Rx queues */
		for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
			u16 qid;

			if (!p_vf->vf_queues[i].rxq_active)
				continue;

			qid = p_vf->vf_queues[i].fw_rx_qid;

			rc = qed_sp_eth_rx_queues_update(p_hwfn, qid,
							 1, 0, 1,
							 QED_SPQ_MODE_EBLOCK,
							 NULL);
			if (rc) {
				DP_NOTICE(p_hwfn,
					  "Failed to send Rx update for queue[0x%04x]\n",
					  qid);
				return rc;
			}
		}

		if (filter.vlan)
			p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
		else
			p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED);
	}

	/* If forced features are terminated, we need to configure the shadow
	 * configuration back again.
	 */
	if (events)
		qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);

	return rc;
}

static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_vf_info *vf)
{
	struct qed_sp_vport_start_params params = { 0 };
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_vport_start_tlv *start;
	u8 status = PFVF_STATUS_SUCCESS;
	struct qed_vf_info *vf_info;
	u64 *p_bitmap;
	int sb_id;
	int rc;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->cdev,
			  "Failed to get VF info, invalid vfid [%d]\n",
			  vf->relative_vf_id);
		return;
	}

	vf->state = VF_ENABLED;
	start = &mbx->req_virt->start_vport;

	/* Initialize Status block in CAU */
	for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
		if (!start->sb_addr[sb_id]) {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF[%d] did not fill the address of SB %d\n",
				   vf->relative_vf_id, sb_id);
			break;
		}

		qed_int_cau_conf_sb(p_hwfn, p_ptt,
				    start->sb_addr[sb_id],
				    vf->igu_sbs[sb_id],
				    vf->abs_vf_id, 1);
	}
	qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);

	vf->mtu = start->mtu;
	vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;

	/* Take into consideration configuration forced by hypervisor;
	 * If none is configured, use the supplied VF values [for old
	 * vfs that would still be fine, since they passed '0' as padding].
	 */
	p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
	if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
		u8 vf_req = start->only_untagged;

		vf_info->bulletin.p_virt->default_only_untagged = vf_req;
		*p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
	}

	params.tpa_mode = start->tpa_mode;
	params.remove_inner_vlan = start->inner_vlan_removal;
	params.tx_switching = true;

	params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
	params.drop_ttl0 = false;
	params.concrete_fid = vf->concrete_fid;
	params.opaque_fid = vf->opaque_fid;
	params.vport_id = vf->vport_id;
	params.max_buffers_per_cqe = start->max_buffers_per_cqe;
	params.mtu = vf->mtu;

	rc = qed_sp_eth_vport_start(p_hwfn, &params);
	if (rc != 0) {
		DP_ERR(p_hwfn,
		       "qed_iov_vf_mbx_start_vport returned error %d\n", rc);
		status = PFVF_STATUS_FAILURE;
	} else {
		vf->vport_instance++;

		/* Force configuration if needed on the newly opened vport */
		qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);

		__qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
	}
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
			     sizeof(struct pfvf_def_resp_tlv), status);
}

static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf)
{
	u8 status = PFVF_STATUS_SUCCESS;
	int rc;

	vf->vport_instance--;
	vf->spoof_chk = false;

	rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
	if (rc != 0) {
		DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
		       rc);
		status = PFVF_STATUS_FAILURE;
	}

	/* Forget the configuration on the vport */
	vf->configured_features = 0;
	memset(&vf->shadow_config, 0, sizeof(vf->shadow_config));

	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
			     sizeof(struct pfvf_def_resp_tlv), status);
}

static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt,
					  struct qed_vf_info *vf, u8 status)
{
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct pfvf_start_queue_resp_tlv *p_tlv;
	struct vfpf_start_rxq_tlv *req;

	mbx->offset = (u8 *)mbx->reply_virt;

	p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
			    sizeof(*p_tlv));
	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	/* Update the TLV with the response */
	if (status == PFVF_STATUS_SUCCESS) {
		req = &mbx->req_virt->start_rxq;
		p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
				offsetof(struct mstorm_vf_zone,
					 non_trigger.eth_rx_queue_producers) +
				sizeof(struct eth_rx_prod_data) * req->rx_qid;
	}

	qed_iov_send_response(p_hwfn, p_ptt, vf, sizeof(*p_tlv), status);
}

static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_vf_info *vf)
{
	struct qed_queue_start_common_params params;
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	u8 status = PFVF_STATUS_SUCCESS;
	struct vfpf_start_rxq_tlv *req;
	int rc;

	memset(&params, 0, sizeof(params));
	req = &mbx->req_virt->start_rxq;
	params.queue_id = vf->vf_queues[req->rx_qid].fw_rx_qid;
	params.vf_qid = req->rx_qid;
	params.vport_id = vf->vport_id;
	params.sb = req->hw_sb;
	params.sb_idx = req->sb_index;

	rc = qed_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,
					 vf->vf_queues[req->rx_qid].fw_cid,
					 &params,
					 vf->abs_vf_id + 0x10,
					 req->bd_max_bytes,
					 req->rxq_addr,
					 req->cqe_pbl_addr, req->cqe_pbl_size);

	if (rc) {
		status = PFVF_STATUS_FAILURE;
	} else {
		vf->vf_queues[req->rx_qid].rxq_active = true;
		vf->num_active_rxqs++;
	}

	qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status);
}

static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_vf_info *vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	struct qed_queue_start_common_params params;
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	union qed_qm_pq_params pq_params;
	u8 status = PFVF_STATUS_SUCCESS;
	struct vfpf_start_txq_tlv *req;
	int rc;

	/* Prepare the parameters which would choose the right PQ */
	memset(&pq_params, 0, sizeof(pq_params));
	pq_params.eth.is_vf = 1;
	pq_params.eth.vf_id = vf->relative_vf_id;

	memset(&params, 0, sizeof(params));
	req = &mbx->req_virt->start_txq;
	params.queue_id = vf->vf_queues[req->tx_qid].fw_tx_qid;
	params.vport_id = vf->vport_id;
	params.sb = req->hw_sb;
	params.sb_idx = req->sb_index;

	rc = qed_sp_eth_txq_start_ramrod(p_hwfn,
					 vf->opaque_fid,
					 vf->vf_queues[req->tx_qid].fw_cid,
					 &params,
					 vf->abs_vf_id + 0x10,
					 req->pbl_addr,
					 req->pbl_size, &pq_params);

	if (rc)
		status = PFVF_STATUS_FAILURE;
	else
		vf->vf_queues[req->tx_qid].txq_active = true;

	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_START_TXQ,
			     length, status);
}

static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
				struct qed_vf_info *vf,
				u16 rxq_id, u8 num_rxqs, bool cqe_completion)
{
	int rc = 0;
	int qid;

	if (rxq_id + num_rxqs > ARRAY_SIZE(vf->vf_queues))
		return -EINVAL;

	for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
		if (vf->vf_queues[qid].rxq_active) {
			rc = qed_sp_eth_rx_queue_stop(p_hwfn,
						      vf->vf_queues[qid].
						      fw_rx_qid, false,
						      cqe_completion);

			if (rc)
				return rc;
		}
		vf->vf_queues[qid].rxq_active = false;
		vf->num_active_rxqs--;
	}

	return rc;
}

static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
				struct qed_vf_info *vf, u16 txq_id, u8 num_txqs)
{
	int rc = 0;
	int qid;

	if (txq_id + num_txqs > ARRAY_SIZE(vf->vf_queues))
		return -EINVAL;

	for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
		if (vf->vf_queues[qid].txq_active) {
			rc = qed_sp_eth_tx_queue_stop(p_hwfn,
						      vf->vf_queues[qid].
						      fw_tx_qid);

			if (rc)
				return rc;
		}
		vf->vf_queues[qid].txq_active = false;
	}
	return rc;
}

static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_vf_info *vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	u8 status = PFVF_STATUS_SUCCESS;
	struct vfpf_stop_rxqs_tlv *req;
	int rc;

	/* We give the option of starting from qid != 0, in this case we
	 * need to make sure that qid + num_qs doesn't exceed the actual
	 * amount of queues that exist.
	 */
	req = &mbx->req_virt->stop_rxqs;
	rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
				  req->num_rxqs, req->cqe_completion);
	if (rc)
		status = PFVF_STATUS_FAILURE;

	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
			     length, status);
}

static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_vf_info *vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	u8 status = PFVF_STATUS_SUCCESS;
	struct vfpf_stop_txqs_tlv *req;
	int rc;

	/* We give the option of starting from qid != 0, in this case we
	 * need to make sure that qid + num_qs doesn't exceed the actual
	 * amount of queues that exist.
	 */
	req = &mbx->req_virt->stop_txqs;
	rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs);
	if (rc)
		status = PFVF_STATUS_FAILURE;

	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
			     length, status);
}

static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_vf_info *vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_update_rxq_tlv *req;
	u8 status = PFVF_STATUS_SUCCESS;
	u8 complete_event_flg;
	u8 complete_cqe_flg;
	u16 qid;
	int rc;
	u8 i;

	req = &mbx->req_virt->update_rxq;
	complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
	complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);

	for (i = 0; i < req->num_rxqs; i++) {
		qid = req->rx_qid + i;

		if (!vf->vf_queues[qid].rxq_active) {
			DP_NOTICE(p_hwfn, "VF rx_qid = %d isn't active!\n",
				  qid);
			status = PFVF_STATUS_FAILURE;
			break;
		}

		rc = qed_sp_eth_rx_queues_update(p_hwfn,
						 vf->vf_queues[qid].fw_rx_qid,
						 1,
						 complete_cqe_flg,
						 complete_event_flg,
						 QED_SPQ_MODE_EBLOCK, NULL);

		if (rc) {
			status = PFVF_STATUS_FAILURE;
			break;
		}
	}

	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
			     length, status);
}

void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
			       void *p_tlvs_list, u16 req_type)
{
	struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
	int len = 0;

	do {
		if (!p_tlv->length) {
			DP_NOTICE(p_hwfn, "Zero length TLV found\n");
			return NULL;
		}

		if (p_tlv->type == req_type) {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "Extended tlv type %d, length %d found\n",
				   p_tlv->type, p_tlv->length);
			return p_tlv;
		}

		len += p_tlv->length;
		p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);

		if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
			DP_NOTICE(p_hwfn, "TLVs has overrun the buffer size\n");
			return NULL;
		}
	} while (p_tlv->type != CHANNEL_TLV_LIST_END);

	return NULL;
}

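/* Each qed_iov_vp_update_*() helper below looks for one extended TLV in the
 * VF's VPORT_UPDATE request; when found, it copies the values into the
 * qed_sp_vport_update_params and sets the feature's bit in tlvs_mask so the
 * response can acknowledge every TLV individually.
 */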
static void
qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn,
			    struct qed_sp_vport_update_params *p_data,
			    struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_activate_tlv *p_act_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;

	p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
		    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_act_tlv)
		return;

	p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
	p_data->vport_active_rx_flg = p_act_tlv->active_rx;
	p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
	p_data->vport_active_tx_flg = p_act_tlv->active_tx;
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE;
}

static void
qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn,
			     struct qed_sp_vport_update_params *p_data,
			     struct qed_vf_info *p_vf,
			     struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;

	p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
		     qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_vlan_tlv)
		return;

	p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;

	/* Ignore the VF request if we're forcing a vlan */
	if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) {
		p_data->update_inner_vlan_removal_flg = 1;
		p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
	}

	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP;
}

static void
qed_iov_vp_update_tx_switch(struct qed_hwfn *p_hwfn,
			    struct qed_sp_vport_update_params *p_data,
			    struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;

	p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
			  qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
						   tlv);
	if (!p_tx_switch_tlv)
		return;

	p_data->update_tx_switching_flg = 1;
	p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_TX_SWITCH;
}

static void
qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data,
				  struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;

	p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
		      qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_mcast_tlv)
		return;

	p_data->update_approx_mcast_flg = 1;
	memcpy(p_data->bins, p_mcast_tlv->bins,
	       sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
}

static void
qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn,
			      struct qed_sp_vport_update_params *p_data,
			      struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct qed_filter_accept_flags *p_flags = &p_data->accept_flags;
	struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;

	p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
		       qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_accept_tlv)
		return;

	p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
	p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
	p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
	p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM;
}

static void
qed_iov_vp_update_accept_any_vlan(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data,
				  struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;

	p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
			    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
						     tlv);
	if (!p_accept_any_vlan)
		return;

	p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
	p_data->update_accept_any_vlan_flg =
	    p_accept_any_vlan->update_accept_any_vlan_flg;
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
}

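/* RSS is the largest extended TLV: besides flags and the hash key, the
 * indirection table arrives in VF-relative queue indices and is translated
 * here into FW rx queue ids; out-of-range or inactive entries fall back to
 * the VF's first rx queue.
 */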
static void
qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
			    struct qed_vf_info *vf,
			    struct qed_sp_vport_update_params *p_data,
			    struct qed_rss_params *p_rss,
			    struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_rss_tlv *p_rss_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
	u16 i, q_idx, max_q_idx;
	u16 table_size;

	p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
		    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_rss_tlv) {
		p_data->rss_params = NULL;
		return;
	}

	memset(p_rss, 0, sizeof(struct qed_rss_params));

	p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags &
				      VFPF_UPDATE_RSS_CONFIG_FLAG);
	p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags &
					    VFPF_UPDATE_RSS_CAPS_FLAG);
	p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags &
					 VFPF_UPDATE_RSS_IND_TABLE_FLAG);
	p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags &
				   VFPF_UPDATE_RSS_KEY_FLAG);

	p_rss->rss_enable = p_rss_tlv->rss_enable;
	p_rss->rss_eng_id = vf->relative_vf_id + 1;
	p_rss->rss_caps = p_rss_tlv->rss_caps;
	p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
	memcpy(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table,
	       sizeof(p_rss->rss_ind_table));
	memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key));

	table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table),
			   (1 << p_rss_tlv->rss_table_size_log));

	max_q_idx = ARRAY_SIZE(vf->vf_queues);

	for (i = 0; i < table_size; i++) {
		u16 index = vf->vf_queues[0].fw_rx_qid;

		q_idx = p_rss->rss_ind_table[i];
		if (q_idx >= max_q_idx)
			DP_NOTICE(p_hwfn,
				  "rss_ind_table[%d] = %d, rxq is out of range\n",
				  i, q_idx);
		else if (!vf->vf_queues[q_idx].rxq_active)
			DP_NOTICE(p_hwfn,
				  "rss_ind_table[%d] = %d, rxq is not active\n",
				  i, q_idx);
		else
			index = vf->vf_queues[q_idx].fw_rx_qid;
		p_rss->rss_ind_table[i] = index;
	}

	p_data->rss_params = p_rss;
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS;
}

static void
qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn,
				struct qed_vf_info *vf,
				struct qed_sp_vport_update_params *p_data,
				struct qed_sge_tpa_params *p_sge_tpa,
				struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;

	p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
	    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);

	if (!p_sge_tpa_tlv) {
		p_data->sge_tpa_params = NULL;
		return;
	}

	memset(p_sge_tpa, 0, sizeof(struct qed_sge_tpa_params));

	p_sge_tpa->update_tpa_en_flg =
	    !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
	p_sge_tpa->update_tpa_param_flg =
	    !!(p_sge_tpa_tlv->update_sge_tpa_flags &
	       VFPF_UPDATE_TPA_PARAM_FLAG);

	p_sge_tpa->tpa_ipv4_en_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
	p_sge_tpa->tpa_ipv6_en_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
	p_sge_tpa->tpa_pkt_split_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
	p_sge_tpa->tpa_hdr_data_split_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
	p_sge_tpa->tpa_gro_consistent_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);

	p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
	p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
	p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
	p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
	p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;

	p_data->sge_tpa_params = p_sge_tpa;

	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA;
}
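/* Each qed_iov_vp_update_*() helper above parses one extended TLV out of
 * the VF's vport-update request and sets the matching bit in tlvs_mask;
 * e.g. a request carrying only the RSS TLV yields
 * tlvs_mask == (1 << QED_IOV_VP_UPDATE_RSS). The same mask is echoed back
 * in the response so the VF can tell which TLVs the PF processed.
 */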
static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					struct qed_vf_info *vf)
{
	struct qed_sp_vport_update_params params;
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct qed_sge_tpa_params sge_tpa_params;
	struct qed_rss_params rss_params;
	u8 status = PFVF_STATUS_SUCCESS;
	u16 tlvs_mask = 0;
	u16 length;
	int rc;

	memset(&params, 0, sizeof(params));
	params.opaque_fid = vf->opaque_fid;
	params.vport_id = vf->vport_id;
	params.rss_params = NULL;

	/* Search for extended tlvs list and update values
	 * from VF in struct qed_sp_vport_update_params.
	 */
	qed_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
	qed_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_rss_param(p_hwfn, vf, &params, &rss_params,
				    mbx, &tlvs_mask);
	qed_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
					&sge_tpa_params, mbx, &tlvs_mask);

	/* Just log a message if no extended TLV was found in the buffer.
	 * Once every vport-update feature is requested by the VF as an
	 * extended TLV, an empty request can instead be failed in the
	 * response.
	 */
	if (!tlvs_mask) {
		DP_NOTICE(p_hwfn,
			  "No feature tlvs found for vport update\n");
		status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}

	rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);

	if (rc)
		status = PFVF_STATUS_FAILURE;

out:
	length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
						  tlvs_mask, tlvs_mask);
	qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
}
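/* Shadow vlan bookkeeping: the PF mirrors every vlan filter the VF
 * configures into p_vf->shadow_config, enforcing the
 * QED_ETH_VF_NUM_VLAN_FILTERS + 1 cap and refusing new entries while a
 * vlan is forced via the bulletin board.
 */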
static int qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
					    struct qed_vf_info *p_vf,
					    struct qed_filter_ucast *p_params)
{
	int i;

	if (p_params->type == QED_FILTER_MAC)
		return 0;

	/* First remove entries and then add new ones */
	if (p_params->opcode == QED_FILTER_REMOVE) {
		for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
			if (p_vf->shadow_config.vlans[i].used &&
			    p_vf->shadow_config.vlans[i].vid ==
			    p_params->vlan) {
				p_vf->shadow_config.vlans[i].used = false;
				break;
			}

		if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF [%d] - Tries to remove a non-existing vlan\n",
				   p_vf->relative_vf_id);
			return -EINVAL;
		}
	} else if (p_params->opcode == QED_FILTER_REPLACE ||
		   p_params->opcode == QED_FILTER_FLUSH) {
		for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
			p_vf->shadow_config.vlans[i].used = false;
	}

	/* In forced mode, we're willing to remove entries - but we don't add
	 * new ones.
	 */
	if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
		return 0;

	if (p_params->opcode == QED_FILTER_ADD ||
	    p_params->opcode == QED_FILTER_REPLACE) {
		for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
			if (p_vf->shadow_config.vlans[i].used)
				continue;

			p_vf->shadow_config.vlans[i].used = true;
			p_vf->shadow_config.vlans[i].vid = p_params->vlan;
			break;
		}

		if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF [%d] - Tries to configure more than %d vlan filters\n",
				   p_vf->relative_vf_id,
				   QED_ETH_VF_NUM_VLAN_FILTERS + 1);
			return -EINVAL;
		}
	}

	return 0;
}
int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
		      int vfid, struct qed_filter_ucast *params)
{
	struct qed_public_vf_info *vf;

	vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
	if (!vf)
		return -EINVAL;

	/* No real decision to make; Store the configured MAC */
	if (params->type == QED_FILTER_MAC ||
	    params->type == QED_FILTER_MAC_VLAN)
		ether_addr_copy(vf->mac, params->mac);

	return 0;
}
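/* Mailbox handler for CHANNEL_TLV_UCAST_FILTER: validates the VF's
 * unicast MAC/vlan request against any forced values in the bulletin
 * board before committing it via a ramrod.
 */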
static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					struct qed_vf_info *vf)
{
	struct qed_bulletin_content *p_bulletin = vf->bulletin.p_virt;
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_ucast_filter_tlv *req;
	u8 status = PFVF_STATUS_SUCCESS;
	struct qed_filter_ucast params;
	int rc;

	/* Prepare the unicast filter params */
	memset(&params, 0, sizeof(struct qed_filter_ucast));
	req = &mbx->req_virt->ucast_filter;
	params.opcode = (enum qed_filter_opcode)req->opcode;
	params.type = (enum qed_filter_ucast_type)req->type;

	params.is_rx_filter = 1;
	params.is_tx_filter = 1;
	params.vport_to_remove_from = vf->vport_id;
	params.vport_to_add_to = vf->vport_id;
	memcpy(params.mac, req->mac, ETH_ALEN);
	params.vlan = req->vlan;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
		   vf->abs_vf_id, params.opcode, params.type,
		   params.is_rx_filter ? "RX" : "",
		   params.is_tx_filter ? "TX" : "",
		   params.vport_to_add_to,
		   params.mac[0], params.mac[1],
		   params.mac[2], params.mac[3],
		   params.mac[4], params.mac[5], params.vlan);

	if (!vf->vport_instance) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "No VPORT instance available for VF[%d], failing ucast MAC configuration\n",
			   vf->abs_vf_id);
		status = PFVF_STATUS_FAILURE;
		goto out;
	}

	/* Update shadow copy of the VF configuration */
	if (qed_iov_vf_update_unicast_shadow(p_hwfn, vf, &params)) {
		status = PFVF_STATUS_FAILURE;
		goto out;
	}

	/* Determine if the unicast filtering is acceptable by PF */
	if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
	    (params.type == QED_FILTER_VLAN ||
	     params.type == QED_FILTER_MAC_VLAN)) {
		/* Once VLAN is forced or PVID is set, do not allow
		 * to add/replace any further VLANs.
		 */
		if (params.opcode == QED_FILTER_ADD ||
		    params.opcode == QED_FILTER_REPLACE)
			status = PFVF_STATUS_FORCED;
		goto out;
	}

	if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&
	    (params.type == QED_FILTER_MAC ||
	     params.type == QED_FILTER_MAC_VLAN)) {
		if (!ether_addr_equal(p_bulletin->mac, params.mac) ||
		    (params.opcode != QED_FILTER_ADD &&
		     params.opcode != QED_FILTER_REPLACE))
			status = PFVF_STATUS_FORCED;
		goto out;
	}

	rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, &params);
	if (rc) {
		status = PFVF_STATUS_FAILURE;
		goto out;
	}

	rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
				     QED_SPQ_MODE_CB, NULL);
	if (rc)
		status = PFVF_STATUS_FAILURE;

out:
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
			     sizeof(struct pfvf_def_resp_tlv), status);
}
static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_vf_info *vf)
{
	int i;

	/* Reset the VF's status blocks */
	for (i = 0; i < vf->num_sbs; i++)
		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						vf->igu_sbs[i],
						vf->opaque_fid, false);

	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
			     sizeof(struct pfvf_def_resp_tlv),
			     PFVF_STATUS_SUCCESS);
}

static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	u8 status = PFVF_STATUS_SUCCESS;

	/* Disable Interrupts for VF */
	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

	/* Reset Permission table */
	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
			     length, status);
}

static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *p_vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);

	qed_iov_vf_cleanup(p_hwfn, p_vf);

	qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
			     length, PFVF_STATUS_SUCCESS);
}
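/* FLR cleanup, step 1: pretend to be the VF's FID and poll the DORQ
 * usage counter until the VF's outstanding doorbells drain (up to 50
 * polling iterations with a short sleep in between).
 */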
static int
qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
			 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
{
	int cnt;
	u32 val;

	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid);

	for (cnt = 0; cnt < 50; cnt++) {
		val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
		if (!val)
			break;
		msleep(20);
	}
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

	if (cnt == 50) {
		DP_ERR(p_hwfn,
		       "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
		       p_vf->abs_vf_id, val);
		return -EBUSY;
	}

	return 0;
}
static int
qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
			struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
{
	u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
	int i, cnt;

	/* Read initial consumers & producers */
	for (i = 0; i < MAX_NUM_VOQS; i++) {
		u32 prod;

		cons[i] = qed_rd(p_hwfn, p_ptt,
				 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
				 i * 0x40);
		prod = qed_rd(p_hwfn, p_ptt,
			      PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
			      i * 0x40);
		distance[i] = prod - cons[i];
	}

	/* Wait for consumers to pass the producers */
	i = 0;
	for (cnt = 0; cnt < 50; cnt++) {
		for (; i < MAX_NUM_VOQS; i++) {
			u32 tmp;

			tmp = qed_rd(p_hwfn, p_ptt,
				     PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
				     i * 0x40);
			if (distance[i] > tmp - cons[i])
				break;
		}

		if (i == MAX_NUM_VOQS)
			break;

		msleep(20);
	}

	if (cnt == 50) {
		DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
		       p_vf->abs_vf_id, i);
		return -EBUSY;
	}

	return 0;
}
static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn,
			       struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
{
	int rc;

	rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
	if (rc)
		return rc;

	rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
	if (rc)
		return rc;

	return 0;
}
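/* Worked example of the pending_flr bookkeeping used below: for
 * rel_vf_id 70 the pending bit lives in pending_flr[70 / 64] ==
 * pending_flr[1], at bit (70 % 64) == 6, while the MFW ack bitmap uses
 * 32-bit words indexed by the absolute VF id.
 */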
static int
qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt,
			       u16 rel_vf_id, u32 *ack_vfs)
{
	struct qed_vf_info *p_vf;
	int rc = 0;

	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
	if (!p_vf)
		return 0;

	if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
	    (1ULL << (rel_vf_id % 64))) {
		u16 vfid = p_vf->abs_vf_id;

		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%d] - Handling FLR\n", vfid);

		qed_iov_vf_cleanup(p_hwfn, p_vf);

		/* If VF isn't active, no need for anything but SW */
		if (!p_vf->b_init)
			goto cleanup;

		rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
		if (rc)
			goto cleanup;

		rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true);
		if (rc) {
			DP_ERR(p_hwfn, "Failed handle FLR of VF[%d]\n", vfid);
			return rc;
		}

		/* VF_STOPPED has to be set only after final cleanup
		 * but prior to re-enabling the VF.
		 */
		p_vf->state = VF_STOPPED;

		rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
		if (rc) {
			DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
			       vfid);
			return rc;
		}
cleanup:
		/* Mark VF for ack and clean pending state */
		if (p_vf->state == VF_RESET)
			p_vf->state = VF_STOPPED;
		ack_vfs[vfid / 32] |= (1 << (vfid % 32));
		p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
		    ~(1ULL << (rel_vf_id % 64));
		p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
		    ~(1ULL << (rel_vf_id % 64));
	}

	return rc;
}
int qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 ack_vfs[VF_MAX_STATIC / 32];
	int rc = 0;
	u16 i;

	memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));

	/* Since BRB <-> PRS interface can't be tested as part of the flr
	 * polling due to HW limitations, simply sleep a bit. And since
	 * there's no need to wait per-vf, do it before looping.
	 */
	msleep(100);

	for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++)
		qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);

	rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
	return rc;
}
int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
{
	u16 i, found = 0;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n");
	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "[%08x,...,%08x]: %08x\n",
			   i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);

	if (!p_hwfn->cdev->p_iov_info) {
		DP_NOTICE(p_hwfn, "VF flr but no IOV\n");
		return 0;
	}

	/* Mark VFs */
	for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) {
		struct qed_vf_info *p_vf;
		u8 vfid;

		p_vf = qed_iov_get_vf_info(p_hwfn, i, false);
		if (!p_vf)
			continue;

		vfid = p_vf->abs_vf_id;
		if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
			u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
			u16 rel_vf_id = p_vf->relative_vf_id;

			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF[%d] [rel %d] got FLR-ed\n",
				   vfid, rel_vf_id);

			p_vf->state = VF_RESET;

			/* No need to lock here, since pending_flr should
			 * only change here and before ACKing MFW. Since
			 * MFW will not trigger an additional attention for
			 * VF flr until ACKs, we're safe.
			 */
			p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
			found = 1;
		}
	}

	return found;
}
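/* Report a VF's link objects out of its bulletin board copy rather than
 * the PF's live MCP state; each of the three output structs is optional.
 */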
static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
			     u16 vfid,
			     struct qed_mcp_link_params *p_params,
			     struct qed_mcp_link_state *p_link,
			     struct qed_mcp_link_capabilities *p_caps)
{
	struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
						       vfid,
						       false);
	struct qed_bulletin_content *p_bulletin;

	if (!p_vf)
		return;

	p_bulletin = p_vf->bulletin.p_virt;

	if (p_params)
		__qed_vf_get_link_params(p_hwfn, p_params, p_bulletin);
	if (p_link)
		__qed_vf_get_link_state(p_hwfn, p_link, p_bulletin);
	if (p_caps)
		__qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
}
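/* Top-level dispatch for a single VF->PF mailbox request. The first TLV
 * identifies the operation; an unrecognized type is only logged together
 * with a partial hex dump of the request, and no response is sent.
 */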
static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt, int vfid)
{
	struct qed_iov_vf_mbx *mbx;
	struct qed_vf_info *p_vf;
	int i;

	p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!p_vf)
		return;

	mbx = &p_vf->vf_mbx;

	/* qed_iov_process_mbx_request */
	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "qed_iov_process_mbx_req vfid %d\n", p_vf->abs_vf_id);

	mbx->first_tlv = mbx->req_virt->first_tlv;

	/* check if tlv type is known */
	if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
		switch (mbx->first_tlv.tl.type) {
		case CHANNEL_TLV_ACQUIRE:
			qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_VPORT_START:
			qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_VPORT_TEARDOWN:
			qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_START_RXQ:
			qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_START_TXQ:
			qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_STOP_RXQS:
			qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_STOP_TXQS:
			qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_UPDATE_RXQ:
			qed_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_VPORT_UPDATE:
			qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_UCAST_FILTER:
			qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_CLOSE:
			qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_INT_CLEANUP:
			qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_RELEASE:
			qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
			break;
		}
	} else {
		/* unknown TLV - this may belong to a VF driver from the future
		 * - a version written after this PF driver was written, which
		 * supports features unknown as of yet. Too bad since we don't
		 * support them. Or this may be because someone wrote a crappy
		 * VF driver and is sending garbage over the channel.
		 */
		DP_ERR(p_hwfn,
		       "unknown TLV. type %d length %d. first 20 bytes of mailbox buffer:\n",
		       mbx->first_tlv.tl.type, mbx->first_tlv.tl.length);

		for (i = 0; i < 20; i++) {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "%x ",
				   mbx->req_virt->tlv_buf_size.tlv_buffer[i]);
		}
	}
}
void qed_iov_pf_add_pending_events(struct qed_hwfn *p_hwfn, u8 vfid)
{
	u64 add_bit = 1ULL << (vfid % 64);

	p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
}

static void qed_iov_pf_get_and_clear_pending_events(struct qed_hwfn *p_hwfn,
						    u64 *events)
{
	u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;

	memcpy(events, p_pending_events, sizeof(u64) * QED_VF_ARRAY_LENGTH);
	memset(p_pending_events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
}
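/* EQ completion path: the event-queue element only carries the DMA
 * address of the VF's request, so the slow-path worker later copies the
 * message into the PF-side mirror (see qed_iov_copy_vf_msg()) before
 * parsing it.
 */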
static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
			      u16 abs_vfid, struct regpair *vf_msg)
{
	u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf;
	struct qed_vf_info *p_vf;

	if (!qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min)) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "Got a message from VF [abs 0x%08x] that cannot be handled by PF\n",
			   abs_vfid);
		return 0;
	}
	p_vf = &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];

	/* List the physical address of the request so that handler
	 * could later on copy the message from it.
	 */
	p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;

	/* Mark the event and schedule the workqueue */
	qed_iov_pf_add_pending_events(p_hwfn, p_vf->relative_vf_id);
	qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);

	return 0;
}
int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
			u8 opcode, __le16 echo, union event_ring_data *data)
{
	switch (opcode) {
	case COMMON_EVENT_VF_PF_CHANNEL:
		return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
					  &data->vf_pf_channel.msg_addr);
	default:
		DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
			opcode);
		return -EINVAL;
	}
}

u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
	u16 i;

	if (!p_iov)
		goto out;

	/* Check the loop index rather than rel_vf_id, so iteration can
	 * actually skip over inactive VFs.
	 */
	for (i = rel_vf_id; i < p_iov->total_vfs; i++)
		if (qed_iov_is_valid_vfid(p_hwfn, i, true))
			return i;

out:
	return MAX_NUM_VFS;
}
static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
			       int vfid)
{
	struct qed_dmae_params params;
	struct qed_vf_info *vf_info;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!vf_info)
		return -EINVAL;

	memset(&params, 0, sizeof(struct qed_dmae_params));
	params.flags = QED_DMAE_FLAG_VF_SRC | QED_DMAE_FLAG_COMPLETION_DST;
	params.src_vfid = vf_info->abs_vf_id;

	if (qed_dmae_host2host(p_hwfn, ptt,
			       vf_info->vf_mbx.pending_req,
			       vf_info->vf_mbx.req_phys,
			       sizeof(union vfpf_tlvs) / 4, &params)) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Failed to copy message from VF 0x%02x\n", vfid);
		return -EIO;
	}

	return 0;
}
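/* Forced-unicast plumbing: the PF writes the forced MAC/pvid into the
 * VF's bulletin board and flips the matching valid_bitmap feature bit;
 * the VF picks up the change the next time the bulletin is posted.
 */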
static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn,
					    u8 *mac, int vfid)
{
	struct qed_vf_info *vf_info;
	u64 feature;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->cdev,
			  "Can not set forced MAC, invalid vfid [%d]\n", vfid);
		return;
	}

	feature = 1 << MAC_ADDR_FORCED;
	memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);

	vf_info->bulletin.p_virt->valid_bitmap |= feature;
	/* Forced MAC will disable MAC_ADDR */
	vf_info->bulletin.p_virt->valid_bitmap &=
				~(1 << VFPF_BULLETIN_MAC_ADDR);

	qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}

void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
				      u16 pvid, int vfid)
{
	struct qed_vf_info *vf_info;
	u64 feature;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->cdev,
			  "Can not set forced vlan, invalid vfid [%d]\n", vfid);
		return;
	}

	feature = 1 << VLAN_ADDR_FORCED;
	vf_info->bulletin.p_virt->pvid = pvid;
	if (pvid)
		vf_info->bulletin.p_virt->valid_bitmap |= feature;
	else
		vf_info->bulletin.p_virt->valid_bitmap &= ~feature;

	qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}
static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
{
	struct qed_vf_info *p_vf_info;

	p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!p_vf_info)
		return false;

	return !!p_vf_info->vport_instance;
}

bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
{
	struct qed_vf_info *p_vf_info;

	p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!p_vf_info)
		return true;

	return p_vf_info->state == VF_STOPPED;
}

static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid)
{
	struct qed_vf_info *vf_info;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!vf_info)
		return false;

	return vf_info->spoof_chk;
}

int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
{
	struct qed_vf_info *vf;
	int rc = -EINVAL;

	if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
		DP_NOTICE(p_hwfn,
			  "SR-IOV sanity check failed, can't set spoofchk\n");
		goto out;
	}

	vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!vf)
		goto out;

	if (!qed_iov_vf_has_vport_instance(p_hwfn, vfid)) {
		/* After VF VPORT start PF will configure spoof check */
		vf->req_spoofchk_val = val;
		rc = 0;
		goto out;
	}

	rc = __qed_iov_spoofchk_set(p_hwfn, vf, val);

out:
	return rc;
}
static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn,
					   u16 rel_vf_id)
{
	struct qed_vf_info *p_vf;

	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return NULL;

	if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)))
		return NULL;

	return p_vf->bulletin.p_virt->mac;
}

u16 qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct qed_vf_info *p_vf;

	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return 0;

	if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
		return 0;

	return p_vf->bulletin.p_virt->pvid;
}
static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt, int vfid, int val)
{
	struct qed_vf_info *vf;
	u8 abs_vp_id = 0;
	int rc;

	vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf)
		return -EINVAL;

	rc = qed_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
	if (rc)
		return rc;

	return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
}

int qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
{
	struct qed_vf_info *vf;
	u8 vport_id;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
			DP_NOTICE(p_hwfn,
				  "SR-IOV sanity check failed, can't set min rate\n");
			return -EINVAL;
		}
	}

	vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true);
	vport_id = vf->vport_id;

	return qed_configure_vport_wfq(cdev, vport_id, rate);
}

static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid)
{
	struct qed_wfq_data *vf_vp_wfq;
	struct qed_vf_info *vf_info;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!vf_info)
		return 0;

	vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];

	if (vf_vp_wfq->configured)
		return vf_vp_wfq->min_speed;
	else
		return 0;
}
/**
 * qed_schedule_iov - schedules IOV task for VF and PF
 * @hwfn: hardware function pointer
 * @flag: IOV flag for VF/PF
 */
void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
{
	smp_mb__before_atomic();
	set_bit(flag, &hwfn->iov_task_flags);
	smp_mb__after_atomic();
	DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
	queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
}

void qed_vf_start_iov_wq(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i)
		queue_delayed_work(cdev->hwfns[i].iov_wq,
				   &cdev->hwfns[i].iov_task, 0);
}
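/* Teardown order matters below: flush the per-hwfn IOV workqueues first,
 * then mark VFs disabled, then detach the PCI VFs, and only afterwards
 * release per-VF HW resources once each VF reports VF_STOPPED.
 */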
int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
{
	int i;

	for_each_hwfn(cdev, i)
		if (cdev->hwfns[i].iov_wq)
			flush_workqueue(cdev->hwfns[i].iov_wq);

	/* Mark VFs for disablement */
	qed_iov_set_vfs_to_disable(cdev, true);

	if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled)
		pci_disable_sriov(cdev->pdev);

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
		int j;

		/* Failure to acquire the ptt in 100g creates an odd error
		 * where the first engine has already released IOV.
		 */
		if (!ptt) {
			DP_ERR(hwfn, "Failed to acquire ptt\n");
			return -EBUSY;
		}

		/* Clean WFQ db and configure equal weight for all vports */
		qed_clean_wfq_db(hwfn, ptt);

		qed_for_each_vf(hwfn, j) {
			int k;

			if (!qed_iov_is_valid_vfid(hwfn, j, true))
				continue;

			/* Wait until VF is disabled before releasing */
			for (k = 0; k < 100; k++) {
				if (!qed_iov_is_vf_stopped(hwfn, j))
					msleep(20);
				else
					break;
			}

			if (k < 100)
				qed_iov_release_hw_for_vf(&cdev->hwfns[i],
							  ptt, (u16)j);
			else
				DP_ERR(hwfn,
				       "Timeout waiting for VF's FLR to end\n");
		}

		qed_ptt_release(hwfn, ptt);
	}

	qed_iov_set_vfs_to_disable(cdev, false);

	return 0;
}
static int qed_sriov_enable(struct qed_dev *cdev, int num)
{
	struct qed_sb_cnt_info sb_cnt_info;
	int i, j, rc = -EINVAL;

	if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
		DP_NOTICE(cdev, "Can start at most %d VFs\n",
			  RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1);
		return -EINVAL;
	}

	/* Initialize HW for VF access */
	for_each_hwfn(cdev, j) {
		struct qed_hwfn *hwfn = &cdev->hwfns[j];
		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
		int num_sbs = 0, limit = 16;

		if (!ptt) {
			DP_ERR(hwfn, "Failed to acquire ptt\n");
			rc = -EBUSY;
			goto err;
		}

		if (IS_MF_DEFAULT(hwfn))
			limit = MAX_NUM_VFS_BB / hwfn->num_funcs_on_engine;

		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
		qed_int_get_num_sbs(hwfn, &sb_cnt_info);
		num_sbs = min_t(int, sb_cnt_info.sb_free_blk, limit);

		for (i = 0; i < num; i++) {
			if (!qed_iov_is_valid_vfid(hwfn, i, false))
				continue;

			rc = qed_iov_init_hw_for_vf(hwfn,
						    ptt, i, num_sbs / num);
			if (rc) {
				DP_ERR(cdev, "Failed to enable VF[%d]\n", i);
				qed_ptt_release(hwfn, ptt);
				goto err;
			}
		}

		qed_ptt_release(hwfn, ptt);
	}

	/* Enable SRIOV PCIe functions */
	rc = pci_enable_sriov(cdev->pdev, num);
	if (rc) {
		DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc);
		goto err;
	}

	return num;

err:
	qed_sriov_disable(cdev, false);
	return rc;
}
static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param)
{
	if (!IS_QED_SRIOV(cdev)) {
		DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n");
		return -EOPNOTSUPP;
	}

	if (num_vfs_param)
		return qed_sriov_enable(cdev, num_vfs_param);
	else
		return qed_sriov_disable(cdev, true);
}
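/* The callbacks below implement the struct qed_iov_hv_ops table exported
 * via qed_iov_ops_pass at the bottom of this file, backing the PF-side
 * per-VF configuration entry points.
 */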
static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid)
{
	int i;

	if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set a VF MAC; Sriov is not enabled\n");
		return -EINVAL;
	}

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
		return -EINVAL;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf_info;

		vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
		if (!vf_info)
			continue;

		/* Set the forced MAC, and schedule the IOV task */
		ether_addr_copy(vf_info->forced_mac, mac);
		qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
	}

	return 0;
}

static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid)
{
	int i;

	if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set a VF VLAN; Sriov is not enabled\n");
		return -EINVAL;
	}

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set VF[%d] VLAN (VF is not active)\n", vfid);
		return -EINVAL;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf_info;

		vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
		if (!vf_info)
			continue;

		/* Set the forced vlan, and schedule the IOV task */
		vf_info->forced_vlan = vid;
		qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
	}

	return 0;
}
static int qed_get_vf_config(struct qed_dev *cdev,
			     int vf_id, struct ifla_vf_info *ivi)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_public_vf_info *vf_info;
	struct qed_mcp_link_state link;
	u32 tx_rate;

	/* Sanitize request */
	if (IS_VF(cdev))
		return -EINVAL;

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "VF index [%d] isn't active\n", vf_id);
		return -EINVAL;
	}

	vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);

	qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);

	/* Fill information about VF */
	ivi->vf = vf_id;

	if (is_valid_ether_addr(vf_info->forced_mac))
		ether_addr_copy(ivi->mac, vf_info->forced_mac);
	else
		ether_addr_copy(ivi->mac, vf_info->mac);

	ivi->vlan = vf_info->forced_vlan;
	ivi->spoofchk = qed_iov_spoofchk_get(hwfn, vf_id);
	ivi->linkstate = vf_info->link_state;
	tx_rate = vf_info->tx_rate;
	ivi->max_tx_rate = tx_rate ? tx_rate : link.speed;
	ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id);

	return 0;
}
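/* Propagate the PF's current link state into every VF bulletin,
 * overriding link_up according to each VF's configured
 * IFLA_VF_LINK_STATE_* policy and clamping speed to any tx_rate limit.
 */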
void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
{
	struct qed_mcp_link_capabilities caps;
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	int i;

	if (!hwfn->pf_iov_info)
		return;

	/* Update bulletin of all future possible VFs with link configuration */
	for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) {
		struct qed_public_vf_info *vf_info;

		vf_info = qed_iov_get_public_vf_info(hwfn, i, false);
		if (!vf_info)
			continue;

		memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
		memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
		memcpy(&caps, qed_mcp_get_link_capabilities(hwfn),
		       sizeof(caps));

		/* Modify link according to the VF's configured link state */
		switch (vf_info->link_state) {
		case IFLA_VF_LINK_STATE_DISABLE:
			link.link_up = false;
			break;
		case IFLA_VF_LINK_STATE_ENABLE:
			link.link_up = true;
			/* Set speed according to maximum supported by HW.
			 * that is 40G for regular devices and 100G for CMT
			 * mode devices.
			 */
			link.speed = (hwfn->cdev->num_hwfns > 1) ?
				     100000 : 40000;
		default:
			/* In auto mode pass PF link image to VF */
			break;
		}

		if (link.link_up && vf_info->tx_rate) {
			struct qed_ptt *ptt;
			int rate;

			rate = min_t(int, vf_info->tx_rate, link.speed);

			ptt = qed_ptt_acquire(hwfn);
			if (!ptt) {
				DP_NOTICE(hwfn, "Failed to acquire PTT\n");
				return;
			}

			if (!qed_iov_configure_tx_rate(hwfn, ptt, i, rate)) {
				vf_info->tx_rate = rate;
				link.speed = rate;
			}

			qed_ptt_release(hwfn, ptt);
		}

		qed_iov_set_link(hwfn, i, &params, &link, &caps);
	}

	qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
}
static int qed_set_vf_link_state(struct qed_dev *cdev,
				 int vf_id, int link_state)
{
	int i;

	/* Sanitize request */
	if (IS_VF(cdev))
		return -EINVAL;

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "VF index [%d] isn't active\n", vf_id);
		return -EINVAL;
	}

	/* Handle configuration of link state */
	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf;

		vf = qed_iov_get_public_vf_info(hwfn, vf_id, true);
		if (!vf)
			continue;

		if (vf->link_state == link_state)
			continue;

		vf->link_state = link_state;
		qed_inform_vf_link_state(&cdev->hwfns[i]);
	}

	return 0;
}

static int qed_spoof_configure(struct qed_dev *cdev, int vfid, bool val)
{
	int i, rc = -EINVAL;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		rc = qed_iov_spoofchk_set(p_hwfn, vfid, val);
		if (rc)
			break;
	}

	return rc;
}
static int qed_configure_max_vf_rate(struct qed_dev *cdev, int vfid, int rate)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf;

		if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
			DP_NOTICE(p_hwfn,
				  "SR-IOV sanity check failed, can't set tx rate\n");
			return -EINVAL;
		}

		vf = qed_iov_get_public_vf_info(p_hwfn, vfid, true);

		vf->tx_rate = rate;

		qed_inform_vf_link_state(p_hwfn);
	}

	return 0;
}

static int qed_set_vf_rate(struct qed_dev *cdev,
			   int vfid, u32 min_rate, u32 max_rate)
{
	int rc_min = 0, rc_max = 0;

	if (max_rate)
		rc_max = qed_configure_max_vf_rate(cdev, vfid, max_rate);

	if (min_rate)
		rc_min = qed_iov_configure_min_tx_rate(cdev, vfid, min_rate);

	if (rc_max | rc_min)
		return -EINVAL;

	return 0;
}
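/* Workqueue half of the mailbox flow: snapshot-and-clear the pending
 * events bitmap, then service each marked VF. A VF with relative id 70
 * is tested via events[70 / 64] & (1ULL << (70 % 64)).
 */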
static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
{
	u64 events[QED_VF_ARRAY_LENGTH];
	struct qed_ptt *ptt;
	int i;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt) {
		DP_VERBOSE(hwfn, QED_MSG_IOV,
			   "Can't acquire PTT; re-scheduling\n");
		qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);
		return;
	}

	qed_iov_pf_get_and_clear_pending_events(hwfn, events);

	DP_VERBOSE(hwfn, QED_MSG_IOV,
		   "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n",
		   events[0], events[1], events[2]);

	qed_for_each_vf(hwfn, i) {
		/* Skip VFs with no pending messages */
		if (!(events[i / 64] & (1ULL << (i % 64))))
			continue;

		DP_VERBOSE(hwfn, QED_MSG_IOV,
			   "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
			   i, hwfn->cdev->p_iov_info->first_vf_in_pf + i);

		/* Copy VF's message to PF's request buffer for that VF */
		if (qed_iov_copy_vf_msg(hwfn, ptt, i))
			continue;

		qed_iov_process_mbx_req(hwfn, ptt, i);
	}

	qed_ptt_release(hwfn, ptt);
}
static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn)
{
	int i;

	qed_for_each_vf(hwfn, i) {
		struct qed_public_vf_info *info;
		bool update = false;
		u8 *mac;

		info = qed_iov_get_public_vf_info(hwfn, i, true);
		if (!info)
			continue;

		/* Update data on bulletin board */
		mac = qed_iov_bulletin_get_forced_mac(hwfn, i);
		if (is_valid_ether_addr(info->forced_mac) &&
		    (!mac || !ether_addr_equal(mac, info->forced_mac))) {
			DP_VERBOSE(hwfn,
				   QED_MSG_IOV,
				   "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n",
				   i,
				   hwfn->cdev->p_iov_info->first_vf_in_pf + i);

			/* Update bulletin board with forced MAC */
			qed_iov_bulletin_set_forced_mac(hwfn,
							info->forced_mac, i);
			update = true;
		}

		if (qed_iov_bulletin_get_forced_vlan(hwfn, i) ^
		    info->forced_vlan) {
			DP_VERBOSE(hwfn,
				   QED_MSG_IOV,
				   "Handling PF setting of pvid [0x%04x] to VF 0x%02x [Abs 0x%02x]\n",
				   info->forced_vlan,
				   i,
				   hwfn->cdev->p_iov_info->first_vf_in_pf + i);
			qed_iov_bulletin_set_forced_vlan(hwfn,
							 info->forced_vlan, i);
			update = true;
		}

		if (update)
			qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
	}
}
static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
{
	struct qed_ptt *ptt;
	int i;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt) {
		DP_NOTICE(hwfn, "Failed allocating a ptt entry\n");
		qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
		return;
	}

	qed_for_each_vf(hwfn, i)
		qed_iov_post_vf_bulletin(hwfn, i, ptt);

	qed_ptt_release(hwfn, ptt);
}
void qed_iov_pf_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     iov_task.work);
	int rc;

	if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
		return;

	if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) {
		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

		if (!ptt) {
			qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
			goto out;
		}

		rc = qed_iov_vf_flr_cleanup(hwfn, ptt);
		if (rc)
			qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);

		qed_ptt_release(hwfn, ptt);
	}

out:
	if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags))
		qed_handle_vf_msg(hwfn);

	if (test_and_clear_bit(QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
			       &hwfn->iov_task_flags))
		qed_handle_pf_set_vf_unicast(hwfn);

	if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
			       &hwfn->iov_task_flags))
		qed_handle_bulletin_post(hwfn);
}
void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
{
	int i;

	for_each_hwfn(cdev, i) {
		if (!cdev->hwfns[i].iov_wq)
			continue;

		if (schedule_first) {
			qed_schedule_iov(&cdev->hwfns[i],
					 QED_IOV_WQ_STOP_WQ_FLAG);
			cancel_delayed_work_sync(&cdev->hwfns[i].iov_task);
		}

		flush_workqueue(cdev->hwfns[i].iov_wq);
		destroy_workqueue(cdev->hwfns[i].iov_wq);
	}
}
int qed_iov_wq_start(struct qed_dev *cdev)
{
	char name[NAME_SIZE];
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		/* PFs need a dedicated workqueue only if they support IOV.
		 * VFs always require one.
		 */
		if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn))
			continue;

		snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x",
			 cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id);

		p_hwfn->iov_wq = create_singlethread_workqueue(name);
		if (!p_hwfn->iov_wq) {
			DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n");
			return -ENOMEM;
		}

		if (IS_PF(cdev))
			INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task);
		else
			INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task);
	}

	return 0;
}
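/* Exported PF-side IOV ops table consumed by the qed core. */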
const struct qed_iov_hv_ops qed_iov_ops_pass = {
	.configure = &qed_sriov_configure,
	.set_mac = &qed_sriov_pf_set_mac,
	.set_vlan = &qed_sriov_pf_set_vlan,
	.get_config = &qed_get_vf_config,
	.set_link_state = &qed_set_vf_link_state,
	.set_spoof = &qed_spoof_configure,
	.set_rate = &qed_set_vf_rate,
};