/*
 * QLogic qlcnic NIC Driver
 * Copyright (c) 2009-2013 QLogic Corporation
 *
 * See LICENSE.qlcnic for copyright and licensing details.
 */
8 #include "qlcnic_sriov.h"
10 #include "qlcnic_83xx_hw.h"
11 #include <linux/types.h>
13 #define QLC_BC_COMMAND 0
14 #define QLC_BC_RESPONSE 1
16 #define QLC_MBOX_RESP_TIMEOUT (10 * HZ)
17 #define QLC_MBOX_CH_FREE_TIMEOUT (10 * HZ)
20 #define QLC_BC_CFREE 1
22 #define QLC_BC_HDR_SZ 16
23 #define QLC_BC_PAYLOAD_SZ (1024 - QLC_BC_HDR_SZ)
25 #define QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF 2048
26 #define QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF 512
28 #define QLC_83XX_VF_RESET_FAIL_THRESH 8
29 #define QLC_BC_CMD_MAX_RETRY_CNT 5
31 static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter
*);
32 static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args
*, u32
);
33 static void qlcnic_sriov_vf_poll_dev_state(struct work_struct
*);
34 static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter
*);
35 static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans
*);
36 static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter
*,
37 struct qlcnic_cmd_args
*);
38 static void qlcnic_sriov_process_bc_cmd(struct work_struct
*);
40 static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops
= {
41 .read_crb
= qlcnic_83xx_read_crb
,
42 .write_crb
= qlcnic_83xx_write_crb
,
43 .read_reg
= qlcnic_83xx_rd_reg_indirect
,
44 .write_reg
= qlcnic_83xx_wrt_reg_indirect
,
45 .get_mac_address
= qlcnic_83xx_get_mac_address
,
46 .setup_intr
= qlcnic_83xx_setup_intr
,
47 .alloc_mbx_args
= qlcnic_83xx_alloc_mbx_args
,
48 .mbx_cmd
= qlcnic_sriov_vf_mbx_op
,
49 .get_func_no
= qlcnic_83xx_get_func_no
,
50 .api_lock
= qlcnic_83xx_cam_lock
,
51 .api_unlock
= qlcnic_83xx_cam_unlock
,
52 .process_lb_rcv_ring_diag
= qlcnic_83xx_process_rcv_ring_diag
,
53 .create_rx_ctx
= qlcnic_83xx_create_rx_ctx
,
54 .create_tx_ctx
= qlcnic_83xx_create_tx_ctx
,
55 .del_rx_ctx
= qlcnic_83xx_del_rx_ctx
,
56 .del_tx_ctx
= qlcnic_83xx_del_tx_ctx
,
57 .setup_link_event
= qlcnic_83xx_setup_link_event
,
58 .get_nic_info
= qlcnic_83xx_get_nic_info
,
59 .get_pci_info
= qlcnic_83xx_get_pci_info
,
60 .set_nic_info
= qlcnic_83xx_set_nic_info
,
61 .change_macvlan
= qlcnic_83xx_sre_macaddr_change
,
62 .napi_enable
= qlcnic_83xx_napi_enable
,
63 .napi_disable
= qlcnic_83xx_napi_disable
,
64 .config_intr_coal
= qlcnic_83xx_config_intr_coal
,
65 .config_rss
= qlcnic_83xx_config_rss
,
66 .config_hw_lro
= qlcnic_83xx_config_hw_lro
,
67 .config_promisc_mode
= qlcnic_83xx_nic_set_promisc
,
68 .change_l2_filter
= qlcnic_83xx_change_l2_filter
,
69 .get_board_info
= qlcnic_83xx_get_port_info
,
70 .free_mac_list
= qlcnic_sriov_vf_free_mac_list
,
73 static struct qlcnic_nic_template qlcnic_sriov_vf_ops
= {
74 .config_bridged_mode
= qlcnic_config_bridged_mode
,
75 .config_led
= qlcnic_config_led
,
76 .cancel_idc_work
= qlcnic_sriov_vf_cancel_fw_work
,
77 .napi_add
= qlcnic_83xx_napi_add
,
78 .napi_del
= qlcnic_83xx_napi_del
,
79 .shutdown
= qlcnic_sriov_vf_shutdown
,
80 .resume
= qlcnic_sriov_vf_resume
,
81 .config_ipaddr
= qlcnic_83xx_config_ipaddr
,
82 .clear_legacy_intr
= qlcnic_83xx_clear_legacy_intr
,
85 static const struct qlcnic_mailbox_metadata qlcnic_sriov_bc_mbx_tbl
[] = {
86 {QLCNIC_BC_CMD_CHANNEL_INIT
, 2, 2},
87 {QLCNIC_BC_CMD_CHANNEL_TERM
, 2, 2},
88 {QLCNIC_BC_CMD_GET_ACL
, 3, 14},
89 {QLCNIC_BC_CMD_CFG_GUEST_VLAN
, 2, 2},
92 static inline bool qlcnic_sriov_bc_msg_check(u32 val
)
94 return (val
& (1 << QLC_BC_MSG
)) ? true : false;
97 static inline bool qlcnic_sriov_channel_free_check(u32 val
)
99 return (val
& (1 << QLC_BC_CFREE
)) ? true : false;
102 static inline bool qlcnic_sriov_flr_check(u32 val
)
104 return (val
& (1 << QLC_BC_FLR
)) ? true : false;
107 static inline u8
qlcnic_sriov_target_func_id(u32 val
)
109 return (val
>> 4) & 0xff;
112 static int qlcnic_sriov_virtid_fn(struct qlcnic_adapter
*adapter
, int vf_id
)
114 struct pci_dev
*dev
= adapter
->pdev
;
118 if (qlcnic_sriov_vf_check(adapter
))
121 pos
= pci_find_ext_capability(dev
, PCI_EXT_CAP_ID_SRIOV
);
122 pci_read_config_word(dev
, pos
+ PCI_SRIOV_VF_OFFSET
, &offset
);
123 pci_read_config_word(dev
, pos
+ PCI_SRIOV_VF_STRIDE
, &stride
);
125 return (dev
->devfn
+ offset
+ stride
* vf_id
) & 0xff;
128 int qlcnic_sriov_init(struct qlcnic_adapter
*adapter
, int num_vfs
)
130 struct qlcnic_sriov
*sriov
;
131 struct qlcnic_back_channel
*bc
;
132 struct workqueue_struct
*wq
;
133 struct qlcnic_vport
*vp
;
134 struct qlcnic_vf_info
*vf
;
137 if (!qlcnic_sriov_enable_check(adapter
))
140 sriov
= kzalloc(sizeof(struct qlcnic_sriov
), GFP_KERNEL
);
144 adapter
->ahw
->sriov
= sriov
;
145 sriov
->num_vfs
= num_vfs
;
147 sriov
->vf_info
= kzalloc(sizeof(struct qlcnic_vf_info
) *
148 num_vfs
, GFP_KERNEL
);
149 if (!sriov
->vf_info
) {
151 goto qlcnic_free_sriov
;
154 wq
= create_singlethread_workqueue("bc-trans");
157 dev_err(&adapter
->pdev
->dev
,
158 "Cannot create bc-trans workqueue\n");
159 goto qlcnic_free_vf_info
;
162 bc
->bc_trans_wq
= wq
;
164 wq
= create_singlethread_workqueue("async");
167 dev_err(&adapter
->pdev
->dev
, "Cannot create async workqueue\n");
168 goto qlcnic_destroy_trans_wq
;
171 bc
->bc_async_wq
= wq
;
172 INIT_LIST_HEAD(&bc
->async_list
);
174 for (i
= 0; i
< num_vfs
; i
++) {
175 vf
= &sriov
->vf_info
[i
];
176 vf
->adapter
= adapter
;
177 vf
->pci_func
= qlcnic_sriov_virtid_fn(adapter
, i
);
178 mutex_init(&vf
->send_cmd_lock
);
179 INIT_LIST_HEAD(&vf
->rcv_act
.wait_list
);
180 INIT_LIST_HEAD(&vf
->rcv_pend
.wait_list
);
181 spin_lock_init(&vf
->rcv_act
.lock
);
182 spin_lock_init(&vf
->rcv_pend
.lock
);
183 init_completion(&vf
->ch_free_cmpl
);
185 INIT_WORK(&vf
->trans_work
, qlcnic_sriov_process_bc_cmd
);
187 if (qlcnic_sriov_pf_check(adapter
)) {
188 vp
= kzalloc(sizeof(struct qlcnic_vport
), GFP_KERNEL
);
191 goto qlcnic_destroy_async_wq
;
193 sriov
->vf_info
[i
].vp
= vp
;
194 vp
->max_tx_bw
= MAX_BW
;
196 random_ether_addr(vp
->mac
);
197 dev_info(&adapter
->pdev
->dev
,
198 "MAC Address %pM is configured for VF %d\n",
205 qlcnic_destroy_async_wq
:
206 destroy_workqueue(bc
->bc_async_wq
);
208 qlcnic_destroy_trans_wq
:
209 destroy_workqueue(bc
->bc_trans_wq
);
212 kfree(sriov
->vf_info
);
215 kfree(adapter
->ahw
->sriov
);
219 void qlcnic_sriov_cleanup_list(struct qlcnic_trans_list
*t_list
)
221 struct qlcnic_bc_trans
*trans
;
222 struct qlcnic_cmd_args cmd
;
225 spin_lock_irqsave(&t_list
->lock
, flags
);
227 while (!list_empty(&t_list
->wait_list
)) {
228 trans
= list_first_entry(&t_list
->wait_list
,
229 struct qlcnic_bc_trans
, list
);
230 list_del(&trans
->list
);
232 cmd
.req
.arg
= (u32
*)trans
->req_pay
;
233 cmd
.rsp
.arg
= (u32
*)trans
->rsp_pay
;
234 qlcnic_free_mbx_args(&cmd
);
235 qlcnic_sriov_cleanup_transaction(trans
);
238 spin_unlock_irqrestore(&t_list
->lock
, flags
);
241 void __qlcnic_sriov_cleanup(struct qlcnic_adapter
*adapter
)
243 struct qlcnic_sriov
*sriov
= adapter
->ahw
->sriov
;
244 struct qlcnic_back_channel
*bc
= &sriov
->bc
;
245 struct qlcnic_vf_info
*vf
;
248 if (!qlcnic_sriov_enable_check(adapter
))
251 qlcnic_sriov_cleanup_async_list(bc
);
252 destroy_workqueue(bc
->bc_async_wq
);
254 for (i
= 0; i
< sriov
->num_vfs
; i
++) {
255 vf
= &sriov
->vf_info
[i
];
256 qlcnic_sriov_cleanup_list(&vf
->rcv_pend
);
257 cancel_work_sync(&vf
->trans_work
);
258 qlcnic_sriov_cleanup_list(&vf
->rcv_act
);
261 destroy_workqueue(bc
->bc_trans_wq
);
263 for (i
= 0; i
< sriov
->num_vfs
; i
++)
264 kfree(sriov
->vf_info
[i
].vp
);
266 kfree(sriov
->vf_info
);
267 kfree(adapter
->ahw
->sriov
);
270 static void qlcnic_sriov_vf_cleanup(struct qlcnic_adapter
*adapter
)
272 qlcnic_sriov_channel_cfg_cmd(adapter
, QLCNIC_BC_CMD_CHANNEL_TERM
);
273 qlcnic_sriov_cfg_bc_intr(adapter
, 0);
274 __qlcnic_sriov_cleanup(adapter
);
/* Dispatch SR-IOV teardown to the PF or VF variant. */
void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
{
	if (qlcnic_sriov_pf_check(adapter))
		qlcnic_sriov_pf_cleanup(adapter);

	if (qlcnic_sriov_vf_check(adapter))
		qlcnic_sriov_vf_cleanup(adapter);
}
286 static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter
*adapter
, u32
*hdr
,
287 u32
*pay
, u8 pci_func
, u8 size
)
289 u32 rsp
, mbx_val
, fw_data
, rsp_num
, mbx_cmd
, val
, wait_time
= 0;
290 struct qlcnic_hardware_context
*ahw
= adapter
->ahw
;
296 opcode
= ((struct qlcnic_bc_hdr
*)hdr
)->cmd_op
;
298 if (!test_bit(QLC_83XX_MBX_READY
, &adapter
->ahw
->idc
.status
)) {
299 dev_info(&adapter
->pdev
->dev
,
300 "Mailbox cmd attempted, 0x%x\n", opcode
);
301 dev_info(&adapter
->pdev
->dev
, "Mailbox detached\n");
305 spin_lock_irqsave(&ahw
->mbx_lock
, flags
);
307 mbx_val
= QLCRDX(ahw
, QLCNIC_HOST_MBX_CTRL
);
309 QLCDB(adapter
, DRV
, "Mailbox cmd attempted, 0x%x\n", opcode
);
310 spin_unlock_irqrestore(&ahw
->mbx_lock
, flags
);
311 return QLCNIC_RCODE_TIMEOUT
;
313 /* Fill in mailbox registers */
314 val
= size
+ (sizeof(struct qlcnic_bc_hdr
) / sizeof(u32
));
315 mbx_cmd
= 0x31 | (val
<< 16) | (adapter
->ahw
->fw_hal_version
<< 29);
317 writel(mbx_cmd
, QLCNIC_MBX_HOST(ahw
, 0));
318 mbx_cmd
= 0x1 | (1 << 4);
320 if (qlcnic_sriov_pf_check(adapter
))
321 mbx_cmd
|= (pci_func
<< 5);
323 writel(mbx_cmd
, QLCNIC_MBX_HOST(ahw
, 1));
324 for (i
= 2, j
= 0; j
< (sizeof(struct qlcnic_bc_hdr
) / sizeof(u32
));
326 writel(*(hdr
++), QLCNIC_MBX_HOST(ahw
, i
));
328 for (j
= 0; j
< size
; j
++, i
++)
329 writel(*(pay
++), QLCNIC_MBX_HOST(ahw
, i
));
331 /* Signal FW about the impending command */
332 QLCWRX(ahw
, QLCNIC_HOST_MBX_CTRL
, QLCNIC_SET_OWNER
);
334 /* Waiting for the mailbox cmd to complete and while waiting here
335 * some AEN might arrive. If more than 5 seconds expire we can
336 * assume something is wrong.
339 rsp
= qlcnic_83xx_mbx_poll(adapter
, &wait_time
);
340 if (rsp
!= QLCNIC_RCODE_TIMEOUT
) {
341 /* Get the FW response data */
342 fw_data
= readl(QLCNIC_MBX_FW(ahw
, 0));
343 if (fw_data
& QLCNIC_MBX_ASYNC_EVENT
) {
344 __qlcnic_83xx_process_aen(adapter
);
347 mbx_err_code
= QLCNIC_MBX_STATUS(fw_data
);
348 rsp_num
= QLCNIC_MBX_NUM_REGS(fw_data
);
349 opcode
= QLCNIC_MBX_RSP(fw_data
);
351 switch (mbx_err_code
) {
352 case QLCNIC_MBX_RSP_OK
:
353 case QLCNIC_MBX_PORT_RSP_OK
:
354 rsp
= QLCNIC_RCODE_SUCCESS
;
357 if (opcode
== QLCNIC_CMD_CONFIG_MAC_VLAN
) {
358 rsp
= qlcnic_83xx_mac_rcode(adapter
);
362 dev_err(&adapter
->pdev
->dev
,
363 "MBX command 0x%x failed with err:0x%x\n",
364 opcode
, mbx_err_code
);
371 dev_err(&adapter
->pdev
->dev
, "MBX command 0x%x timed out\n",
372 QLCNIC_MBX_RSP(mbx_cmd
));
373 rsp
= QLCNIC_RCODE_TIMEOUT
;
375 /* clear fw mbx control register */
376 QLCWRX(ahw
, QLCNIC_FW_MBX_CTRL
, QLCNIC_CLR_OWNER
);
377 spin_unlock_irqrestore(&adapter
->ahw
->mbx_lock
, flags
);
381 static void qlcnic_sriov_vf_cfg_buff_desc(struct qlcnic_adapter
*adapter
)
383 adapter
->num_rxd
= QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF
;
384 adapter
->max_rxd
= MAX_RCV_DESCRIPTORS_10G
;
385 adapter
->num_jumbo_rxd
= QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF
;
386 adapter
->max_jumbo_rxd
= MAX_JUMBO_RCV_DESCRIPTORS_10G
;
387 adapter
->num_txd
= MAX_CMD_DESCRIPTORS
;
388 adapter
->max_rds_rings
= MAX_RDS_RINGS
;
391 int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter
*adapter
,
392 struct qlcnic_info
*npar_info
, u16 vport_id
)
394 struct device
*dev
= &adapter
->pdev
->dev
;
395 struct qlcnic_cmd_args cmd
;
399 err
= qlcnic_alloc_mbx_args(&cmd
, adapter
, QLCNIC_CMD_GET_NIC_INFO
);
403 cmd
.req
.arg
[1] = vport_id
<< 16 | 0x1;
404 err
= qlcnic_issue_cmd(adapter
, &cmd
);
406 dev_err(&adapter
->pdev
->dev
,
407 "Failed to get vport info, err=%d\n", err
);
408 qlcnic_free_mbx_args(&cmd
);
412 status
= cmd
.rsp
.arg
[2] & 0xffff;
414 npar_info
->min_tx_bw
= MSW(cmd
.rsp
.arg
[2]);
416 npar_info
->max_tx_bw
= LSW(cmd
.rsp
.arg
[3]);
418 npar_info
->max_tx_ques
= MSW(cmd
.rsp
.arg
[3]);
420 npar_info
->max_tx_mac_filters
= LSW(cmd
.rsp
.arg
[4]);
422 npar_info
->max_rx_mcast_mac_filters
= MSW(cmd
.rsp
.arg
[4]);
424 npar_info
->max_rx_ucast_mac_filters
= LSW(cmd
.rsp
.arg
[5]);
426 npar_info
->max_rx_ip_addr
= MSW(cmd
.rsp
.arg
[5]);
428 npar_info
->max_rx_lro_flow
= LSW(cmd
.rsp
.arg
[6]);
430 npar_info
->max_rx_status_rings
= MSW(cmd
.rsp
.arg
[6]);
432 npar_info
->max_rx_buf_rings
= LSW(cmd
.rsp
.arg
[7]);
434 npar_info
->max_rx_ques
= MSW(cmd
.rsp
.arg
[7]);
435 npar_info
->max_tx_vlan_keys
= LSW(cmd
.rsp
.arg
[8]);
436 npar_info
->max_local_ipv6_addrs
= MSW(cmd
.rsp
.arg
[8]);
437 npar_info
->max_remote_ipv6_addrs
= LSW(cmd
.rsp
.arg
[9]);
439 dev_info(dev
, "\n\tmin_tx_bw: %d, max_tx_bw: %d max_tx_ques: %d,\n"
440 "\tmax_tx_mac_filters: %d max_rx_mcast_mac_filters: %d,\n"
441 "\tmax_rx_ucast_mac_filters: 0x%x, max_rx_ip_addr: %d,\n"
442 "\tmax_rx_lro_flow: %d max_rx_status_rings: %d,\n"
443 "\tmax_rx_buf_rings: %d, max_rx_ques: %d, max_tx_vlan_keys %d\n"
444 "\tlocal_ipv6_addr: %d, remote_ipv6_addr: %d\n",
445 npar_info
->min_tx_bw
, npar_info
->max_tx_bw
,
446 npar_info
->max_tx_ques
, npar_info
->max_tx_mac_filters
,
447 npar_info
->max_rx_mcast_mac_filters
,
448 npar_info
->max_rx_ucast_mac_filters
, npar_info
->max_rx_ip_addr
,
449 npar_info
->max_rx_lro_flow
, npar_info
->max_rx_status_rings
,
450 npar_info
->max_rx_buf_rings
, npar_info
->max_rx_ques
,
451 npar_info
->max_tx_vlan_keys
, npar_info
->max_local_ipv6_addrs
,
452 npar_info
->max_remote_ipv6_addrs
);
454 qlcnic_free_mbx_args(&cmd
);
458 static int qlcnic_sriov_set_pvid_mode(struct qlcnic_adapter
*adapter
,
459 struct qlcnic_cmd_args
*cmd
)
461 adapter
->rx_pvid
= (cmd
->rsp
.arg
[1] >> 16) & 0xffff;
462 adapter
->flags
&= ~QLCNIC_TAGGING_ENABLED
;
466 static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter
*adapter
,
467 struct qlcnic_cmd_args
*cmd
)
469 struct qlcnic_sriov
*sriov
= adapter
->ahw
->sriov
;
473 if (sriov
->allowed_vlans
)
476 sriov
->any_vlan
= cmd
->rsp
.arg
[2] & 0xf;
477 if (!sriov
->any_vlan
)
480 sriov
->num_allowed_vlans
= cmd
->rsp
.arg
[2] >> 16;
481 num_vlans
= sriov
->num_allowed_vlans
;
482 sriov
->allowed_vlans
= kzalloc(sizeof(u16
) * num_vlans
, GFP_KERNEL
);
483 if (!sriov
->allowed_vlans
)
486 vlans
= (u16
*)&cmd
->rsp
.arg
[3];
487 for (i
= 0; i
< num_vlans
; i
++)
488 sriov
->allowed_vlans
[i
] = vlans
[i
];
493 static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter
*adapter
)
495 struct qlcnic_sriov
*sriov
= adapter
->ahw
->sriov
;
496 struct qlcnic_cmd_args cmd
;
499 ret
= qlcnic_sriov_alloc_bc_mbx_args(&cmd
, QLCNIC_BC_CMD_GET_ACL
);
503 ret
= qlcnic_issue_cmd(adapter
, &cmd
);
505 dev_err(&adapter
->pdev
->dev
, "Failed to get ACL, err=%d\n",
508 sriov
->vlan_mode
= cmd
.rsp
.arg
[1] & 0x3;
509 switch (sriov
->vlan_mode
) {
510 case QLC_GUEST_VLAN_MODE
:
511 ret
= qlcnic_sriov_set_guest_vlan_mode(adapter
, &cmd
);
514 ret
= qlcnic_sriov_set_pvid_mode(adapter
, &cmd
);
519 qlcnic_free_mbx_args(&cmd
);
523 static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter
*adapter
)
525 struct qlcnic_info nic_info
;
526 struct qlcnic_hardware_context
*ahw
= adapter
->ahw
;
529 err
= qlcnic_sriov_get_vf_vport_info(adapter
, &nic_info
, 0);
533 err
= qlcnic_get_nic_info(adapter
, &nic_info
, ahw
->pci_func
);
537 err
= qlcnic_sriov_get_vf_acl(adapter
);
541 if (qlcnic_83xx_get_port_info(adapter
))
544 qlcnic_sriov_vf_cfg_buff_desc(adapter
);
545 adapter
->flags
|= QLCNIC_ADAPTER_INITIALIZED
;
546 dev_info(&adapter
->pdev
->dev
, "HAL Version: %d\n",
547 adapter
->ahw
->fw_hal_version
);
549 ahw
->physical_port
= (u8
) nic_info
.phys_port
;
550 ahw
->switch_mode
= nic_info
.switch_mode
;
551 ahw
->max_mtu
= nic_info
.max_mtu
;
552 ahw
->op_mode
= nic_info
.op_mode
;
553 ahw
->capabilities
= nic_info
.capabilities
;
557 static int qlcnic_sriov_setup_vf(struct qlcnic_adapter
*adapter
,
562 INIT_LIST_HEAD(&adapter
->vf_mc_list
);
563 if (!qlcnic_use_msi_x
&& !!qlcnic_use_msi
)
564 dev_warn(&adapter
->pdev
->dev
,
565 "83xx adapter do not support MSI interrupts\n");
567 err
= qlcnic_setup_intr(adapter
, 1);
569 dev_err(&adapter
->pdev
->dev
, "Failed to setup interrupt\n");
570 goto err_out_disable_msi
;
573 err
= qlcnic_83xx_setup_mbx_intr(adapter
);
575 goto err_out_disable_msi
;
577 err
= qlcnic_sriov_init(adapter
, 1);
579 goto err_out_disable_mbx_intr
;
581 err
= qlcnic_sriov_cfg_bc_intr(adapter
, 1);
583 goto err_out_cleanup_sriov
;
585 err
= qlcnic_sriov_channel_cfg_cmd(adapter
, QLCNIC_BC_CMD_CHANNEL_INIT
);
587 goto err_out_disable_bc_intr
;
589 err
= qlcnic_sriov_vf_init_driver(adapter
);
591 goto err_out_send_channel_term
;
593 err
= qlcnic_setup_netdev(adapter
, adapter
->netdev
, pci_using_dac
);
595 goto err_out_send_channel_term
;
597 pci_set_drvdata(adapter
->pdev
, adapter
);
598 dev_info(&adapter
->pdev
->dev
, "%s: XGbE port initialized\n",
599 adapter
->netdev
->name
);
600 qlcnic_schedule_work(adapter
, qlcnic_sriov_vf_poll_dev_state
,
601 adapter
->ahw
->idc
.delay
);
604 err_out_send_channel_term
:
605 qlcnic_sriov_channel_cfg_cmd(adapter
, QLCNIC_BC_CMD_CHANNEL_TERM
);
607 err_out_disable_bc_intr
:
608 qlcnic_sriov_cfg_bc_intr(adapter
, 0);
610 err_out_cleanup_sriov
:
611 __qlcnic_sriov_cleanup(adapter
);
613 err_out_disable_mbx_intr
:
614 qlcnic_83xx_free_mbx_intr(adapter
);
617 qlcnic_teardown_intr(adapter
);
621 static int qlcnic_sriov_check_dev_ready(struct qlcnic_adapter
*adapter
)
627 if (++adapter
->fw_fail_cnt
> QLC_BC_CMD_MAX_RETRY_CNT
)
629 state
= QLCRDX(adapter
->ahw
, QLC_83XX_IDC_DEV_STATE
);
630 } while (state
!= QLC_83XX_IDC_DEV_READY
);
635 int qlcnic_sriov_vf_init(struct qlcnic_adapter
*adapter
, int pci_using_dac
)
637 struct qlcnic_hardware_context
*ahw
= adapter
->ahw
;
640 spin_lock_init(&ahw
->mbx_lock
);
641 set_bit(QLC_83XX_MBX_READY
, &ahw
->idc
.status
);
642 set_bit(QLC_83XX_MODULE_LOADED
, &ahw
->idc
.status
);
643 ahw
->idc
.delay
= QLC_83XX_IDC_FW_POLL_DELAY
;
644 ahw
->reset_context
= 0;
645 adapter
->fw_fail_cnt
= 0;
646 ahw
->msix_supported
= 1;
647 adapter
->need_fw_reset
= 0;
648 adapter
->flags
|= QLCNIC_TX_INTR_SHARED
;
650 err
= qlcnic_sriov_check_dev_ready(adapter
);
654 err
= qlcnic_sriov_setup_vf(adapter
, pci_using_dac
);
658 if (qlcnic_read_mac_addr(adapter
))
659 dev_warn(&adapter
->pdev
->dev
, "failed to read mac addr\n");
661 INIT_DELAYED_WORK(&adapter
->idc_aen_work
, qlcnic_83xx_idc_aen_work
);
663 clear_bit(__QLCNIC_RESETTING
, &adapter
->state
);
667 void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter
*adapter
)
669 struct qlcnic_hardware_context
*ahw
= adapter
->ahw
;
671 ahw
->op_mode
= QLCNIC_SRIOV_VF_FUNC
;
672 dev_info(&adapter
->pdev
->dev
,
673 "HAL Version: %d Non Privileged SRIOV function\n",
674 ahw
->fw_hal_version
);
675 adapter
->nic_ops
= &qlcnic_sriov_vf_ops
;
676 set_bit(__QLCNIC_SRIOV_ENABLE
, &adapter
->state
);
680 void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context
*ahw
)
682 ahw
->hw_ops
= &qlcnic_sriov_vf_hw_ops
;
683 ahw
->reg_tbl
= (u32
*)qlcnic_83xx_reg_tbl
;
684 ahw
->ext_reg_tbl
= (u32
*)qlcnic_83xx_ext_reg_tbl
;
687 static u32
qlcnic_sriov_get_bc_paysize(u32 real_pay_size
, u8 curr_frag
)
691 pay_size
= real_pay_size
/ ((curr_frag
+ 1) * QLC_BC_PAYLOAD_SZ
);
694 pay_size
= QLC_BC_PAYLOAD_SZ
;
696 pay_size
= real_pay_size
% QLC_BC_PAYLOAD_SZ
;
701 int qlcnic_sriov_func_to_index(struct qlcnic_adapter
*adapter
, u8 pci_func
)
703 struct qlcnic_vf_info
*vf_info
= adapter
->ahw
->sriov
->vf_info
;
706 if (qlcnic_sriov_vf_check(adapter
))
709 for (i
= 0; i
< adapter
->ahw
->sriov
->num_vfs
; i
++) {
710 if (vf_info
[i
].pci_func
== pci_func
)
717 static inline int qlcnic_sriov_alloc_bc_trans(struct qlcnic_bc_trans
**trans
)
719 *trans
= kzalloc(sizeof(struct qlcnic_bc_trans
), GFP_ATOMIC
);
723 init_completion(&(*trans
)->resp_cmpl
);
727 static inline int qlcnic_sriov_alloc_bc_msg(struct qlcnic_bc_hdr
**hdr
,
730 *hdr
= kzalloc(sizeof(struct qlcnic_bc_hdr
) * size
, GFP_ATOMIC
);
737 static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args
*mbx
, u32 type
)
739 const struct qlcnic_mailbox_metadata
*mbx_tbl
;
742 mbx_tbl
= qlcnic_sriov_bc_mbx_tbl
;
743 size
= ARRAY_SIZE(qlcnic_sriov_bc_mbx_tbl
);
745 for (i
= 0; i
< size
; i
++) {
746 if (type
== mbx_tbl
[i
].cmd
) {
747 mbx
->op_type
= QLC_BC_CMD
;
748 mbx
->req
.num
= mbx_tbl
[i
].in_args
;
749 mbx
->rsp
.num
= mbx_tbl
[i
].out_args
;
750 mbx
->req
.arg
= kcalloc(mbx
->req
.num
, sizeof(u32
),
754 mbx
->rsp
.arg
= kcalloc(mbx
->rsp
.num
, sizeof(u32
),
761 memset(mbx
->req
.arg
, 0, sizeof(u32
) * mbx
->req
.num
);
762 memset(mbx
->rsp
.arg
, 0, sizeof(u32
) * mbx
->rsp
.num
);
763 mbx
->req
.arg
[0] = (type
| (mbx
->req
.num
<< 16) |
771 static int qlcnic_sriov_prepare_bc_hdr(struct qlcnic_bc_trans
*trans
,
772 struct qlcnic_cmd_args
*cmd
,
773 u16 seq
, u8 msg_type
)
775 struct qlcnic_bc_hdr
*hdr
;
777 u32 num_regs
, bc_pay_sz
;
779 u8 cmd_op
, num_frags
, t_num_frags
;
781 bc_pay_sz
= QLC_BC_PAYLOAD_SZ
;
782 if (msg_type
== QLC_BC_COMMAND
) {
783 trans
->req_pay
= (struct qlcnic_bc_payload
*)cmd
->req
.arg
;
784 trans
->rsp_pay
= (struct qlcnic_bc_payload
*)cmd
->rsp
.arg
;
785 num_regs
= cmd
->req
.num
;
786 trans
->req_pay_size
= (num_regs
* 4);
787 num_regs
= cmd
->rsp
.num
;
788 trans
->rsp_pay_size
= (num_regs
* 4);
789 cmd_op
= cmd
->req
.arg
[0] & 0xff;
790 remainder
= (trans
->req_pay_size
) % (bc_pay_sz
);
791 num_frags
= (trans
->req_pay_size
) / (bc_pay_sz
);
794 t_num_frags
= num_frags
;
795 if (qlcnic_sriov_alloc_bc_msg(&trans
->req_hdr
, num_frags
))
797 remainder
= (trans
->rsp_pay_size
) % (bc_pay_sz
);
798 num_frags
= (trans
->rsp_pay_size
) / (bc_pay_sz
);
801 if (qlcnic_sriov_alloc_bc_msg(&trans
->rsp_hdr
, num_frags
))
803 num_frags
= t_num_frags
;
804 hdr
= trans
->req_hdr
;
806 cmd
->req
.arg
= (u32
*)trans
->req_pay
;
807 cmd
->rsp
.arg
= (u32
*)trans
->rsp_pay
;
808 cmd_op
= cmd
->req
.arg
[0] & 0xff;
809 remainder
= (trans
->rsp_pay_size
) % (bc_pay_sz
);
810 num_frags
= (trans
->rsp_pay_size
) / (bc_pay_sz
);
813 cmd
->req
.num
= trans
->req_pay_size
/ 4;
814 cmd
->rsp
.num
= trans
->rsp_pay_size
/ 4;
815 hdr
= trans
->rsp_hdr
;
818 trans
->trans_id
= seq
;
819 trans
->cmd_id
= cmd_op
;
820 for (i
= 0; i
< num_frags
; i
++) {
822 hdr
[i
].msg_type
= msg_type
;
823 hdr
[i
].op_type
= cmd
->op_type
;
825 hdr
[i
].num_frags
= num_frags
;
826 hdr
[i
].frag_num
= i
+ 1;
827 hdr
[i
].cmd_op
= cmd_op
;
833 static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans
*trans
)
837 kfree(trans
->req_hdr
);
838 kfree(trans
->rsp_hdr
);
842 static int qlcnic_sriov_clear_trans(struct qlcnic_vf_info
*vf
,
843 struct qlcnic_bc_trans
*trans
, u8 type
)
845 struct qlcnic_trans_list
*t_list
;
849 if (type
== QLC_BC_RESPONSE
) {
850 t_list
= &vf
->rcv_act
;
851 spin_lock_irqsave(&t_list
->lock
, flags
);
853 list_del(&trans
->list
);
854 if (t_list
->count
> 0)
856 spin_unlock_irqrestore(&t_list
->lock
, flags
);
858 if (type
== QLC_BC_COMMAND
) {
859 while (test_and_set_bit(QLC_BC_VF_SEND
, &vf
->state
))
862 clear_bit(QLC_BC_VF_SEND
, &vf
->state
);
867 static void qlcnic_sriov_schedule_bc_cmd(struct qlcnic_sriov
*sriov
,
868 struct qlcnic_vf_info
*vf
,
871 if (test_bit(QLC_BC_VF_FLR
, &vf
->state
) ||
872 vf
->adapter
->need_fw_reset
)
875 queue_work(sriov
->bc
.bc_trans_wq
, &vf
->trans_work
);
878 static inline void qlcnic_sriov_wait_for_resp(struct qlcnic_bc_trans
*trans
)
880 struct completion
*cmpl
= &trans
->resp_cmpl
;
882 if (wait_for_completion_timeout(cmpl
, QLC_MBOX_RESP_TIMEOUT
))
883 trans
->trans_state
= QLC_END
;
885 trans
->trans_state
= QLC_ABORT
;
890 static void qlcnic_sriov_handle_multi_frags(struct qlcnic_bc_trans
*trans
,
893 if (type
== QLC_BC_RESPONSE
) {
894 trans
->curr_rsp_frag
++;
895 if (trans
->curr_rsp_frag
< trans
->rsp_hdr
->num_frags
)
896 trans
->trans_state
= QLC_INIT
;
898 trans
->trans_state
= QLC_END
;
900 trans
->curr_req_frag
++;
901 if (trans
->curr_req_frag
< trans
->req_hdr
->num_frags
)
902 trans
->trans_state
= QLC_INIT
;
904 trans
->trans_state
= QLC_WAIT_FOR_RESP
;
908 static void qlcnic_sriov_wait_for_channel_free(struct qlcnic_bc_trans
*trans
,
911 struct qlcnic_vf_info
*vf
= trans
->vf
;
912 struct completion
*cmpl
= &vf
->ch_free_cmpl
;
914 if (!wait_for_completion_timeout(cmpl
, QLC_MBOX_CH_FREE_TIMEOUT
)) {
915 trans
->trans_state
= QLC_ABORT
;
919 clear_bit(QLC_BC_VF_CHANNEL
, &vf
->state
);
920 qlcnic_sriov_handle_multi_frags(trans
, type
);
923 static void qlcnic_sriov_pull_bc_msg(struct qlcnic_adapter
*adapter
,
924 u32
*hdr
, u32
*pay
, u32 size
)
926 struct qlcnic_hardware_context
*ahw
= adapter
->ahw
;
928 u8 i
, max
= 2, hdr_size
, j
;
930 hdr_size
= (sizeof(struct qlcnic_bc_hdr
) / sizeof(u32
));
931 max
= (size
/ sizeof(u32
)) + hdr_size
;
933 fw_mbx
= readl(QLCNIC_MBX_FW(ahw
, 0));
934 for (i
= 2, j
= 0; j
< hdr_size
; i
++, j
++)
935 *(hdr
++) = readl(QLCNIC_MBX_FW(ahw
, i
));
936 for (; j
< max
; i
++, j
++)
937 *(pay
++) = readl(QLCNIC_MBX_FW(ahw
, i
));
940 static int __qlcnic_sriov_issue_bc_post(struct qlcnic_vf_info
*vf
)
946 if (!test_and_set_bit(QLC_BC_VF_CHANNEL
, &vf
->state
)) {
956 static int qlcnic_sriov_issue_bc_post(struct qlcnic_bc_trans
*trans
, u8 type
)
958 struct qlcnic_vf_info
*vf
= trans
->vf
;
959 u32 pay_size
, hdr_size
;
962 u8 pci_func
= trans
->func_id
;
964 if (__qlcnic_sriov_issue_bc_post(vf
))
967 if (type
== QLC_BC_COMMAND
) {
968 hdr
= (u32
*)(trans
->req_hdr
+ trans
->curr_req_frag
);
969 pay
= (u32
*)(trans
->req_pay
+ trans
->curr_req_frag
);
970 hdr_size
= (sizeof(struct qlcnic_bc_hdr
) / sizeof(u32
));
971 pay_size
= qlcnic_sriov_get_bc_paysize(trans
->req_pay_size
,
972 trans
->curr_req_frag
);
973 pay_size
= (pay_size
/ sizeof(u32
));
975 hdr
= (u32
*)(trans
->rsp_hdr
+ trans
->curr_rsp_frag
);
976 pay
= (u32
*)(trans
->rsp_pay
+ trans
->curr_rsp_frag
);
977 hdr_size
= (sizeof(struct qlcnic_bc_hdr
) / sizeof(u32
));
978 pay_size
= qlcnic_sriov_get_bc_paysize(trans
->rsp_pay_size
,
979 trans
->curr_rsp_frag
);
980 pay_size
= (pay_size
/ sizeof(u32
));
983 ret
= qlcnic_sriov_post_bc_msg(vf
->adapter
, hdr
, pay
,
988 static int __qlcnic_sriov_send_bc_msg(struct qlcnic_bc_trans
*trans
,
989 struct qlcnic_vf_info
*vf
, u8 type
)
995 if (test_bit(QLC_BC_VF_FLR
, &vf
->state
) ||
996 vf
->adapter
->need_fw_reset
)
997 trans
->trans_state
= QLC_ABORT
;
999 switch (trans
->trans_state
) {
1001 trans
->trans_state
= QLC_WAIT_FOR_CHANNEL_FREE
;
1002 if (qlcnic_sriov_issue_bc_post(trans
, type
))
1003 trans
->trans_state
= QLC_ABORT
;
1005 case QLC_WAIT_FOR_CHANNEL_FREE
:
1006 qlcnic_sriov_wait_for_channel_free(trans
, type
);
1008 case QLC_WAIT_FOR_RESP
:
1009 qlcnic_sriov_wait_for_resp(trans
);
1018 clear_bit(QLC_BC_VF_CHANNEL
, &vf
->state
);
1028 static int qlcnic_sriov_send_bc_cmd(struct qlcnic_adapter
*adapter
,
1029 struct qlcnic_bc_trans
*trans
, int pci_func
)
1031 struct qlcnic_vf_info
*vf
;
1032 int err
, index
= qlcnic_sriov_func_to_index(adapter
, pci_func
);
1037 vf
= &adapter
->ahw
->sriov
->vf_info
[index
];
1039 trans
->func_id
= pci_func
;
1041 if (!test_bit(QLC_BC_VF_STATE
, &vf
->state
)) {
1042 if (qlcnic_sriov_pf_check(adapter
))
1044 if (qlcnic_sriov_vf_check(adapter
) &&
1045 trans
->cmd_id
!= QLCNIC_BC_CMD_CHANNEL_INIT
)
1049 mutex_lock(&vf
->send_cmd_lock
);
1050 vf
->send_cmd
= trans
;
1051 err
= __qlcnic_sriov_send_bc_msg(trans
, vf
, QLC_BC_COMMAND
);
1052 qlcnic_sriov_clear_trans(vf
, trans
, QLC_BC_COMMAND
);
1053 mutex_unlock(&vf
->send_cmd_lock
);
1057 static void __qlcnic_sriov_process_bc_cmd(struct qlcnic_adapter
*adapter
,
1058 struct qlcnic_bc_trans
*trans
,
1059 struct qlcnic_cmd_args
*cmd
)
1061 #ifdef CONFIG_QLCNIC_SRIOV
1062 if (qlcnic_sriov_pf_check(adapter
)) {
1063 qlcnic_sriov_pf_process_bc_cmd(adapter
, trans
, cmd
);
1067 cmd
->rsp
.arg
[0] |= (0x9 << 25);
1071 static void qlcnic_sriov_process_bc_cmd(struct work_struct
*work
)
1073 struct qlcnic_vf_info
*vf
= container_of(work
, struct qlcnic_vf_info
,
1075 struct qlcnic_bc_trans
*trans
= NULL
;
1076 struct qlcnic_adapter
*adapter
= vf
->adapter
;
1077 struct qlcnic_cmd_args cmd
;
1080 if (adapter
->need_fw_reset
)
1083 if (test_bit(QLC_BC_VF_FLR
, &vf
->state
))
1086 trans
= list_first_entry(&vf
->rcv_act
.wait_list
,
1087 struct qlcnic_bc_trans
, list
);
1088 adapter
= vf
->adapter
;
1090 if (qlcnic_sriov_prepare_bc_hdr(trans
, &cmd
, trans
->req_hdr
->seq_id
,
1094 __qlcnic_sriov_process_bc_cmd(adapter
, trans
, &cmd
);
1095 trans
->trans_state
= QLC_INIT
;
1096 __qlcnic_sriov_send_bc_msg(trans
, vf
, QLC_BC_RESPONSE
);
1099 qlcnic_free_mbx_args(&cmd
);
1100 req
= qlcnic_sriov_clear_trans(vf
, trans
, QLC_BC_RESPONSE
);
1101 qlcnic_sriov_cleanup_transaction(trans
);
1103 qlcnic_sriov_schedule_bc_cmd(adapter
->ahw
->sriov
, vf
,
1104 qlcnic_sriov_process_bc_cmd
);
1107 static void qlcnic_sriov_handle_bc_resp(struct qlcnic_bc_hdr
*hdr
,
1108 struct qlcnic_vf_info
*vf
)
1110 struct qlcnic_bc_trans
*trans
;
1113 if (test_and_set_bit(QLC_BC_VF_SEND
, &vf
->state
))
1116 trans
= vf
->send_cmd
;
1121 if (trans
->trans_id
!= hdr
->seq_id
)
1124 pay_size
= qlcnic_sriov_get_bc_paysize(trans
->rsp_pay_size
,
1125 trans
->curr_rsp_frag
);
1126 qlcnic_sriov_pull_bc_msg(vf
->adapter
,
1127 (u32
*)(trans
->rsp_hdr
+ trans
->curr_rsp_frag
),
1128 (u32
*)(trans
->rsp_pay
+ trans
->curr_rsp_frag
),
1130 if (++trans
->curr_rsp_frag
< trans
->rsp_hdr
->num_frags
)
1133 complete(&trans
->resp_cmpl
);
1136 clear_bit(QLC_BC_VF_SEND
, &vf
->state
);
1139 int __qlcnic_sriov_add_act_list(struct qlcnic_sriov
*sriov
,
1140 struct qlcnic_vf_info
*vf
,
1141 struct qlcnic_bc_trans
*trans
)
1143 struct qlcnic_trans_list
*t_list
= &vf
->rcv_act
;
1146 list_add_tail(&trans
->list
, &t_list
->wait_list
);
1147 if (t_list
->count
== 1)
1148 qlcnic_sriov_schedule_bc_cmd(sriov
, vf
,
1149 qlcnic_sriov_process_bc_cmd
);
1153 static int qlcnic_sriov_add_act_list(struct qlcnic_sriov
*sriov
,
1154 struct qlcnic_vf_info
*vf
,
1155 struct qlcnic_bc_trans
*trans
)
1157 struct qlcnic_trans_list
*t_list
= &vf
->rcv_act
;
1159 spin_lock(&t_list
->lock
);
1161 __qlcnic_sriov_add_act_list(sriov
, vf
, trans
);
1163 spin_unlock(&t_list
->lock
);
1167 static void qlcnic_sriov_handle_pending_trans(struct qlcnic_sriov
*sriov
,
1168 struct qlcnic_vf_info
*vf
,
1169 struct qlcnic_bc_hdr
*hdr
)
1171 struct qlcnic_bc_trans
*trans
= NULL
;
1172 struct list_head
*node
;
1173 u32 pay_size
, curr_frag
;
1174 u8 found
= 0, active
= 0;
1176 spin_lock(&vf
->rcv_pend
.lock
);
1177 if (vf
->rcv_pend
.count
> 0) {
1178 list_for_each(node
, &vf
->rcv_pend
.wait_list
) {
1179 trans
= list_entry(node
, struct qlcnic_bc_trans
, list
);
1180 if (trans
->trans_id
== hdr
->seq_id
) {
1188 curr_frag
= trans
->curr_req_frag
;
1189 pay_size
= qlcnic_sriov_get_bc_paysize(trans
->req_pay_size
,
1191 qlcnic_sriov_pull_bc_msg(vf
->adapter
,
1192 (u32
*)(trans
->req_hdr
+ curr_frag
),
1193 (u32
*)(trans
->req_pay
+ curr_frag
),
1195 trans
->curr_req_frag
++;
1196 if (trans
->curr_req_frag
>= hdr
->num_frags
) {
1197 vf
->rcv_pend
.count
--;
1198 list_del(&trans
->list
);
1202 spin_unlock(&vf
->rcv_pend
.lock
);
1205 if (qlcnic_sriov_add_act_list(sriov
, vf
, trans
))
1206 qlcnic_sriov_cleanup_transaction(trans
);
1211 static void qlcnic_sriov_handle_bc_cmd(struct qlcnic_sriov
*sriov
,
1212 struct qlcnic_bc_hdr
*hdr
,
1213 struct qlcnic_vf_info
*vf
)
1215 struct qlcnic_bc_trans
*trans
;
1216 struct qlcnic_adapter
*adapter
= vf
->adapter
;
1217 struct qlcnic_cmd_args cmd
;
1222 if (adapter
->need_fw_reset
)
1225 if (!test_bit(QLC_BC_VF_STATE
, &vf
->state
) &&
1226 hdr
->op_type
!= QLC_BC_CMD
&&
1227 hdr
->cmd_op
!= QLCNIC_BC_CMD_CHANNEL_INIT
)
1230 if (hdr
->frag_num
> 1) {
1231 qlcnic_sriov_handle_pending_trans(sriov
, vf
, hdr
);
1235 cmd_op
= hdr
->cmd_op
;
1236 if (qlcnic_sriov_alloc_bc_trans(&trans
))
1239 if (hdr
->op_type
== QLC_BC_CMD
)
1240 err
= qlcnic_sriov_alloc_bc_mbx_args(&cmd
, cmd_op
);
1242 err
= qlcnic_alloc_mbx_args(&cmd
, adapter
, cmd_op
);
1245 qlcnic_sriov_cleanup_transaction(trans
);
1249 cmd
.op_type
= hdr
->op_type
;
1250 if (qlcnic_sriov_prepare_bc_hdr(trans
, &cmd
, hdr
->seq_id
,
1252 qlcnic_free_mbx_args(&cmd
);
1253 qlcnic_sriov_cleanup_transaction(trans
);
1257 pay_size
= qlcnic_sriov_get_bc_paysize(trans
->req_pay_size
,
1258 trans
->curr_req_frag
);
1259 qlcnic_sriov_pull_bc_msg(vf
->adapter
,
1260 (u32
*)(trans
->req_hdr
+ trans
->curr_req_frag
),
1261 (u32
*)(trans
->req_pay
+ trans
->curr_req_frag
),
1263 trans
->func_id
= vf
->pci_func
;
1265 trans
->trans_id
= hdr
->seq_id
;
1266 trans
->curr_req_frag
++;
1268 if (qlcnic_sriov_soft_flr_check(adapter
, trans
, vf
))
1271 if (trans
->curr_req_frag
== trans
->req_hdr
->num_frags
) {
1272 if (qlcnic_sriov_add_act_list(sriov
, vf
, trans
)) {
1273 qlcnic_free_mbx_args(&cmd
);
1274 qlcnic_sriov_cleanup_transaction(trans
);
1277 spin_lock(&vf
->rcv_pend
.lock
);
1278 list_add_tail(&trans
->list
, &vf
->rcv_pend
.wait_list
);
1279 vf
->rcv_pend
.count
++;
1280 spin_unlock(&vf
->rcv_pend
.lock
);
1284 static void qlcnic_sriov_handle_msg_event(struct qlcnic_sriov
*sriov
,
1285 struct qlcnic_vf_info
*vf
)
1287 struct qlcnic_bc_hdr hdr
;
1288 u32
*ptr
= (u32
*)&hdr
;
1291 for (i
= 2; i
< 6; i
++)
1292 ptr
[i
- 2] = readl(QLCNIC_MBX_FW(vf
->adapter
->ahw
, i
));
1293 msg_type
= hdr
.msg_type
;
1296 case QLC_BC_COMMAND
:
1297 qlcnic_sriov_handle_bc_cmd(sriov
, &hdr
, vf
);
1299 case QLC_BC_RESPONSE
:
1300 qlcnic_sriov_handle_bc_resp(&hdr
, vf
);
1305 static void qlcnic_sriov_handle_flr_event(struct qlcnic_sriov
*sriov
,
1306 struct qlcnic_vf_info
*vf
)
1308 struct qlcnic_adapter
*adapter
= vf
->adapter
;
1310 if (qlcnic_sriov_pf_check(adapter
))
1311 qlcnic_sriov_pf_handle_flr(sriov
, vf
);
1313 dev_err(&adapter
->pdev
->dev
,
1314 "Invalid event to VF. VF should not get FLR event\n");
1317 void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter
*adapter
, u32 event
)
1319 struct qlcnic_vf_info
*vf
;
1320 struct qlcnic_sriov
*sriov
;
1324 sriov
= adapter
->ahw
->sriov
;
1325 pci_func
= qlcnic_sriov_target_func_id(event
);
1326 index
= qlcnic_sriov_func_to_index(adapter
, pci_func
);
1331 vf
= &sriov
->vf_info
[index
];
1332 vf
->pci_func
= pci_func
;
1334 if (qlcnic_sriov_channel_free_check(event
))
1335 complete(&vf
->ch_free_cmpl
);
1337 if (qlcnic_sriov_flr_check(event
)) {
1338 qlcnic_sriov_handle_flr_event(sriov
, vf
);
1342 if (qlcnic_sriov_bc_msg_check(event
))
1343 qlcnic_sriov_handle_msg_event(sriov
, vf
);
1346 int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter
*adapter
, u8 enable
)
1348 struct qlcnic_cmd_args cmd
;
1351 if (!test_bit(__QLCNIC_SRIOV_ENABLE
, &adapter
->state
))
1354 if (qlcnic_alloc_mbx_args(&cmd
, adapter
, QLCNIC_CMD_BC_EVENT_SETUP
))
1358 cmd
.req
.arg
[1] = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7);
1360 err
= qlcnic_83xx_mbx_op(adapter
, &cmd
);
1362 if (err
!= QLCNIC_RCODE_SUCCESS
) {
1363 dev_err(&adapter
->pdev
->dev
,
1364 "Failed to %s bc events, err=%d\n",
1365 (enable
? "enable" : "disable"), err
);
1368 qlcnic_free_mbx_args(&cmd
);
1372 static int qlcnic_sriov_retry_bc_cmd(struct qlcnic_adapter
*adapter
,
1373 struct qlcnic_bc_trans
*trans
)
1375 u8 max
= QLC_BC_CMD_MAX_RETRY_CNT
;
1378 state
= QLCRDX(adapter
->ahw
, QLC_83XX_IDC_DEV_STATE
);
1379 if (state
== QLC_83XX_IDC_DEV_READY
) {
1381 clear_bit(QLC_BC_VF_CHANNEL
, &trans
->vf
->state
);
1382 trans
->trans_state
= QLC_INIT
;
1383 if (++adapter
->fw_fail_cnt
> max
)
1392 static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter
*adapter
,
1393 struct qlcnic_cmd_args
*cmd
)
1395 struct qlcnic_hardware_context
*ahw
= adapter
->ahw
;
1396 struct device
*dev
= &adapter
->pdev
->dev
;
1397 struct qlcnic_bc_trans
*trans
;
1399 u32 rsp_data
, opcode
, mbx_err_code
, rsp
;
1400 u16 seq
= ++adapter
->ahw
->sriov
->bc
.trans_counter
;
1401 u8 func
= ahw
->pci_func
;
1403 rsp
= qlcnic_sriov_alloc_bc_trans(&trans
);
1407 rsp
= qlcnic_sriov_prepare_bc_hdr(trans
, cmd
, seq
, QLC_BC_COMMAND
);
1409 goto cleanup_transaction
;
1412 if (!test_bit(QLC_83XX_MBX_READY
, &adapter
->ahw
->idc
.status
)) {
1414 QLCDB(adapter
, DRV
, "MBX not Ready!(cmd 0x%x) for VF 0x%x\n",
1415 QLCNIC_MBX_RSP(cmd
->req
.arg
[0]), func
);
1419 err
= qlcnic_sriov_send_bc_cmd(adapter
, trans
, func
);
1421 dev_err(dev
, "MBX command 0x%x timed out for VF %d\n",
1422 (cmd
->req
.arg
[0] & 0xffff), func
);
1423 rsp
= QLCNIC_RCODE_TIMEOUT
;
1425 /* After adapter reset PF driver may take some time to
1426 * respond to VF's request. Retry request till maximum retries.
1428 if ((trans
->req_hdr
->cmd_op
== QLCNIC_BC_CMD_CHANNEL_INIT
) &&
1429 !qlcnic_sriov_retry_bc_cmd(adapter
, trans
))
1435 rsp_data
= cmd
->rsp
.arg
[0];
1436 mbx_err_code
= QLCNIC_MBX_STATUS(rsp_data
);
1437 opcode
= QLCNIC_MBX_RSP(cmd
->req
.arg
[0]);
1439 if ((mbx_err_code
== QLCNIC_MBX_RSP_OK
) ||
1440 (mbx_err_code
== QLCNIC_MBX_PORT_RSP_OK
)) {
1441 rsp
= QLCNIC_RCODE_SUCCESS
;
1447 "MBX command 0x%x failed with err:0x%x for VF %d\n",
1448 opcode
, mbx_err_code
, func
);
1452 if (rsp
== QLCNIC_RCODE_TIMEOUT
) {
1453 ahw
->reset_context
= 1;
1454 adapter
->need_fw_reset
= 1;
1455 clear_bit(QLC_83XX_MBX_READY
, &ahw
->idc
.status
);
1458 cleanup_transaction
:
1459 qlcnic_sriov_cleanup_transaction(trans
);
1463 int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter
*adapter
, u8 cmd_op
)
1465 struct qlcnic_cmd_args cmd
;
1466 struct qlcnic_vf_info
*vf
= &adapter
->ahw
->sriov
->vf_info
[0];
1469 if (qlcnic_sriov_alloc_bc_mbx_args(&cmd
, cmd_op
))
1472 ret
= qlcnic_issue_cmd(adapter
, &cmd
);
1474 dev_err(&adapter
->pdev
->dev
,
1475 "Failed bc channel %s %d\n", cmd_op
? "term" : "init",
1480 cmd_op
= (cmd
.rsp
.arg
[0] & 0xff);
1481 if (cmd
.rsp
.arg
[0] >> 25 == 2)
1483 if (cmd_op
== QLCNIC_BC_CMD_CHANNEL_INIT
)
1484 set_bit(QLC_BC_VF_STATE
, &vf
->state
);
1486 clear_bit(QLC_BC_VF_STATE
, &vf
->state
);
1489 qlcnic_free_mbx_args(&cmd
);
1493 void qlcnic_vf_add_mc_list(struct net_device
*netdev
, u16 vlan
)
1495 struct qlcnic_adapter
*adapter
= netdev_priv(netdev
);
1496 struct qlcnic_mac_list_s
*cur
;
1497 struct list_head
*head
, tmp_list
;
1499 INIT_LIST_HEAD(&tmp_list
);
1500 head
= &adapter
->vf_mc_list
;
1501 netif_addr_lock_bh(netdev
);
1503 while (!list_empty(head
)) {
1504 cur
= list_entry(head
->next
, struct qlcnic_mac_list_s
, list
);
1505 list_move(&cur
->list
, &tmp_list
);
1508 netif_addr_unlock_bh(netdev
);
1510 while (!list_empty(&tmp_list
)) {
1511 cur
= list_entry((&tmp_list
)->next
,
1512 struct qlcnic_mac_list_s
, list
);
1513 qlcnic_nic_add_mac(adapter
, cur
->mac_addr
, vlan
);
1514 list_del(&cur
->list
);
1519 void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel
*bc
)
1521 struct list_head
*head
= &bc
->async_list
;
1522 struct qlcnic_async_work_list
*entry
;
1524 while (!list_empty(head
)) {
1525 entry
= list_entry(head
->next
, struct qlcnic_async_work_list
,
1527 cancel_work_sync(&entry
->work
);
1528 list_del(&entry
->list
);
1533 static void qlcnic_sriov_vf_set_multi(struct net_device
*netdev
)
1535 struct qlcnic_adapter
*adapter
= netdev_priv(netdev
);
1538 if (!test_bit(__QLCNIC_FW_ATTACHED
, &adapter
->state
))
1541 vlan
= adapter
->ahw
->sriov
->vlan
;
1542 __qlcnic_set_multi(netdev
, vlan
);
1545 static void qlcnic_sriov_handle_async_multi(struct work_struct
*work
)
1547 struct qlcnic_async_work_list
*entry
;
1548 struct net_device
*netdev
;
1550 entry
= container_of(work
, struct qlcnic_async_work_list
, work
);
1551 netdev
= (struct net_device
*)entry
->ptr
;
1553 qlcnic_sriov_vf_set_multi(netdev
);
1557 static struct qlcnic_async_work_list
*
1558 qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel
*bc
)
1560 struct list_head
*node
;
1561 struct qlcnic_async_work_list
*entry
= NULL
;
1564 list_for_each(node
, &bc
->async_list
) {
1565 entry
= list_entry(node
, struct qlcnic_async_work_list
, list
);
1566 if (!work_pending(&entry
->work
)) {
1573 entry
= kzalloc(sizeof(struct qlcnic_async_work_list
),
1577 list_add_tail(&entry
->list
, &bc
->async_list
);
1583 static void qlcnic_sriov_schedule_bc_async_work(struct qlcnic_back_channel
*bc
,
1584 work_func_t func
, void *data
)
1586 struct qlcnic_async_work_list
*entry
= NULL
;
1588 entry
= qlcnic_sriov_get_free_node_async_work(bc
);
1593 INIT_WORK(&entry
->work
, func
);
1594 queue_work(bc
->bc_async_wq
, &entry
->work
);
1597 void qlcnic_sriov_vf_schedule_multi(struct net_device
*netdev
)
1600 struct qlcnic_adapter
*adapter
= netdev_priv(netdev
);
1601 struct qlcnic_back_channel
*bc
= &adapter
->ahw
->sriov
->bc
;
1603 if (adapter
->need_fw_reset
)
1606 qlcnic_sriov_schedule_bc_async_work(bc
, qlcnic_sriov_handle_async_multi
,
1610 static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter
*adapter
)
1614 set_bit(QLC_83XX_MBX_READY
, &adapter
->ahw
->idc
.status
);
1615 qlcnic_83xx_enable_mbx_intrpt(adapter
);
1617 err
= qlcnic_sriov_cfg_bc_intr(adapter
, 1);
1621 err
= qlcnic_sriov_channel_cfg_cmd(adapter
, QLCNIC_BC_CMD_CHANNEL_INIT
);
1623 goto err_out_cleanup_bc_intr
;
1625 err
= qlcnic_sriov_vf_init_driver(adapter
);
1627 goto err_out_term_channel
;
1631 err_out_term_channel
:
1632 qlcnic_sriov_channel_cfg_cmd(adapter
, QLCNIC_BC_CMD_CHANNEL_TERM
);
1634 err_out_cleanup_bc_intr
:
1635 qlcnic_sriov_cfg_bc_intr(adapter
, 0);
1639 static void qlcnic_sriov_vf_attach(struct qlcnic_adapter
*adapter
)
1641 struct net_device
*netdev
= adapter
->netdev
;
1643 if (netif_running(netdev
)) {
1644 if (!qlcnic_up(adapter
, netdev
))
1645 qlcnic_restore_indev_addr(netdev
, NETDEV_UP
);
1648 netif_device_attach(netdev
);
1651 static void qlcnic_sriov_vf_detach(struct qlcnic_adapter
*adapter
)
1653 struct qlcnic_hardware_context
*ahw
= adapter
->ahw
;
1654 struct qlcnic_intrpt_config
*intr_tbl
= ahw
->intr_tbl
;
1655 struct net_device
*netdev
= adapter
->netdev
;
1656 u8 i
, max_ints
= ahw
->num_msix
- 1;
1658 qlcnic_83xx_disable_mbx_intr(adapter
);
1659 netif_device_detach(netdev
);
1660 if (netif_running(netdev
))
1661 qlcnic_down(adapter
, netdev
);
1663 for (i
= 0; i
< max_ints
; i
++) {
1665 intr_tbl
[i
].enabled
= 0;
1666 intr_tbl
[i
].src
= 0;
1668 ahw
->reset_context
= 0;
1671 static int qlcnic_sriov_vf_handle_dev_ready(struct qlcnic_adapter
*adapter
)
1673 struct qlcnic_hardware_context
*ahw
= adapter
->ahw
;
1674 struct device
*dev
= &adapter
->pdev
->dev
;
1675 struct qlc_83xx_idc
*idc
= &ahw
->idc
;
1676 u8 func
= ahw
->pci_func
;
1679 if ((idc
->prev_state
== QLC_83XX_IDC_DEV_NEED_RESET
) ||
1680 (idc
->prev_state
== QLC_83XX_IDC_DEV_INIT
)) {
1681 if (!qlcnic_sriov_vf_reinit_driver(adapter
)) {
1682 qlcnic_sriov_vf_attach(adapter
);
1683 adapter
->fw_fail_cnt
= 0;
1685 "%s: Reinitialization of VF 0x%x done after FW reset\n",
1689 "%s: Reinitialization of VF 0x%x failed after FW reset\n",
1691 state
= QLCRDX(ahw
, QLC_83XX_IDC_DEV_STATE
);
1692 dev_info(dev
, "Current state 0x%x after FW reset\n",
1700 static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter
*adapter
)
1702 struct qlcnic_hardware_context
*ahw
= adapter
->ahw
;
1703 struct device
*dev
= &adapter
->pdev
->dev
;
1704 struct qlc_83xx_idc
*idc
= &ahw
->idc
;
1705 u8 func
= ahw
->pci_func
;
1708 adapter
->reset_ctx_cnt
++;
1710 /* Skip the context reset and check if FW is hung */
1711 if (adapter
->reset_ctx_cnt
< 3) {
1712 adapter
->need_fw_reset
= 1;
1713 clear_bit(QLC_83XX_MBX_READY
, &idc
->status
);
1715 "Resetting context, wait here to check if FW is in failed state\n");
1719 /* Check if number of resets exceed the threshold.
1720 * If it exceeds the threshold just fail the VF.
1722 if (adapter
->reset_ctx_cnt
> QLC_83XX_VF_RESET_FAIL_THRESH
) {
1723 clear_bit(QLC_83XX_MODULE_LOADED
, &idc
->status
);
1724 adapter
->tx_timeo_cnt
= 0;
1725 adapter
->fw_fail_cnt
= 0;
1726 adapter
->reset_ctx_cnt
= 0;
1727 qlcnic_sriov_vf_detach(adapter
);
1729 "Device context resets have exceeded the threshold, device interface will be shutdown\n");
1733 dev_info(dev
, "Resetting context of VF 0x%x\n", func
);
1734 dev_info(dev
, "%s: Context reset count %d for VF 0x%x\n",
1735 __func__
, adapter
->reset_ctx_cnt
, func
);
1736 set_bit(__QLCNIC_RESETTING
, &adapter
->state
);
1737 adapter
->need_fw_reset
= 1;
1738 clear_bit(QLC_83XX_MBX_READY
, &idc
->status
);
1739 qlcnic_sriov_vf_detach(adapter
);
1740 adapter
->need_fw_reset
= 0;
1742 if (!qlcnic_sriov_vf_reinit_driver(adapter
)) {
1743 qlcnic_sriov_vf_attach(adapter
);
1744 adapter
->tx_timeo_cnt
= 0;
1745 adapter
->reset_ctx_cnt
= 0;
1746 adapter
->fw_fail_cnt
= 0;
1747 dev_info(dev
, "Done resetting context for VF 0x%x\n", func
);
1749 dev_err(dev
, "%s: Reinitialization of VF 0x%x failed\n",
1751 state
= QLCRDX(ahw
, QLC_83XX_IDC_DEV_STATE
);
1752 dev_info(dev
, "%s: Current state 0x%x\n", __func__
, state
);
1758 static int qlcnic_sriov_vf_idc_ready_state(struct qlcnic_adapter
*adapter
)
1760 struct qlcnic_hardware_context
*ahw
= adapter
->ahw
;
1763 if (ahw
->idc
.prev_state
!= QLC_83XX_IDC_DEV_READY
)
1764 ret
= qlcnic_sriov_vf_handle_dev_ready(adapter
);
1765 else if (ahw
->reset_context
)
1766 ret
= qlcnic_sriov_vf_handle_context_reset(adapter
);
1768 clear_bit(__QLCNIC_RESETTING
, &adapter
->state
);
1772 static int qlcnic_sriov_vf_idc_failed_state(struct qlcnic_adapter
*adapter
)
1774 struct qlc_83xx_idc
*idc
= &adapter
->ahw
->idc
;
1776 dev_err(&adapter
->pdev
->dev
, "Device is in failed state\n");
1777 if (idc
->prev_state
== QLC_83XX_IDC_DEV_READY
)
1778 qlcnic_sriov_vf_detach(adapter
);
1780 clear_bit(QLC_83XX_MODULE_LOADED
, &idc
->status
);
1781 clear_bit(__QLCNIC_RESETTING
, &adapter
->state
);
1786 qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter
*adapter
)
1788 struct qlc_83xx_idc
*idc
= &adapter
->ahw
->idc
;
1790 dev_info(&adapter
->pdev
->dev
, "Device is in quiescent state\n");
1791 if (idc
->prev_state
== QLC_83XX_IDC_DEV_READY
) {
1792 set_bit(__QLCNIC_RESETTING
, &adapter
->state
);
1793 adapter
->tx_timeo_cnt
= 0;
1794 adapter
->reset_ctx_cnt
= 0;
1795 clear_bit(QLC_83XX_MBX_READY
, &idc
->status
);
1796 qlcnic_sriov_vf_detach(adapter
);
1802 static int qlcnic_sriov_vf_idc_init_reset_state(struct qlcnic_adapter
*adapter
)
1804 struct qlc_83xx_idc
*idc
= &adapter
->ahw
->idc
;
1805 u8 func
= adapter
->ahw
->pci_func
;
1807 if (idc
->prev_state
== QLC_83XX_IDC_DEV_READY
) {
1808 dev_err(&adapter
->pdev
->dev
,
1809 "Firmware hang detected by VF 0x%x\n", func
);
1810 set_bit(__QLCNIC_RESETTING
, &adapter
->state
);
1811 adapter
->tx_timeo_cnt
= 0;
1812 adapter
->reset_ctx_cnt
= 0;
1813 clear_bit(QLC_83XX_MBX_READY
, &idc
->status
);
1814 qlcnic_sriov_vf_detach(adapter
);
1819 static int qlcnic_sriov_vf_idc_unknown_state(struct qlcnic_adapter
*adapter
)
1821 dev_err(&adapter
->pdev
->dev
, "%s: Device in unknown state\n", __func__
);
1825 static void qlcnic_sriov_vf_poll_dev_state(struct work_struct
*work
)
1827 struct qlcnic_adapter
*adapter
;
1828 struct qlc_83xx_idc
*idc
;
1831 adapter
= container_of(work
, struct qlcnic_adapter
, fw_work
.work
);
1832 idc
= &adapter
->ahw
->idc
;
1833 idc
->curr_state
= QLCRDX(adapter
->ahw
, QLC_83XX_IDC_DEV_STATE
);
1835 switch (idc
->curr_state
) {
1836 case QLC_83XX_IDC_DEV_READY
:
1837 ret
= qlcnic_sriov_vf_idc_ready_state(adapter
);
1839 case QLC_83XX_IDC_DEV_NEED_RESET
:
1840 case QLC_83XX_IDC_DEV_INIT
:
1841 ret
= qlcnic_sriov_vf_idc_init_reset_state(adapter
);
1843 case QLC_83XX_IDC_DEV_NEED_QUISCENT
:
1844 ret
= qlcnic_sriov_vf_idc_need_quiescent_state(adapter
);
1846 case QLC_83XX_IDC_DEV_FAILED
:
1847 ret
= qlcnic_sriov_vf_idc_failed_state(adapter
);
1849 case QLC_83XX_IDC_DEV_QUISCENT
:
1852 ret
= qlcnic_sriov_vf_idc_unknown_state(adapter
);
1855 idc
->prev_state
= idc
->curr_state
;
1856 if (!ret
&& test_bit(QLC_83XX_MODULE_LOADED
, &idc
->status
))
1857 qlcnic_schedule_work(adapter
, qlcnic_sriov_vf_poll_dev_state
,
1861 static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter
*adapter
)
1863 while (test_and_set_bit(__QLCNIC_RESETTING
, &adapter
->state
))
1866 clear_bit(QLC_83XX_MODULE_LOADED
, &adapter
->ahw
->idc
.status
);
1867 clear_bit(__QLCNIC_RESETTING
, &adapter
->state
);
1868 cancel_delayed_work_sync(&adapter
->fw_work
);
1871 static int qlcnic_sriov_validate_vlan_cfg(struct qlcnic_sriov
*sriov
,
1874 u16 vlan
= sriov
->vlan
;
1878 if (sriov
->vlan_mode
!= QLC_GUEST_VLAN_MODE
)
1885 if (sriov
->any_vlan
) {
1886 for (i
= 0; i
< sriov
->num_allowed_vlans
; i
++) {
1887 if (sriov
->allowed_vlans
[i
] == vid
)
1895 if (!vlan
|| vlan
!= vid
)
1902 int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter
*adapter
,
1905 struct qlcnic_sriov
*sriov
= adapter
->ahw
->sriov
;
1906 struct qlcnic_cmd_args cmd
;
1912 ret
= qlcnic_sriov_validate_vlan_cfg(sriov
, vid
, enable
);
1916 ret
= qlcnic_sriov_alloc_bc_mbx_args(&cmd
,
1917 QLCNIC_BC_CMD_CFG_GUEST_VLAN
);
1921 cmd
.req
.arg
[1] = (enable
& 1) | vid
<< 16;
1923 qlcnic_sriov_cleanup_async_list(&sriov
->bc
);
1924 ret
= qlcnic_issue_cmd(adapter
, &cmd
);
1926 dev_err(&adapter
->pdev
->dev
,
1927 "Failed to configure guest VLAN, err=%d\n", ret
);
1929 qlcnic_free_mac_list(adapter
);
1936 qlcnic_sriov_vf_set_multi(adapter
->netdev
);
1939 qlcnic_free_mbx_args(&cmd
);
1943 static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter
*adapter
)
1945 struct list_head
*head
= &adapter
->mac_list
;
1946 struct qlcnic_mac_list_s
*cur
;
1949 vlan
= adapter
->ahw
->sriov
->vlan
;
1951 while (!list_empty(head
)) {
1952 cur
= list_entry(head
->next
, struct qlcnic_mac_list_s
, list
);
1953 qlcnic_sre_macaddr_change(adapter
, cur
->mac_addr
,
1954 vlan
, QLCNIC_MAC_DEL
);
1955 list_del(&cur
->list
);
1960 int qlcnic_sriov_vf_shutdown(struct pci_dev
*pdev
)
1962 struct qlcnic_adapter
*adapter
= pci_get_drvdata(pdev
);
1963 struct net_device
*netdev
= adapter
->netdev
;
1966 netif_device_detach(netdev
);
1967 qlcnic_cancel_idc_work(adapter
);
1969 if (netif_running(netdev
))
1970 qlcnic_down(adapter
, netdev
);
1972 qlcnic_sriov_channel_cfg_cmd(adapter
, QLCNIC_BC_CMD_CHANNEL_TERM
);
1973 qlcnic_sriov_cfg_bc_intr(adapter
, 0);
1974 qlcnic_83xx_disable_mbx_intr(adapter
);
1975 cancel_delayed_work_sync(&adapter
->idc_aen_work
);
1977 retval
= pci_save_state(pdev
);
1984 int qlcnic_sriov_vf_resume(struct qlcnic_adapter
*adapter
)
1986 struct qlc_83xx_idc
*idc
= &adapter
->ahw
->idc
;
1987 struct net_device
*netdev
= adapter
->netdev
;
1990 set_bit(QLC_83XX_MODULE_LOADED
, &idc
->status
);
1991 qlcnic_83xx_enable_mbx_intrpt(adapter
);
1992 err
= qlcnic_sriov_cfg_bc_intr(adapter
, 1);
1996 err
= qlcnic_sriov_channel_cfg_cmd(adapter
, QLCNIC_BC_CMD_CHANNEL_INIT
);
1998 if (netif_running(netdev
)) {
1999 err
= qlcnic_up(adapter
, netdev
);
2001 qlcnic_restore_indev_addr(netdev
, NETDEV_UP
);
2005 netif_device_attach(netdev
);
2006 qlcnic_schedule_work(adapter
, qlcnic_sriov_vf_poll_dev_state
,