/*
 * QLogic qlcnic NIC Driver
 * Copyright (c) 2009-2013 QLogic Corporation
 *
 * See LICENSE.qlcnic for copyright and licensing details.
 */
8 #include <linux/types.h>
10 #include "qlcnic_sriov.h"
12 #include "qlcnic_83xx_hw.h"
14 #define QLC_BC_COMMAND 0
15 #define QLC_BC_RESPONSE 1
17 #define QLC_MBOX_RESP_TIMEOUT (10 * HZ)
18 #define QLC_MBOX_CH_FREE_TIMEOUT (10 * HZ)
21 #define QLC_BC_CFREE 1
23 #define QLC_BC_HDR_SZ 16
24 #define QLC_BC_PAYLOAD_SZ (1024 - QLC_BC_HDR_SZ)
26 #define QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF 2048
27 #define QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF 512
29 #define QLC_83XX_VF_RESET_FAIL_THRESH 8
30 #define QLC_BC_CMD_MAX_RETRY_CNT 5
32 static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter
*);
33 static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args
*, u32
);
34 static void qlcnic_sriov_vf_poll_dev_state(struct work_struct
*);
35 static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter
*);
36 static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans
*);
37 static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter
*,
38 struct qlcnic_cmd_args
*);
39 static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter
*, u8
);
40 static void qlcnic_sriov_process_bc_cmd(struct work_struct
*);
41 static int qlcnic_sriov_vf_shutdown(struct pci_dev
*);
42 static int qlcnic_sriov_vf_resume(struct qlcnic_adapter
*);
43 static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter
*,
44 struct qlcnic_cmd_args
*);
46 static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops
= {
47 .read_crb
= qlcnic_83xx_read_crb
,
48 .write_crb
= qlcnic_83xx_write_crb
,
49 .read_reg
= qlcnic_83xx_rd_reg_indirect
,
50 .write_reg
= qlcnic_83xx_wrt_reg_indirect
,
51 .get_mac_address
= qlcnic_83xx_get_mac_address
,
52 .setup_intr
= qlcnic_83xx_setup_intr
,
53 .alloc_mbx_args
= qlcnic_83xx_alloc_mbx_args
,
54 .mbx_cmd
= qlcnic_sriov_issue_cmd
,
55 .get_func_no
= qlcnic_83xx_get_func_no
,
56 .api_lock
= qlcnic_83xx_cam_lock
,
57 .api_unlock
= qlcnic_83xx_cam_unlock
,
58 .process_lb_rcv_ring_diag
= qlcnic_83xx_process_rcv_ring_diag
,
59 .create_rx_ctx
= qlcnic_83xx_create_rx_ctx
,
60 .create_tx_ctx
= qlcnic_83xx_create_tx_ctx
,
61 .del_rx_ctx
= qlcnic_83xx_del_rx_ctx
,
62 .del_tx_ctx
= qlcnic_83xx_del_tx_ctx
,
63 .setup_link_event
= qlcnic_83xx_setup_link_event
,
64 .get_nic_info
= qlcnic_83xx_get_nic_info
,
65 .get_pci_info
= qlcnic_83xx_get_pci_info
,
66 .set_nic_info
= qlcnic_83xx_set_nic_info
,
67 .change_macvlan
= qlcnic_83xx_sre_macaddr_change
,
68 .napi_enable
= qlcnic_83xx_napi_enable
,
69 .napi_disable
= qlcnic_83xx_napi_disable
,
70 .config_intr_coal
= qlcnic_83xx_config_intr_coal
,
71 .config_rss
= qlcnic_83xx_config_rss
,
72 .config_hw_lro
= qlcnic_83xx_config_hw_lro
,
73 .config_promisc_mode
= qlcnic_83xx_nic_set_promisc
,
74 .change_l2_filter
= qlcnic_83xx_change_l2_filter
,
75 .get_board_info
= qlcnic_83xx_get_port_info
,
76 .free_mac_list
= qlcnic_sriov_vf_free_mac_list
,
77 .enable_sds_intr
= qlcnic_83xx_enable_sds_intr
,
78 .disable_sds_intr
= qlcnic_83xx_disable_sds_intr
,
81 static struct qlcnic_nic_template qlcnic_sriov_vf_ops
= {
82 .config_bridged_mode
= qlcnic_config_bridged_mode
,
83 .config_led
= qlcnic_config_led
,
84 .cancel_idc_work
= qlcnic_sriov_vf_cancel_fw_work
,
85 .napi_add
= qlcnic_83xx_napi_add
,
86 .napi_del
= qlcnic_83xx_napi_del
,
87 .shutdown
= qlcnic_sriov_vf_shutdown
,
88 .resume
= qlcnic_sriov_vf_resume
,
89 .config_ipaddr
= qlcnic_83xx_config_ipaddr
,
90 .clear_legacy_intr
= qlcnic_83xx_clear_legacy_intr
,
93 static const struct qlcnic_mailbox_metadata qlcnic_sriov_bc_mbx_tbl
[] = {
94 {QLCNIC_BC_CMD_CHANNEL_INIT
, 2, 2},
95 {QLCNIC_BC_CMD_CHANNEL_TERM
, 2, 2},
96 {QLCNIC_BC_CMD_GET_ACL
, 3, 14},
97 {QLCNIC_BC_CMD_CFG_GUEST_VLAN
, 2, 2},
100 static inline bool qlcnic_sriov_bc_msg_check(u32 val
)
102 return (val
& (1 << QLC_BC_MSG
)) ? true : false;
105 static inline bool qlcnic_sriov_channel_free_check(u32 val
)
107 return (val
& (1 << QLC_BC_CFREE
)) ? true : false;
110 static inline bool qlcnic_sriov_flr_check(u32 val
)
112 return (val
& (1 << QLC_BC_FLR
)) ? true : false;
115 static inline u8
qlcnic_sriov_target_func_id(u32 val
)
117 return (val
>> 4) & 0xff;
120 static int qlcnic_sriov_virtid_fn(struct qlcnic_adapter
*adapter
, int vf_id
)
122 struct pci_dev
*dev
= adapter
->pdev
;
126 if (qlcnic_sriov_vf_check(adapter
))
129 pos
= pci_find_ext_capability(dev
, PCI_EXT_CAP_ID_SRIOV
);
130 pci_read_config_word(dev
, pos
+ PCI_SRIOV_VF_OFFSET
, &offset
);
131 pci_read_config_word(dev
, pos
+ PCI_SRIOV_VF_STRIDE
, &stride
);
133 return (dev
->devfn
+ offset
+ stride
* vf_id
) & 0xff;
136 int qlcnic_sriov_init(struct qlcnic_adapter
*adapter
, int num_vfs
)
138 struct qlcnic_sriov
*sriov
;
139 struct qlcnic_back_channel
*bc
;
140 struct workqueue_struct
*wq
;
141 struct qlcnic_vport
*vp
;
142 struct qlcnic_vf_info
*vf
;
145 if (!qlcnic_sriov_enable_check(adapter
))
148 sriov
= kzalloc(sizeof(struct qlcnic_sriov
), GFP_KERNEL
);
152 adapter
->ahw
->sriov
= sriov
;
153 sriov
->num_vfs
= num_vfs
;
155 sriov
->vf_info
= kzalloc(sizeof(struct qlcnic_vf_info
) *
156 num_vfs
, GFP_KERNEL
);
157 if (!sriov
->vf_info
) {
159 goto qlcnic_free_sriov
;
162 wq
= create_singlethread_workqueue("bc-trans");
165 dev_err(&adapter
->pdev
->dev
,
166 "Cannot create bc-trans workqueue\n");
167 goto qlcnic_free_vf_info
;
170 bc
->bc_trans_wq
= wq
;
172 wq
= create_singlethread_workqueue("async");
175 dev_err(&adapter
->pdev
->dev
, "Cannot create async workqueue\n");
176 goto qlcnic_destroy_trans_wq
;
179 bc
->bc_async_wq
= wq
;
180 INIT_LIST_HEAD(&bc
->async_list
);
182 for (i
= 0; i
< num_vfs
; i
++) {
183 vf
= &sriov
->vf_info
[i
];
184 vf
->adapter
= adapter
;
185 vf
->pci_func
= qlcnic_sriov_virtid_fn(adapter
, i
);
186 mutex_init(&vf
->send_cmd_lock
);
187 spin_lock_init(&vf
->vlan_list_lock
);
188 INIT_LIST_HEAD(&vf
->rcv_act
.wait_list
);
189 INIT_LIST_HEAD(&vf
->rcv_pend
.wait_list
);
190 spin_lock_init(&vf
->rcv_act
.lock
);
191 spin_lock_init(&vf
->rcv_pend
.lock
);
192 init_completion(&vf
->ch_free_cmpl
);
194 INIT_WORK(&vf
->trans_work
, qlcnic_sriov_process_bc_cmd
);
196 if (qlcnic_sriov_pf_check(adapter
)) {
197 vp
= kzalloc(sizeof(struct qlcnic_vport
), GFP_KERNEL
);
200 goto qlcnic_destroy_async_wq
;
202 sriov
->vf_info
[i
].vp
= vp
;
203 vp
->vlan_mode
= QLC_GUEST_VLAN_MODE
;
204 vp
->max_tx_bw
= MAX_BW
;
205 vp
->min_tx_bw
= MIN_BW
;
206 vp
->spoofchk
= false;
207 random_ether_addr(vp
->mac
);
208 dev_info(&adapter
->pdev
->dev
,
209 "MAC Address %pM is configured for VF %d\n",
216 qlcnic_destroy_async_wq
:
217 destroy_workqueue(bc
->bc_async_wq
);
219 qlcnic_destroy_trans_wq
:
220 destroy_workqueue(bc
->bc_trans_wq
);
223 kfree(sriov
->vf_info
);
226 kfree(adapter
->ahw
->sriov
);
230 void qlcnic_sriov_cleanup_list(struct qlcnic_trans_list
*t_list
)
232 struct qlcnic_bc_trans
*trans
;
233 struct qlcnic_cmd_args cmd
;
236 spin_lock_irqsave(&t_list
->lock
, flags
);
238 while (!list_empty(&t_list
->wait_list
)) {
239 trans
= list_first_entry(&t_list
->wait_list
,
240 struct qlcnic_bc_trans
, list
);
241 list_del(&trans
->list
);
243 cmd
.req
.arg
= (u32
*)trans
->req_pay
;
244 cmd
.rsp
.arg
= (u32
*)trans
->rsp_pay
;
245 qlcnic_free_mbx_args(&cmd
);
246 qlcnic_sriov_cleanup_transaction(trans
);
249 spin_unlock_irqrestore(&t_list
->lock
, flags
);
252 void __qlcnic_sriov_cleanup(struct qlcnic_adapter
*adapter
)
254 struct qlcnic_sriov
*sriov
= adapter
->ahw
->sriov
;
255 struct qlcnic_back_channel
*bc
= &sriov
->bc
;
256 struct qlcnic_vf_info
*vf
;
259 if (!qlcnic_sriov_enable_check(adapter
))
262 qlcnic_sriov_cleanup_async_list(bc
);
263 destroy_workqueue(bc
->bc_async_wq
);
265 for (i
= 0; i
< sriov
->num_vfs
; i
++) {
266 vf
= &sriov
->vf_info
[i
];
267 qlcnic_sriov_cleanup_list(&vf
->rcv_pend
);
268 cancel_work_sync(&vf
->trans_work
);
269 qlcnic_sriov_cleanup_list(&vf
->rcv_act
);
272 destroy_workqueue(bc
->bc_trans_wq
);
274 for (i
= 0; i
< sriov
->num_vfs
; i
++)
275 kfree(sriov
->vf_info
[i
].vp
);
277 kfree(sriov
->vf_info
);
278 kfree(adapter
->ahw
->sriov
);
281 static void qlcnic_sriov_vf_cleanup(struct qlcnic_adapter
*adapter
)
283 qlcnic_sriov_channel_cfg_cmd(adapter
, QLCNIC_BC_CMD_CHANNEL_TERM
);
284 qlcnic_sriov_cfg_bc_intr(adapter
, 0);
285 __qlcnic_sriov_cleanup(adapter
);
288 void qlcnic_sriov_cleanup(struct qlcnic_adapter
*adapter
)
290 if (!test_bit(__QLCNIC_SRIOV_ENABLE
, &adapter
->state
))
293 qlcnic_sriov_free_vlans(adapter
);
295 if (qlcnic_sriov_pf_check(adapter
))
296 qlcnic_sriov_pf_cleanup(adapter
);
298 if (qlcnic_sriov_vf_check(adapter
))
299 qlcnic_sriov_vf_cleanup(adapter
);
302 static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter
*adapter
, u32
*hdr
,
303 u32
*pay
, u8 pci_func
, u8 size
)
305 struct qlcnic_hardware_context
*ahw
= adapter
->ahw
;
306 struct qlcnic_mailbox
*mbx
= ahw
->mailbox
;
307 struct qlcnic_cmd_args cmd
;
308 unsigned long timeout
;
311 memset(&cmd
, 0, sizeof(struct qlcnic_cmd_args
));
315 cmd
.func_num
= pci_func
;
316 cmd
.op_type
= QLC_83XX_MBX_POST_BC_OP
;
317 cmd
.cmd_op
= ((struct qlcnic_bc_hdr
*)hdr
)->cmd_op
;
319 err
= mbx
->ops
->enqueue_cmd(adapter
, &cmd
, &timeout
);
321 dev_err(&adapter
->pdev
->dev
,
322 "%s: Mailbox not available, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
323 __func__
, cmd
.cmd_op
, cmd
.type
, ahw
->pci_func
,
328 if (!wait_for_completion_timeout(&cmd
.completion
, timeout
)) {
329 dev_err(&adapter
->pdev
->dev
,
330 "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
331 __func__
, cmd
.cmd_op
, cmd
.type
, ahw
->pci_func
,
333 flush_workqueue(mbx
->work_q
);
336 return cmd
.rsp_opcode
;
339 static void qlcnic_sriov_vf_cfg_buff_desc(struct qlcnic_adapter
*adapter
)
341 adapter
->num_rxd
= QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF
;
342 adapter
->max_rxd
= MAX_RCV_DESCRIPTORS_10G
;
343 adapter
->num_jumbo_rxd
= QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF
;
344 adapter
->max_jumbo_rxd
= MAX_JUMBO_RCV_DESCRIPTORS_10G
;
345 adapter
->num_txd
= MAX_CMD_DESCRIPTORS
;
346 adapter
->max_rds_rings
= MAX_RDS_RINGS
;
349 int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter
*adapter
,
350 struct qlcnic_info
*npar_info
, u16 vport_id
)
352 struct device
*dev
= &adapter
->pdev
->dev
;
353 struct qlcnic_cmd_args cmd
;
357 err
= qlcnic_alloc_mbx_args(&cmd
, adapter
, QLCNIC_CMD_GET_NIC_INFO
);
361 cmd
.req
.arg
[1] = vport_id
<< 16 | 0x1;
362 err
= qlcnic_issue_cmd(adapter
, &cmd
);
364 dev_err(&adapter
->pdev
->dev
,
365 "Failed to get vport info, err=%d\n", err
);
366 qlcnic_free_mbx_args(&cmd
);
370 status
= cmd
.rsp
.arg
[2] & 0xffff;
372 npar_info
->min_tx_bw
= MSW(cmd
.rsp
.arg
[2]);
374 npar_info
->max_tx_bw
= LSW(cmd
.rsp
.arg
[3]);
376 npar_info
->max_tx_ques
= MSW(cmd
.rsp
.arg
[3]);
378 npar_info
->max_tx_mac_filters
= LSW(cmd
.rsp
.arg
[4]);
380 npar_info
->max_rx_mcast_mac_filters
= MSW(cmd
.rsp
.arg
[4]);
382 npar_info
->max_rx_ucast_mac_filters
= LSW(cmd
.rsp
.arg
[5]);
384 npar_info
->max_rx_ip_addr
= MSW(cmd
.rsp
.arg
[5]);
386 npar_info
->max_rx_lro_flow
= LSW(cmd
.rsp
.arg
[6]);
388 npar_info
->max_rx_status_rings
= MSW(cmd
.rsp
.arg
[6]);
390 npar_info
->max_rx_buf_rings
= LSW(cmd
.rsp
.arg
[7]);
392 npar_info
->max_rx_ques
= MSW(cmd
.rsp
.arg
[7]);
393 npar_info
->max_tx_vlan_keys
= LSW(cmd
.rsp
.arg
[8]);
394 npar_info
->max_local_ipv6_addrs
= MSW(cmd
.rsp
.arg
[8]);
395 npar_info
->max_remote_ipv6_addrs
= LSW(cmd
.rsp
.arg
[9]);
397 dev_info(dev
, "\n\tmin_tx_bw: %d, max_tx_bw: %d max_tx_ques: %d,\n"
398 "\tmax_tx_mac_filters: %d max_rx_mcast_mac_filters: %d,\n"
399 "\tmax_rx_ucast_mac_filters: 0x%x, max_rx_ip_addr: %d,\n"
400 "\tmax_rx_lro_flow: %d max_rx_status_rings: %d,\n"
401 "\tmax_rx_buf_rings: %d, max_rx_ques: %d, max_tx_vlan_keys %d\n"
402 "\tlocal_ipv6_addr: %d, remote_ipv6_addr: %d\n",
403 npar_info
->min_tx_bw
, npar_info
->max_tx_bw
,
404 npar_info
->max_tx_ques
, npar_info
->max_tx_mac_filters
,
405 npar_info
->max_rx_mcast_mac_filters
,
406 npar_info
->max_rx_ucast_mac_filters
, npar_info
->max_rx_ip_addr
,
407 npar_info
->max_rx_lro_flow
, npar_info
->max_rx_status_rings
,
408 npar_info
->max_rx_buf_rings
, npar_info
->max_rx_ques
,
409 npar_info
->max_tx_vlan_keys
, npar_info
->max_local_ipv6_addrs
,
410 npar_info
->max_remote_ipv6_addrs
);
412 qlcnic_free_mbx_args(&cmd
);
416 static int qlcnic_sriov_set_pvid_mode(struct qlcnic_adapter
*adapter
,
417 struct qlcnic_cmd_args
*cmd
)
419 adapter
->rx_pvid
= MSW(cmd
->rsp
.arg
[1]) & 0xffff;
420 adapter
->flags
&= ~QLCNIC_TAGGING_ENABLED
;
424 static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter
*adapter
,
425 struct qlcnic_cmd_args
*cmd
)
427 struct qlcnic_sriov
*sriov
= adapter
->ahw
->sriov
;
431 if (sriov
->allowed_vlans
)
434 sriov
->any_vlan
= cmd
->rsp
.arg
[2] & 0xf;
435 sriov
->num_allowed_vlans
= cmd
->rsp
.arg
[2] >> 16;
436 dev_info(&adapter
->pdev
->dev
, "Number of allowed Guest VLANs = %d\n",
437 sriov
->num_allowed_vlans
);
439 qlcnic_sriov_alloc_vlans(adapter
);
441 if (!sriov
->any_vlan
)
444 num_vlans
= sriov
->num_allowed_vlans
;
445 sriov
->allowed_vlans
= kzalloc(sizeof(u16
) * num_vlans
, GFP_KERNEL
);
446 if (!sriov
->allowed_vlans
)
449 vlans
= (u16
*)&cmd
->rsp
.arg
[3];
450 for (i
= 0; i
< num_vlans
; i
++)
451 sriov
->allowed_vlans
[i
] = vlans
[i
];
456 static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter
*adapter
)
458 struct qlcnic_sriov
*sriov
= adapter
->ahw
->sriov
;
459 struct qlcnic_cmd_args cmd
;
462 memset(&cmd
, 0, sizeof(cmd
));
463 ret
= qlcnic_sriov_alloc_bc_mbx_args(&cmd
, QLCNIC_BC_CMD_GET_ACL
);
467 ret
= qlcnic_issue_cmd(adapter
, &cmd
);
469 dev_err(&adapter
->pdev
->dev
, "Failed to get ACL, err=%d\n",
472 sriov
->vlan_mode
= cmd
.rsp
.arg
[1] & 0x3;
473 switch (sriov
->vlan_mode
) {
474 case QLC_GUEST_VLAN_MODE
:
475 ret
= qlcnic_sriov_set_guest_vlan_mode(adapter
, &cmd
);
478 ret
= qlcnic_sriov_set_pvid_mode(adapter
, &cmd
);
483 qlcnic_free_mbx_args(&cmd
);
487 static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter
*adapter
)
489 struct qlcnic_hardware_context
*ahw
= adapter
->ahw
;
490 struct qlcnic_info nic_info
;
493 err
= qlcnic_sriov_get_vf_vport_info(adapter
, &nic_info
, 0);
497 ahw
->max_mc_count
= nic_info
.max_rx_mcast_mac_filters
;
499 err
= qlcnic_get_nic_info(adapter
, &nic_info
, ahw
->pci_func
);
503 if (qlcnic_83xx_get_port_info(adapter
))
506 qlcnic_sriov_vf_cfg_buff_desc(adapter
);
507 adapter
->flags
|= QLCNIC_ADAPTER_INITIALIZED
;
508 dev_info(&adapter
->pdev
->dev
, "HAL Version: %d\n",
509 adapter
->ahw
->fw_hal_version
);
511 ahw
->physical_port
= (u8
) nic_info
.phys_port
;
512 ahw
->switch_mode
= nic_info
.switch_mode
;
513 ahw
->max_mtu
= nic_info
.max_mtu
;
514 ahw
->op_mode
= nic_info
.op_mode
;
515 ahw
->capabilities
= nic_info
.capabilities
;
519 static int qlcnic_sriov_setup_vf(struct qlcnic_adapter
*adapter
,
524 adapter
->flags
|= QLCNIC_VLAN_FILTERING
;
525 adapter
->ahw
->total_nic_func
= 1;
526 INIT_LIST_HEAD(&adapter
->vf_mc_list
);
527 if (!qlcnic_use_msi_x
&& !!qlcnic_use_msi
)
528 dev_warn(&adapter
->pdev
->dev
,
529 "Device does not support MSI interrupts\n");
531 /* compute and set default and max tx/sds rings */
532 qlcnic_set_tx_ring_count(adapter
, QLCNIC_SINGLE_RING
);
533 qlcnic_set_sds_ring_count(adapter
, QLCNIC_SINGLE_RING
);
535 err
= qlcnic_setup_intr(adapter
);
537 dev_err(&adapter
->pdev
->dev
, "Failed to setup interrupt\n");
538 goto err_out_disable_msi
;
541 err
= qlcnic_83xx_setup_mbx_intr(adapter
);
543 goto err_out_disable_msi
;
545 err
= qlcnic_sriov_init(adapter
, 1);
547 goto err_out_disable_mbx_intr
;
549 err
= qlcnic_sriov_cfg_bc_intr(adapter
, 1);
551 goto err_out_cleanup_sriov
;
553 err
= qlcnic_sriov_channel_cfg_cmd(adapter
, QLCNIC_BC_CMD_CHANNEL_INIT
);
555 goto err_out_disable_bc_intr
;
557 err
= qlcnic_sriov_vf_init_driver(adapter
);
559 goto err_out_send_channel_term
;
561 err
= qlcnic_sriov_get_vf_acl(adapter
);
563 goto err_out_send_channel_term
;
565 err
= qlcnic_setup_netdev(adapter
, adapter
->netdev
, pci_using_dac
);
567 goto err_out_send_channel_term
;
569 pci_set_drvdata(adapter
->pdev
, adapter
);
570 dev_info(&adapter
->pdev
->dev
, "%s: XGbE port initialized\n",
571 adapter
->netdev
->name
);
573 qlcnic_schedule_work(adapter
, qlcnic_sriov_vf_poll_dev_state
,
574 adapter
->ahw
->idc
.delay
);
577 err_out_send_channel_term
:
578 qlcnic_sriov_channel_cfg_cmd(adapter
, QLCNIC_BC_CMD_CHANNEL_TERM
);
580 err_out_disable_bc_intr
:
581 qlcnic_sriov_cfg_bc_intr(adapter
, 0);
583 err_out_cleanup_sriov
:
584 __qlcnic_sriov_cleanup(adapter
);
586 err_out_disable_mbx_intr
:
587 qlcnic_83xx_free_mbx_intr(adapter
);
590 qlcnic_teardown_intr(adapter
);
594 static int qlcnic_sriov_check_dev_ready(struct qlcnic_adapter
*adapter
)
600 if (++adapter
->fw_fail_cnt
> QLC_BC_CMD_MAX_RETRY_CNT
)
602 state
= QLCRDX(adapter
->ahw
, QLC_83XX_IDC_DEV_STATE
);
603 } while (state
!= QLC_83XX_IDC_DEV_READY
);
608 int qlcnic_sriov_vf_init(struct qlcnic_adapter
*adapter
, int pci_using_dac
)
610 struct qlcnic_hardware_context
*ahw
= adapter
->ahw
;
613 set_bit(QLC_83XX_MODULE_LOADED
, &ahw
->idc
.status
);
614 ahw
->idc
.delay
= QLC_83XX_IDC_FW_POLL_DELAY
;
615 ahw
->reset_context
= 0;
616 adapter
->fw_fail_cnt
= 0;
617 ahw
->msix_supported
= 1;
618 adapter
->need_fw_reset
= 0;
619 adapter
->flags
|= QLCNIC_TX_INTR_SHARED
;
621 err
= qlcnic_sriov_check_dev_ready(adapter
);
625 err
= qlcnic_sriov_setup_vf(adapter
, pci_using_dac
);
629 if (qlcnic_read_mac_addr(adapter
))
630 dev_warn(&adapter
->pdev
->dev
, "failed to read mac addr\n");
632 INIT_DELAYED_WORK(&adapter
->idc_aen_work
, qlcnic_83xx_idc_aen_work
);
634 clear_bit(__QLCNIC_RESETTING
, &adapter
->state
);
638 void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter
*adapter
)
640 struct qlcnic_hardware_context
*ahw
= adapter
->ahw
;
642 ahw
->op_mode
= QLCNIC_SRIOV_VF_FUNC
;
643 dev_info(&adapter
->pdev
->dev
,
644 "HAL Version: %d Non Privileged SRIOV function\n",
645 ahw
->fw_hal_version
);
646 adapter
->nic_ops
= &qlcnic_sriov_vf_ops
;
647 set_bit(__QLCNIC_SRIOV_ENABLE
, &adapter
->state
);
651 void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context
*ahw
)
653 ahw
->hw_ops
= &qlcnic_sriov_vf_hw_ops
;
654 ahw
->reg_tbl
= (u32
*)qlcnic_83xx_reg_tbl
;
655 ahw
->ext_reg_tbl
= (u32
*)qlcnic_83xx_ext_reg_tbl
;
658 static u32
qlcnic_sriov_get_bc_paysize(u32 real_pay_size
, u8 curr_frag
)
662 pay_size
= real_pay_size
/ ((curr_frag
+ 1) * QLC_BC_PAYLOAD_SZ
);
665 pay_size
= QLC_BC_PAYLOAD_SZ
;
667 pay_size
= real_pay_size
% QLC_BC_PAYLOAD_SZ
;
672 int qlcnic_sriov_func_to_index(struct qlcnic_adapter
*adapter
, u8 pci_func
)
674 struct qlcnic_vf_info
*vf_info
= adapter
->ahw
->sriov
->vf_info
;
677 if (qlcnic_sriov_vf_check(adapter
))
680 for (i
= 0; i
< adapter
->ahw
->sriov
->num_vfs
; i
++) {
681 if (vf_info
[i
].pci_func
== pci_func
)
688 static inline int qlcnic_sriov_alloc_bc_trans(struct qlcnic_bc_trans
**trans
)
690 *trans
= kzalloc(sizeof(struct qlcnic_bc_trans
), GFP_ATOMIC
);
694 init_completion(&(*trans
)->resp_cmpl
);
698 static inline int qlcnic_sriov_alloc_bc_msg(struct qlcnic_bc_hdr
**hdr
,
701 *hdr
= kzalloc(sizeof(struct qlcnic_bc_hdr
) * size
, GFP_ATOMIC
);
708 static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args
*mbx
, u32 type
)
710 const struct qlcnic_mailbox_metadata
*mbx_tbl
;
713 mbx_tbl
= qlcnic_sriov_bc_mbx_tbl
;
714 size
= ARRAY_SIZE(qlcnic_sriov_bc_mbx_tbl
);
716 for (i
= 0; i
< size
; i
++) {
717 if (type
== mbx_tbl
[i
].cmd
) {
718 mbx
->op_type
= QLC_BC_CMD
;
719 mbx
->req
.num
= mbx_tbl
[i
].in_args
;
720 mbx
->rsp
.num
= mbx_tbl
[i
].out_args
;
721 mbx
->req
.arg
= kcalloc(mbx
->req
.num
, sizeof(u32
),
725 mbx
->rsp
.arg
= kcalloc(mbx
->rsp
.num
, sizeof(u32
),
732 mbx
->req
.arg
[0] = (type
| (mbx
->req
.num
<< 16) |
734 mbx
->rsp
.arg
[0] = (type
& 0xffff) | mbx
->rsp
.num
<< 16;
741 static int qlcnic_sriov_prepare_bc_hdr(struct qlcnic_bc_trans
*trans
,
742 struct qlcnic_cmd_args
*cmd
,
743 u16 seq
, u8 msg_type
)
745 struct qlcnic_bc_hdr
*hdr
;
747 u32 num_regs
, bc_pay_sz
;
749 u8 cmd_op
, num_frags
, t_num_frags
;
751 bc_pay_sz
= QLC_BC_PAYLOAD_SZ
;
752 if (msg_type
== QLC_BC_COMMAND
) {
753 trans
->req_pay
= (struct qlcnic_bc_payload
*)cmd
->req
.arg
;
754 trans
->rsp_pay
= (struct qlcnic_bc_payload
*)cmd
->rsp
.arg
;
755 num_regs
= cmd
->req
.num
;
756 trans
->req_pay_size
= (num_regs
* 4);
757 num_regs
= cmd
->rsp
.num
;
758 trans
->rsp_pay_size
= (num_regs
* 4);
759 cmd_op
= cmd
->req
.arg
[0] & 0xff;
760 remainder
= (trans
->req_pay_size
) % (bc_pay_sz
);
761 num_frags
= (trans
->req_pay_size
) / (bc_pay_sz
);
764 t_num_frags
= num_frags
;
765 if (qlcnic_sriov_alloc_bc_msg(&trans
->req_hdr
, num_frags
))
767 remainder
= (trans
->rsp_pay_size
) % (bc_pay_sz
);
768 num_frags
= (trans
->rsp_pay_size
) / (bc_pay_sz
);
771 if (qlcnic_sriov_alloc_bc_msg(&trans
->rsp_hdr
, num_frags
))
773 num_frags
= t_num_frags
;
774 hdr
= trans
->req_hdr
;
776 cmd
->req
.arg
= (u32
*)trans
->req_pay
;
777 cmd
->rsp
.arg
= (u32
*)trans
->rsp_pay
;
778 cmd_op
= cmd
->req
.arg
[0] & 0xff;
779 cmd
->cmd_op
= cmd_op
;
780 remainder
= (trans
->rsp_pay_size
) % (bc_pay_sz
);
781 num_frags
= (trans
->rsp_pay_size
) / (bc_pay_sz
);
784 cmd
->req
.num
= trans
->req_pay_size
/ 4;
785 cmd
->rsp
.num
= trans
->rsp_pay_size
/ 4;
786 hdr
= trans
->rsp_hdr
;
787 cmd
->op_type
= trans
->req_hdr
->op_type
;
790 trans
->trans_id
= seq
;
791 trans
->cmd_id
= cmd_op
;
792 for (i
= 0; i
< num_frags
; i
++) {
794 hdr
[i
].msg_type
= msg_type
;
795 hdr
[i
].op_type
= cmd
->op_type
;
797 hdr
[i
].num_frags
= num_frags
;
798 hdr
[i
].frag_num
= i
+ 1;
799 hdr
[i
].cmd_op
= cmd_op
;
805 static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans
*trans
)
809 kfree(trans
->req_hdr
);
810 kfree(trans
->rsp_hdr
);
814 static int qlcnic_sriov_clear_trans(struct qlcnic_vf_info
*vf
,
815 struct qlcnic_bc_trans
*trans
, u8 type
)
817 struct qlcnic_trans_list
*t_list
;
821 if (type
== QLC_BC_RESPONSE
) {
822 t_list
= &vf
->rcv_act
;
823 spin_lock_irqsave(&t_list
->lock
, flags
);
825 list_del(&trans
->list
);
826 if (t_list
->count
> 0)
828 spin_unlock_irqrestore(&t_list
->lock
, flags
);
830 if (type
== QLC_BC_COMMAND
) {
831 while (test_and_set_bit(QLC_BC_VF_SEND
, &vf
->state
))
834 clear_bit(QLC_BC_VF_SEND
, &vf
->state
);
839 static void qlcnic_sriov_schedule_bc_cmd(struct qlcnic_sriov
*sriov
,
840 struct qlcnic_vf_info
*vf
,
843 if (test_bit(QLC_BC_VF_FLR
, &vf
->state
) ||
844 vf
->adapter
->need_fw_reset
)
847 queue_work(sriov
->bc
.bc_trans_wq
, &vf
->trans_work
);
850 static inline void qlcnic_sriov_wait_for_resp(struct qlcnic_bc_trans
*trans
)
852 struct completion
*cmpl
= &trans
->resp_cmpl
;
854 if (wait_for_completion_timeout(cmpl
, QLC_MBOX_RESP_TIMEOUT
))
855 trans
->trans_state
= QLC_END
;
857 trans
->trans_state
= QLC_ABORT
;
862 static void qlcnic_sriov_handle_multi_frags(struct qlcnic_bc_trans
*trans
,
865 if (type
== QLC_BC_RESPONSE
) {
866 trans
->curr_rsp_frag
++;
867 if (trans
->curr_rsp_frag
< trans
->rsp_hdr
->num_frags
)
868 trans
->trans_state
= QLC_INIT
;
870 trans
->trans_state
= QLC_END
;
872 trans
->curr_req_frag
++;
873 if (trans
->curr_req_frag
< trans
->req_hdr
->num_frags
)
874 trans
->trans_state
= QLC_INIT
;
876 trans
->trans_state
= QLC_WAIT_FOR_RESP
;
880 static void qlcnic_sriov_wait_for_channel_free(struct qlcnic_bc_trans
*trans
,
883 struct qlcnic_vf_info
*vf
= trans
->vf
;
884 struct completion
*cmpl
= &vf
->ch_free_cmpl
;
886 if (!wait_for_completion_timeout(cmpl
, QLC_MBOX_CH_FREE_TIMEOUT
)) {
887 trans
->trans_state
= QLC_ABORT
;
891 clear_bit(QLC_BC_VF_CHANNEL
, &vf
->state
);
892 qlcnic_sriov_handle_multi_frags(trans
, type
);
895 static void qlcnic_sriov_pull_bc_msg(struct qlcnic_adapter
*adapter
,
896 u32
*hdr
, u32
*pay
, u32 size
)
898 struct qlcnic_hardware_context
*ahw
= adapter
->ahw
;
900 u8 i
, max
= 2, hdr_size
, j
;
902 hdr_size
= (sizeof(struct qlcnic_bc_hdr
) / sizeof(u32
));
903 max
= (size
/ sizeof(u32
)) + hdr_size
;
905 fw_mbx
= readl(QLCNIC_MBX_FW(ahw
, 0));
906 for (i
= 2, j
= 0; j
< hdr_size
; i
++, j
++)
907 *(hdr
++) = readl(QLCNIC_MBX_FW(ahw
, i
));
908 for (; j
< max
; i
++, j
++)
909 *(pay
++) = readl(QLCNIC_MBX_FW(ahw
, i
));
912 static int __qlcnic_sriov_issue_bc_post(struct qlcnic_vf_info
*vf
)
918 if (!test_and_set_bit(QLC_BC_VF_CHANNEL
, &vf
->state
)) {
928 static int qlcnic_sriov_issue_bc_post(struct qlcnic_bc_trans
*trans
, u8 type
)
930 struct qlcnic_vf_info
*vf
= trans
->vf
;
931 u32 pay_size
, hdr_size
;
934 u8 pci_func
= trans
->func_id
;
936 if (__qlcnic_sriov_issue_bc_post(vf
))
939 if (type
== QLC_BC_COMMAND
) {
940 hdr
= (u32
*)(trans
->req_hdr
+ trans
->curr_req_frag
);
941 pay
= (u32
*)(trans
->req_pay
+ trans
->curr_req_frag
);
942 hdr_size
= (sizeof(struct qlcnic_bc_hdr
) / sizeof(u32
));
943 pay_size
= qlcnic_sriov_get_bc_paysize(trans
->req_pay_size
,
944 trans
->curr_req_frag
);
945 pay_size
= (pay_size
/ sizeof(u32
));
947 hdr
= (u32
*)(trans
->rsp_hdr
+ trans
->curr_rsp_frag
);
948 pay
= (u32
*)(trans
->rsp_pay
+ trans
->curr_rsp_frag
);
949 hdr_size
= (sizeof(struct qlcnic_bc_hdr
) / sizeof(u32
));
950 pay_size
= qlcnic_sriov_get_bc_paysize(trans
->rsp_pay_size
,
951 trans
->curr_rsp_frag
);
952 pay_size
= (pay_size
/ sizeof(u32
));
955 ret
= qlcnic_sriov_post_bc_msg(vf
->adapter
, hdr
, pay
,
960 static int __qlcnic_sriov_send_bc_msg(struct qlcnic_bc_trans
*trans
,
961 struct qlcnic_vf_info
*vf
, u8 type
)
967 if (test_bit(QLC_BC_VF_FLR
, &vf
->state
) ||
968 vf
->adapter
->need_fw_reset
)
969 trans
->trans_state
= QLC_ABORT
;
971 switch (trans
->trans_state
) {
973 trans
->trans_state
= QLC_WAIT_FOR_CHANNEL_FREE
;
974 if (qlcnic_sriov_issue_bc_post(trans
, type
))
975 trans
->trans_state
= QLC_ABORT
;
977 case QLC_WAIT_FOR_CHANNEL_FREE
:
978 qlcnic_sriov_wait_for_channel_free(trans
, type
);
980 case QLC_WAIT_FOR_RESP
:
981 qlcnic_sriov_wait_for_resp(trans
);
990 clear_bit(QLC_BC_VF_CHANNEL
, &vf
->state
);
1000 static int qlcnic_sriov_send_bc_cmd(struct qlcnic_adapter
*adapter
,
1001 struct qlcnic_bc_trans
*trans
, int pci_func
)
1003 struct qlcnic_vf_info
*vf
;
1004 int err
, index
= qlcnic_sriov_func_to_index(adapter
, pci_func
);
1009 vf
= &adapter
->ahw
->sriov
->vf_info
[index
];
1011 trans
->func_id
= pci_func
;
1013 if (!test_bit(QLC_BC_VF_STATE
, &vf
->state
)) {
1014 if (qlcnic_sriov_pf_check(adapter
))
1016 if (qlcnic_sriov_vf_check(adapter
) &&
1017 trans
->cmd_id
!= QLCNIC_BC_CMD_CHANNEL_INIT
)
1021 mutex_lock(&vf
->send_cmd_lock
);
1022 vf
->send_cmd
= trans
;
1023 err
= __qlcnic_sriov_send_bc_msg(trans
, vf
, QLC_BC_COMMAND
);
1024 qlcnic_sriov_clear_trans(vf
, trans
, QLC_BC_COMMAND
);
1025 mutex_unlock(&vf
->send_cmd_lock
);
1029 static void __qlcnic_sriov_process_bc_cmd(struct qlcnic_adapter
*adapter
,
1030 struct qlcnic_bc_trans
*trans
,
1031 struct qlcnic_cmd_args
*cmd
)
1033 #ifdef CONFIG_QLCNIC_SRIOV
1034 if (qlcnic_sriov_pf_check(adapter
)) {
1035 qlcnic_sriov_pf_process_bc_cmd(adapter
, trans
, cmd
);
1039 cmd
->rsp
.arg
[0] |= (0x9 << 25);
1043 static void qlcnic_sriov_process_bc_cmd(struct work_struct
*work
)
1045 struct qlcnic_vf_info
*vf
= container_of(work
, struct qlcnic_vf_info
,
1047 struct qlcnic_bc_trans
*trans
= NULL
;
1048 struct qlcnic_adapter
*adapter
= vf
->adapter
;
1049 struct qlcnic_cmd_args cmd
;
1052 if (adapter
->need_fw_reset
)
1055 if (test_bit(QLC_BC_VF_FLR
, &vf
->state
))
1058 memset(&cmd
, 0, sizeof(struct qlcnic_cmd_args
));
1059 trans
= list_first_entry(&vf
->rcv_act
.wait_list
,
1060 struct qlcnic_bc_trans
, list
);
1061 adapter
= vf
->adapter
;
1063 if (qlcnic_sriov_prepare_bc_hdr(trans
, &cmd
, trans
->req_hdr
->seq_id
,
1067 __qlcnic_sriov_process_bc_cmd(adapter
, trans
, &cmd
);
1068 trans
->trans_state
= QLC_INIT
;
1069 __qlcnic_sriov_send_bc_msg(trans
, vf
, QLC_BC_RESPONSE
);
1072 qlcnic_free_mbx_args(&cmd
);
1073 req
= qlcnic_sriov_clear_trans(vf
, trans
, QLC_BC_RESPONSE
);
1074 qlcnic_sriov_cleanup_transaction(trans
);
1076 qlcnic_sriov_schedule_bc_cmd(adapter
->ahw
->sriov
, vf
,
1077 qlcnic_sriov_process_bc_cmd
);
1080 static void qlcnic_sriov_handle_bc_resp(struct qlcnic_bc_hdr
*hdr
,
1081 struct qlcnic_vf_info
*vf
)
1083 struct qlcnic_bc_trans
*trans
;
1086 if (test_and_set_bit(QLC_BC_VF_SEND
, &vf
->state
))
1089 trans
= vf
->send_cmd
;
1094 if (trans
->trans_id
!= hdr
->seq_id
)
1097 pay_size
= qlcnic_sriov_get_bc_paysize(trans
->rsp_pay_size
,
1098 trans
->curr_rsp_frag
);
1099 qlcnic_sriov_pull_bc_msg(vf
->adapter
,
1100 (u32
*)(trans
->rsp_hdr
+ trans
->curr_rsp_frag
),
1101 (u32
*)(trans
->rsp_pay
+ trans
->curr_rsp_frag
),
1103 if (++trans
->curr_rsp_frag
< trans
->rsp_hdr
->num_frags
)
1106 complete(&trans
->resp_cmpl
);
1109 clear_bit(QLC_BC_VF_SEND
, &vf
->state
);
1112 int __qlcnic_sriov_add_act_list(struct qlcnic_sriov
*sriov
,
1113 struct qlcnic_vf_info
*vf
,
1114 struct qlcnic_bc_trans
*trans
)
1116 struct qlcnic_trans_list
*t_list
= &vf
->rcv_act
;
1119 list_add_tail(&trans
->list
, &t_list
->wait_list
);
1120 if (t_list
->count
== 1)
1121 qlcnic_sriov_schedule_bc_cmd(sriov
, vf
,
1122 qlcnic_sriov_process_bc_cmd
);
1126 static int qlcnic_sriov_add_act_list(struct qlcnic_sriov
*sriov
,
1127 struct qlcnic_vf_info
*vf
,
1128 struct qlcnic_bc_trans
*trans
)
1130 struct qlcnic_trans_list
*t_list
= &vf
->rcv_act
;
1132 spin_lock(&t_list
->lock
);
1134 __qlcnic_sriov_add_act_list(sriov
, vf
, trans
);
1136 spin_unlock(&t_list
->lock
);
1140 static void qlcnic_sriov_handle_pending_trans(struct qlcnic_sriov
*sriov
,
1141 struct qlcnic_vf_info
*vf
,
1142 struct qlcnic_bc_hdr
*hdr
)
1144 struct qlcnic_bc_trans
*trans
= NULL
;
1145 struct list_head
*node
;
1146 u32 pay_size
, curr_frag
;
1147 u8 found
= 0, active
= 0;
1149 spin_lock(&vf
->rcv_pend
.lock
);
1150 if (vf
->rcv_pend
.count
> 0) {
1151 list_for_each(node
, &vf
->rcv_pend
.wait_list
) {
1152 trans
= list_entry(node
, struct qlcnic_bc_trans
, list
);
1153 if (trans
->trans_id
== hdr
->seq_id
) {
1161 curr_frag
= trans
->curr_req_frag
;
1162 pay_size
= qlcnic_sriov_get_bc_paysize(trans
->req_pay_size
,
1164 qlcnic_sriov_pull_bc_msg(vf
->adapter
,
1165 (u32
*)(trans
->req_hdr
+ curr_frag
),
1166 (u32
*)(trans
->req_pay
+ curr_frag
),
1168 trans
->curr_req_frag
++;
1169 if (trans
->curr_req_frag
>= hdr
->num_frags
) {
1170 vf
->rcv_pend
.count
--;
1171 list_del(&trans
->list
);
1175 spin_unlock(&vf
->rcv_pend
.lock
);
1178 if (qlcnic_sriov_add_act_list(sriov
, vf
, trans
))
1179 qlcnic_sriov_cleanup_transaction(trans
);
1184 static void qlcnic_sriov_handle_bc_cmd(struct qlcnic_sriov
*sriov
,
1185 struct qlcnic_bc_hdr
*hdr
,
1186 struct qlcnic_vf_info
*vf
)
1188 struct qlcnic_bc_trans
*trans
;
1189 struct qlcnic_adapter
*adapter
= vf
->adapter
;
1190 struct qlcnic_cmd_args cmd
;
1195 if (adapter
->need_fw_reset
)
1198 if (!test_bit(QLC_BC_VF_STATE
, &vf
->state
) &&
1199 hdr
->op_type
!= QLC_BC_CMD
&&
1200 hdr
->cmd_op
!= QLCNIC_BC_CMD_CHANNEL_INIT
)
1203 if (hdr
->frag_num
> 1) {
1204 qlcnic_sriov_handle_pending_trans(sriov
, vf
, hdr
);
1208 memset(&cmd
, 0, sizeof(struct qlcnic_cmd_args
));
1209 cmd_op
= hdr
->cmd_op
;
1210 if (qlcnic_sriov_alloc_bc_trans(&trans
))
1213 if (hdr
->op_type
== QLC_BC_CMD
)
1214 err
= qlcnic_sriov_alloc_bc_mbx_args(&cmd
, cmd_op
);
1216 err
= qlcnic_alloc_mbx_args(&cmd
, adapter
, cmd_op
);
1219 qlcnic_sriov_cleanup_transaction(trans
);
1223 cmd
.op_type
= hdr
->op_type
;
1224 if (qlcnic_sriov_prepare_bc_hdr(trans
, &cmd
, hdr
->seq_id
,
1226 qlcnic_free_mbx_args(&cmd
);
1227 qlcnic_sriov_cleanup_transaction(trans
);
1231 pay_size
= qlcnic_sriov_get_bc_paysize(trans
->req_pay_size
,
1232 trans
->curr_req_frag
);
1233 qlcnic_sriov_pull_bc_msg(vf
->adapter
,
1234 (u32
*)(trans
->req_hdr
+ trans
->curr_req_frag
),
1235 (u32
*)(trans
->req_pay
+ trans
->curr_req_frag
),
1237 trans
->func_id
= vf
->pci_func
;
1239 trans
->trans_id
= hdr
->seq_id
;
1240 trans
->curr_req_frag
++;
1242 if (qlcnic_sriov_soft_flr_check(adapter
, trans
, vf
))
1245 if (trans
->curr_req_frag
== trans
->req_hdr
->num_frags
) {
1246 if (qlcnic_sriov_add_act_list(sriov
, vf
, trans
)) {
1247 qlcnic_free_mbx_args(&cmd
);
1248 qlcnic_sriov_cleanup_transaction(trans
);
1251 spin_lock(&vf
->rcv_pend
.lock
);
1252 list_add_tail(&trans
->list
, &vf
->rcv_pend
.wait_list
);
1253 vf
->rcv_pend
.count
++;
1254 spin_unlock(&vf
->rcv_pend
.lock
);
1258 static void qlcnic_sriov_handle_msg_event(struct qlcnic_sriov
*sriov
,
1259 struct qlcnic_vf_info
*vf
)
1261 struct qlcnic_bc_hdr hdr
;
1262 u32
*ptr
= (u32
*)&hdr
;
1265 for (i
= 2; i
< 6; i
++)
1266 ptr
[i
- 2] = readl(QLCNIC_MBX_FW(vf
->adapter
->ahw
, i
));
1267 msg_type
= hdr
.msg_type
;
1270 case QLC_BC_COMMAND
:
1271 qlcnic_sriov_handle_bc_cmd(sriov
, &hdr
, vf
);
1273 case QLC_BC_RESPONSE
:
1274 qlcnic_sriov_handle_bc_resp(&hdr
, vf
);
1279 static void qlcnic_sriov_handle_flr_event(struct qlcnic_sriov
*sriov
,
1280 struct qlcnic_vf_info
*vf
)
1282 struct qlcnic_adapter
*adapter
= vf
->adapter
;
1284 if (qlcnic_sriov_pf_check(adapter
))
1285 qlcnic_sriov_pf_handle_flr(sriov
, vf
);
1287 dev_err(&adapter
->pdev
->dev
,
1288 "Invalid event to VF. VF should not get FLR event\n");
1291 void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter
*adapter
, u32 event
)
1293 struct qlcnic_vf_info
*vf
;
1294 struct qlcnic_sriov
*sriov
;
1298 sriov
= adapter
->ahw
->sriov
;
1299 pci_func
= qlcnic_sriov_target_func_id(event
);
1300 index
= qlcnic_sriov_func_to_index(adapter
, pci_func
);
1305 vf
= &sriov
->vf_info
[index
];
1306 vf
->pci_func
= pci_func
;
1308 if (qlcnic_sriov_channel_free_check(event
))
1309 complete(&vf
->ch_free_cmpl
);
1311 if (qlcnic_sriov_flr_check(event
)) {
1312 qlcnic_sriov_handle_flr_event(sriov
, vf
);
1316 if (qlcnic_sriov_bc_msg_check(event
))
1317 qlcnic_sriov_handle_msg_event(sriov
, vf
);
1320 int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter
*adapter
, u8 enable
)
1322 struct qlcnic_cmd_args cmd
;
1325 if (!test_bit(__QLCNIC_SRIOV_ENABLE
, &adapter
->state
))
1328 if (qlcnic_alloc_mbx_args(&cmd
, adapter
, QLCNIC_CMD_BC_EVENT_SETUP
))
1332 cmd
.req
.arg
[1] = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7);
1334 err
= qlcnic_83xx_issue_cmd(adapter
, &cmd
);
1336 if (err
!= QLCNIC_RCODE_SUCCESS
) {
1337 dev_err(&adapter
->pdev
->dev
,
1338 "Failed to %s bc events, err=%d\n",
1339 (enable
? "enable" : "disable"), err
);
1342 qlcnic_free_mbx_args(&cmd
);
1346 static int qlcnic_sriov_retry_bc_cmd(struct qlcnic_adapter
*adapter
,
1347 struct qlcnic_bc_trans
*trans
)
1349 u8 max
= QLC_BC_CMD_MAX_RETRY_CNT
;
1352 state
= QLCRDX(adapter
->ahw
, QLC_83XX_IDC_DEV_STATE
);
1353 if (state
== QLC_83XX_IDC_DEV_READY
) {
1355 clear_bit(QLC_BC_VF_CHANNEL
, &trans
->vf
->state
);
1356 trans
->trans_state
= QLC_INIT
;
1357 if (++adapter
->fw_fail_cnt
> max
)
1366 static int __qlcnic_sriov_issue_cmd(struct qlcnic_adapter
*adapter
,
1367 struct qlcnic_cmd_args
*cmd
)
1369 struct qlcnic_hardware_context
*ahw
= adapter
->ahw
;
1370 struct qlcnic_mailbox
*mbx
= ahw
->mailbox
;
1371 struct device
*dev
= &adapter
->pdev
->dev
;
1372 struct qlcnic_bc_trans
*trans
;
1374 u32 rsp_data
, opcode
, mbx_err_code
, rsp
;
1375 u16 seq
= ++adapter
->ahw
->sriov
->bc
.trans_counter
;
1376 u8 func
= ahw
->pci_func
;
1378 rsp
= qlcnic_sriov_alloc_bc_trans(&trans
);
1382 rsp
= qlcnic_sriov_prepare_bc_hdr(trans
, cmd
, seq
, QLC_BC_COMMAND
);
1384 goto cleanup_transaction
;
1387 if (!test_bit(QLC_83XX_MBX_READY
, &mbx
->status
)) {
1389 QLCDB(adapter
, DRV
, "MBX not Ready!(cmd 0x%x) for VF 0x%x\n",
1390 QLCNIC_MBX_RSP(cmd
->req
.arg
[0]), func
);
1394 err
= qlcnic_sriov_send_bc_cmd(adapter
, trans
, func
);
1396 dev_err(dev
, "MBX command 0x%x timed out for VF %d\n",
1397 (cmd
->req
.arg
[0] & 0xffff), func
);
1398 rsp
= QLCNIC_RCODE_TIMEOUT
;
1400 /* After adapter reset PF driver may take some time to
1401 * respond to VF's request. Retry request till maximum retries.
1403 if ((trans
->req_hdr
->cmd_op
== QLCNIC_BC_CMD_CHANNEL_INIT
) &&
1404 !qlcnic_sriov_retry_bc_cmd(adapter
, trans
))
1410 rsp_data
= cmd
->rsp
.arg
[0];
1411 mbx_err_code
= QLCNIC_MBX_STATUS(rsp_data
);
1412 opcode
= QLCNIC_MBX_RSP(cmd
->req
.arg
[0]);
1414 if ((mbx_err_code
== QLCNIC_MBX_RSP_OK
) ||
1415 (mbx_err_code
== QLCNIC_MBX_PORT_RSP_OK
)) {
1416 rsp
= QLCNIC_RCODE_SUCCESS
;
1418 if (cmd
->type
== QLC_83XX_MBX_CMD_NO_WAIT
) {
1419 rsp
= QLCNIC_RCODE_SUCCESS
;
1426 "MBX command 0x%x failed with err:0x%x for VF %d\n",
1427 opcode
, mbx_err_code
, func
);
1432 if (rsp
== QLCNIC_RCODE_TIMEOUT
) {
1433 ahw
->reset_context
= 1;
1434 adapter
->need_fw_reset
= 1;
1435 clear_bit(QLC_83XX_MBX_READY
, &mbx
->status
);
1438 cleanup_transaction
:
1439 qlcnic_sriov_cleanup_transaction(trans
);
1442 if (cmd
->type
== QLC_83XX_MBX_CMD_NO_WAIT
) {
1443 qlcnic_free_mbx_args(cmd
);
1451 static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter
*adapter
,
1452 struct qlcnic_cmd_args
*cmd
)
1454 if (cmd
->type
== QLC_83XX_MBX_CMD_NO_WAIT
)
1455 return qlcnic_sriov_async_issue_cmd(adapter
, cmd
);
1457 return __qlcnic_sriov_issue_cmd(adapter
, cmd
);
1460 static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter
*adapter
, u8 cmd_op
)
1462 struct qlcnic_cmd_args cmd
;
1463 struct qlcnic_vf_info
*vf
= &adapter
->ahw
->sriov
->vf_info
[0];
1466 memset(&cmd
, 0, sizeof(cmd
));
1467 if (qlcnic_sriov_alloc_bc_mbx_args(&cmd
, cmd_op
))
1470 ret
= qlcnic_issue_cmd(adapter
, &cmd
);
1472 dev_err(&adapter
->pdev
->dev
,
1473 "Failed bc channel %s %d\n", cmd_op
? "term" : "init",
1478 cmd_op
= (cmd
.rsp
.arg
[0] & 0xff);
1479 if (cmd
.rsp
.arg
[0] >> 25 == 2)
1481 if (cmd_op
== QLCNIC_BC_CMD_CHANNEL_INIT
)
1482 set_bit(QLC_BC_VF_STATE
, &vf
->state
);
1484 clear_bit(QLC_BC_VF_STATE
, &vf
->state
);
1487 qlcnic_free_mbx_args(&cmd
);
1491 static void qlcnic_vf_add_mc_list(struct net_device
*netdev
, const u8
*mac
,
1492 enum qlcnic_mac_type mac_type
)
1494 struct qlcnic_adapter
*adapter
= netdev_priv(netdev
);
1495 struct qlcnic_sriov
*sriov
= adapter
->ahw
->sriov
;
1496 struct qlcnic_vf_info
*vf
;
1500 vf
= &adapter
->ahw
->sriov
->vf_info
[0];
1502 if (!qlcnic_sriov_check_any_vlan(vf
)) {
1503 qlcnic_nic_add_mac(adapter
, mac
, 0, mac_type
);
1505 spin_lock(&vf
->vlan_list_lock
);
1506 for (i
= 0; i
< sriov
->num_allowed_vlans
; i
++) {
1507 vlan_id
= vf
->sriov_vlans
[i
];
1509 qlcnic_nic_add_mac(adapter
, mac
, vlan_id
,
1512 spin_unlock(&vf
->vlan_list_lock
);
1513 if (qlcnic_84xx_check(adapter
))
1514 qlcnic_nic_add_mac(adapter
, mac
, 0, mac_type
);
1518 void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel
*bc
)
1520 struct list_head
*head
= &bc
->async_list
;
1521 struct qlcnic_async_work_list
*entry
;
1523 flush_workqueue(bc
->bc_async_wq
);
1524 while (!list_empty(head
)) {
1525 entry
= list_entry(head
->next
, struct qlcnic_async_work_list
,
1527 cancel_work_sync(&entry
->work
);
1528 list_del(&entry
->list
);
1533 void qlcnic_sriov_vf_set_multi(struct net_device
*netdev
)
1535 struct qlcnic_adapter
*adapter
= netdev_priv(netdev
);
1536 struct qlcnic_hardware_context
*ahw
= adapter
->ahw
;
1537 static const u8 bcast_addr
[ETH_ALEN
] = {
1538 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
1540 struct netdev_hw_addr
*ha
;
1541 u32 mode
= VPORT_MISS_MODE_DROP
;
1543 if (!test_bit(__QLCNIC_FW_ATTACHED
, &adapter
->state
))
1546 if (netdev
->flags
& IFF_PROMISC
) {
1547 if (!(adapter
->flags
& QLCNIC_PROMISC_DISABLED
))
1548 mode
= VPORT_MISS_MODE_ACCEPT_ALL
;
1549 } else if ((netdev
->flags
& IFF_ALLMULTI
) ||
1550 (netdev_mc_count(netdev
) > ahw
->max_mc_count
)) {
1551 mode
= VPORT_MISS_MODE_ACCEPT_MULTI
;
1553 qlcnic_vf_add_mc_list(netdev
, bcast_addr
, QLCNIC_BROADCAST_MAC
);
1554 if (!netdev_mc_empty(netdev
)) {
1555 qlcnic_flush_mcast_mac(adapter
);
1556 netdev_for_each_mc_addr(ha
, netdev
)
1557 qlcnic_vf_add_mc_list(netdev
, ha
->addr
,
1558 QLCNIC_MULTICAST_MAC
);
1562 /* configure unicast MAC address, if there is not sufficient space
1563 * to store all the unicast addresses then enable promiscuous mode
1565 if (netdev_uc_count(netdev
) > ahw
->max_uc_count
) {
1566 mode
= VPORT_MISS_MODE_ACCEPT_ALL
;
1567 } else if (!netdev_uc_empty(netdev
)) {
1568 netdev_for_each_uc_addr(ha
, netdev
)
1569 qlcnic_vf_add_mc_list(netdev
, ha
->addr
,
1570 QLCNIC_UNICAST_MAC
);
1573 if (adapter
->pdev
->is_virtfn
) {
1574 if (mode
== VPORT_MISS_MODE_ACCEPT_ALL
&&
1575 !adapter
->fdb_mac_learn
) {
1576 qlcnic_alloc_lb_filters_mem(adapter
);
1577 adapter
->drv_mac_learn
= 1;
1578 adapter
->rx_mac_learn
= true;
1580 adapter
->drv_mac_learn
= 0;
1581 adapter
->rx_mac_learn
= false;
1585 qlcnic_nic_set_promisc(adapter
, mode
);
1588 static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct
*work
)
1590 struct qlcnic_async_work_list
*entry
;
1591 struct qlcnic_adapter
*adapter
;
1592 struct qlcnic_cmd_args
*cmd
;
1594 entry
= container_of(work
, struct qlcnic_async_work_list
, work
);
1595 adapter
= entry
->ptr
;
1597 __qlcnic_sriov_issue_cmd(adapter
, cmd
);
1601 static struct qlcnic_async_work_list
*
1602 qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel
*bc
)
1604 struct list_head
*node
;
1605 struct qlcnic_async_work_list
*entry
= NULL
;
1608 list_for_each(node
, &bc
->async_list
) {
1609 entry
= list_entry(node
, struct qlcnic_async_work_list
, list
);
1610 if (!work_pending(&entry
->work
)) {
1617 entry
= kzalloc(sizeof(struct qlcnic_async_work_list
),
1621 list_add_tail(&entry
->list
, &bc
->async_list
);
1627 static void qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel
*bc
,
1628 work_func_t func
, void *data
,
1629 struct qlcnic_cmd_args
*cmd
)
1631 struct qlcnic_async_work_list
*entry
= NULL
;
1633 entry
= qlcnic_sriov_get_free_node_async_work(bc
);
1639 INIT_WORK(&entry
->work
, func
);
1640 queue_work(bc
->bc_async_wq
, &entry
->work
);
1643 static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter
*adapter
,
1644 struct qlcnic_cmd_args
*cmd
)
1647 struct qlcnic_back_channel
*bc
= &adapter
->ahw
->sriov
->bc
;
1649 if (adapter
->need_fw_reset
)
1652 qlcnic_sriov_schedule_async_cmd(bc
, qlcnic_sriov_handle_async_issue_cmd
,
1657 static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter
*adapter
)
1661 adapter
->need_fw_reset
= 0;
1662 qlcnic_83xx_reinit_mbx_work(adapter
->ahw
->mailbox
);
1663 qlcnic_83xx_enable_mbx_interrupt(adapter
);
1665 err
= qlcnic_sriov_cfg_bc_intr(adapter
, 1);
1669 err
= qlcnic_sriov_channel_cfg_cmd(adapter
, QLCNIC_BC_CMD_CHANNEL_INIT
);
1671 goto err_out_cleanup_bc_intr
;
1673 err
= qlcnic_sriov_vf_init_driver(adapter
);
1675 goto err_out_term_channel
;
1679 err_out_term_channel
:
1680 qlcnic_sriov_channel_cfg_cmd(adapter
, QLCNIC_BC_CMD_CHANNEL_TERM
);
1682 err_out_cleanup_bc_intr
:
1683 qlcnic_sriov_cfg_bc_intr(adapter
, 0);
1687 static void qlcnic_sriov_vf_attach(struct qlcnic_adapter
*adapter
)
1689 struct net_device
*netdev
= adapter
->netdev
;
1691 if (netif_running(netdev
)) {
1692 if (!qlcnic_up(adapter
, netdev
))
1693 qlcnic_restore_indev_addr(netdev
, NETDEV_UP
);
1696 netif_device_attach(netdev
);
1699 static void qlcnic_sriov_vf_detach(struct qlcnic_adapter
*adapter
)
1701 struct qlcnic_hardware_context
*ahw
= adapter
->ahw
;
1702 struct qlcnic_intrpt_config
*intr_tbl
= ahw
->intr_tbl
;
1703 struct net_device
*netdev
= adapter
->netdev
;
1704 u8 i
, max_ints
= ahw
->num_msix
- 1;
1706 netif_device_detach(netdev
);
1707 qlcnic_83xx_detach_mailbox_work(adapter
);
1708 qlcnic_83xx_disable_mbx_intr(adapter
);
1710 if (netif_running(netdev
))
1711 qlcnic_down(adapter
, netdev
);
1713 for (i
= 0; i
< max_ints
; i
++) {
1715 intr_tbl
[i
].enabled
= 0;
1716 intr_tbl
[i
].src
= 0;
1718 ahw
->reset_context
= 0;
1721 static int qlcnic_sriov_vf_handle_dev_ready(struct qlcnic_adapter
*adapter
)
1723 struct qlcnic_hardware_context
*ahw
= adapter
->ahw
;
1724 struct device
*dev
= &adapter
->pdev
->dev
;
1725 struct qlc_83xx_idc
*idc
= &ahw
->idc
;
1726 u8 func
= ahw
->pci_func
;
1729 if ((idc
->prev_state
== QLC_83XX_IDC_DEV_NEED_RESET
) ||
1730 (idc
->prev_state
== QLC_83XX_IDC_DEV_INIT
)) {
1731 if (!qlcnic_sriov_vf_reinit_driver(adapter
)) {
1732 qlcnic_sriov_vf_attach(adapter
);
1733 adapter
->fw_fail_cnt
= 0;
1735 "%s: Reinitialization of VF 0x%x done after FW reset\n",
1739 "%s: Reinitialization of VF 0x%x failed after FW reset\n",
1741 state
= QLCRDX(ahw
, QLC_83XX_IDC_DEV_STATE
);
1742 dev_info(dev
, "Current state 0x%x after FW reset\n",
1750 static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter
*adapter
)
1752 struct qlcnic_hardware_context
*ahw
= adapter
->ahw
;
1753 struct qlcnic_mailbox
*mbx
= ahw
->mailbox
;
1754 struct device
*dev
= &adapter
->pdev
->dev
;
1755 struct qlc_83xx_idc
*idc
= &ahw
->idc
;
1756 u8 func
= ahw
->pci_func
;
1759 adapter
->reset_ctx_cnt
++;
1761 /* Skip the context reset and check if FW is hung */
1762 if (adapter
->reset_ctx_cnt
< 3) {
1763 adapter
->need_fw_reset
= 1;
1764 clear_bit(QLC_83XX_MBX_READY
, &mbx
->status
);
1766 "Resetting context, wait here to check if FW is in failed state\n");
1770 /* Check if number of resets exceed the threshold.
1771 * If it exceeds the threshold just fail the VF.
1773 if (adapter
->reset_ctx_cnt
> QLC_83XX_VF_RESET_FAIL_THRESH
) {
1774 clear_bit(QLC_83XX_MODULE_LOADED
, &idc
->status
);
1775 adapter
->tx_timeo_cnt
= 0;
1776 adapter
->fw_fail_cnt
= 0;
1777 adapter
->reset_ctx_cnt
= 0;
1778 qlcnic_sriov_vf_detach(adapter
);
1780 "Device context resets have exceeded the threshold, device interface will be shutdown\n");
1784 dev_info(dev
, "Resetting context of VF 0x%x\n", func
);
1785 dev_info(dev
, "%s: Context reset count %d for VF 0x%x\n",
1786 __func__
, adapter
->reset_ctx_cnt
, func
);
1787 set_bit(__QLCNIC_RESETTING
, &adapter
->state
);
1788 adapter
->need_fw_reset
= 1;
1789 clear_bit(QLC_83XX_MBX_READY
, &mbx
->status
);
1790 qlcnic_sriov_vf_detach(adapter
);
1791 adapter
->need_fw_reset
= 0;
1793 if (!qlcnic_sriov_vf_reinit_driver(adapter
)) {
1794 qlcnic_sriov_vf_attach(adapter
);
1795 adapter
->tx_timeo_cnt
= 0;
1796 adapter
->reset_ctx_cnt
= 0;
1797 adapter
->fw_fail_cnt
= 0;
1798 dev_info(dev
, "Done resetting context for VF 0x%x\n", func
);
1800 dev_err(dev
, "%s: Reinitialization of VF 0x%x failed\n",
1802 state
= QLCRDX(ahw
, QLC_83XX_IDC_DEV_STATE
);
1803 dev_info(dev
, "%s: Current state 0x%x\n", __func__
, state
);
1809 static int qlcnic_sriov_vf_idc_ready_state(struct qlcnic_adapter
*adapter
)
1811 struct qlcnic_hardware_context
*ahw
= adapter
->ahw
;
1814 if (ahw
->idc
.prev_state
!= QLC_83XX_IDC_DEV_READY
)
1815 ret
= qlcnic_sriov_vf_handle_dev_ready(adapter
);
1816 else if (ahw
->reset_context
)
1817 ret
= qlcnic_sriov_vf_handle_context_reset(adapter
);
1819 clear_bit(__QLCNIC_RESETTING
, &adapter
->state
);
1823 static int qlcnic_sriov_vf_idc_failed_state(struct qlcnic_adapter
*adapter
)
1825 struct qlc_83xx_idc
*idc
= &adapter
->ahw
->idc
;
1827 dev_err(&adapter
->pdev
->dev
, "Device is in failed state\n");
1828 if (idc
->prev_state
== QLC_83XX_IDC_DEV_READY
)
1829 qlcnic_sriov_vf_detach(adapter
);
1831 clear_bit(QLC_83XX_MODULE_LOADED
, &idc
->status
);
1832 clear_bit(__QLCNIC_RESETTING
, &adapter
->state
);
1837 qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter
*adapter
)
1839 struct qlcnic_mailbox
*mbx
= adapter
->ahw
->mailbox
;
1840 struct qlc_83xx_idc
*idc
= &adapter
->ahw
->idc
;
1842 dev_info(&adapter
->pdev
->dev
, "Device is in quiescent state\n");
1843 if (idc
->prev_state
== QLC_83XX_IDC_DEV_READY
) {
1844 set_bit(__QLCNIC_RESETTING
, &adapter
->state
);
1845 adapter
->tx_timeo_cnt
= 0;
1846 adapter
->reset_ctx_cnt
= 0;
1847 clear_bit(QLC_83XX_MBX_READY
, &mbx
->status
);
1848 qlcnic_sriov_vf_detach(adapter
);
1854 static int qlcnic_sriov_vf_idc_init_reset_state(struct qlcnic_adapter
*adapter
)
1856 struct qlcnic_mailbox
*mbx
= adapter
->ahw
->mailbox
;
1857 struct qlc_83xx_idc
*idc
= &adapter
->ahw
->idc
;
1858 u8 func
= adapter
->ahw
->pci_func
;
1860 if (idc
->prev_state
== QLC_83XX_IDC_DEV_READY
) {
1861 dev_err(&adapter
->pdev
->dev
,
1862 "Firmware hang detected by VF 0x%x\n", func
);
1863 set_bit(__QLCNIC_RESETTING
, &adapter
->state
);
1864 adapter
->tx_timeo_cnt
= 0;
1865 adapter
->reset_ctx_cnt
= 0;
1866 clear_bit(QLC_83XX_MBX_READY
, &mbx
->status
);
1867 qlcnic_sriov_vf_detach(adapter
);
1872 static int qlcnic_sriov_vf_idc_unknown_state(struct qlcnic_adapter
*adapter
)
1874 dev_err(&adapter
->pdev
->dev
, "%s: Device in unknown state\n", __func__
);
1878 static void qlcnic_sriov_vf_periodic_tasks(struct qlcnic_adapter
*adapter
)
1880 if (adapter
->fhash
.fnum
)
1881 qlcnic_prune_lb_filters(adapter
);
1884 static void qlcnic_sriov_vf_poll_dev_state(struct work_struct
*work
)
1886 struct qlcnic_adapter
*adapter
;
1887 struct qlc_83xx_idc
*idc
;
1890 adapter
= container_of(work
, struct qlcnic_adapter
, fw_work
.work
);
1891 idc
= &adapter
->ahw
->idc
;
1892 idc
->curr_state
= QLCRDX(adapter
->ahw
, QLC_83XX_IDC_DEV_STATE
);
1894 switch (idc
->curr_state
) {
1895 case QLC_83XX_IDC_DEV_READY
:
1896 ret
= qlcnic_sriov_vf_idc_ready_state(adapter
);
1898 case QLC_83XX_IDC_DEV_NEED_RESET
:
1899 case QLC_83XX_IDC_DEV_INIT
:
1900 ret
= qlcnic_sriov_vf_idc_init_reset_state(adapter
);
1902 case QLC_83XX_IDC_DEV_NEED_QUISCENT
:
1903 ret
= qlcnic_sriov_vf_idc_need_quiescent_state(adapter
);
1905 case QLC_83XX_IDC_DEV_FAILED
:
1906 ret
= qlcnic_sriov_vf_idc_failed_state(adapter
);
1908 case QLC_83XX_IDC_DEV_QUISCENT
:
1911 ret
= qlcnic_sriov_vf_idc_unknown_state(adapter
);
1914 idc
->prev_state
= idc
->curr_state
;
1915 qlcnic_sriov_vf_periodic_tasks(adapter
);
1917 if (!ret
&& test_bit(QLC_83XX_MODULE_LOADED
, &idc
->status
))
1918 qlcnic_schedule_work(adapter
, qlcnic_sriov_vf_poll_dev_state
,
1922 static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter
*adapter
)
1924 while (test_and_set_bit(__QLCNIC_RESETTING
, &adapter
->state
))
1927 clear_bit(QLC_83XX_MODULE_LOADED
, &adapter
->ahw
->idc
.status
);
1928 clear_bit(__QLCNIC_RESETTING
, &adapter
->state
);
1929 cancel_delayed_work_sync(&adapter
->fw_work
);
1932 static int qlcnic_sriov_check_vlan_id(struct qlcnic_sriov
*sriov
,
1933 struct qlcnic_vf_info
*vf
, u16 vlan_id
)
1935 int i
, err
= -EINVAL
;
1937 if (!vf
->sriov_vlans
)
1940 spin_lock_bh(&vf
->vlan_list_lock
);
1942 for (i
= 0; i
< sriov
->num_allowed_vlans
; i
++) {
1943 if (vf
->sriov_vlans
[i
] == vlan_id
) {
1949 spin_unlock_bh(&vf
->vlan_list_lock
);
1953 static int qlcnic_sriov_validate_num_vlans(struct qlcnic_sriov
*sriov
,
1954 struct qlcnic_vf_info
*vf
)
1958 spin_lock_bh(&vf
->vlan_list_lock
);
1960 if (vf
->num_vlan
>= sriov
->num_allowed_vlans
)
1963 spin_unlock_bh(&vf
->vlan_list_lock
);
1967 static int qlcnic_sriov_validate_vlan_cfg(struct qlcnic_adapter
*adapter
,
1970 struct qlcnic_sriov
*sriov
= adapter
->ahw
->sriov
;
1971 struct qlcnic_vf_info
*vf
;
1976 vf
= &adapter
->ahw
->sriov
->vf_info
[0];
1977 vlan_exist
= qlcnic_sriov_check_any_vlan(vf
);
1978 if (sriov
->vlan_mode
!= QLC_GUEST_VLAN_MODE
)
1982 if (qlcnic_83xx_vf_check(adapter
) && vlan_exist
)
1985 if (qlcnic_sriov_validate_num_vlans(sriov
, vf
))
1988 if (sriov
->any_vlan
) {
1989 for (i
= 0; i
< sriov
->num_allowed_vlans
; i
++) {
1990 if (sriov
->allowed_vlans
[i
] == vid
)
1998 if (!vlan_exist
|| qlcnic_sriov_check_vlan_id(sriov
, vf
, vid
))
2005 static void qlcnic_sriov_vlan_operation(struct qlcnic_vf_info
*vf
, u16 vlan_id
,
2006 enum qlcnic_vlan_operations opcode
)
2008 struct qlcnic_adapter
*adapter
= vf
->adapter
;
2009 struct qlcnic_sriov
*sriov
;
2011 sriov
= adapter
->ahw
->sriov
;
2013 if (!vf
->sriov_vlans
)
2016 spin_lock_bh(&vf
->vlan_list_lock
);
2020 qlcnic_sriov_add_vlan_id(sriov
, vf
, vlan_id
);
2022 case QLC_VLAN_DELETE
:
2023 qlcnic_sriov_del_vlan_id(sriov
, vf
, vlan_id
);
2026 netdev_err(adapter
->netdev
, "Invalid VLAN operation\n");
2029 spin_unlock_bh(&vf
->vlan_list_lock
);
2033 int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter
*adapter
,
2036 struct qlcnic_sriov
*sriov
= adapter
->ahw
->sriov
;
2037 struct net_device
*netdev
= adapter
->netdev
;
2038 struct qlcnic_vf_info
*vf
;
2039 struct qlcnic_cmd_args cmd
;
2042 memset(&cmd
, 0, sizeof(cmd
));
2046 vf
= &adapter
->ahw
->sriov
->vf_info
[0];
2047 ret
= qlcnic_sriov_validate_vlan_cfg(adapter
, vid
, enable
);
2051 ret
= qlcnic_sriov_alloc_bc_mbx_args(&cmd
,
2052 QLCNIC_BC_CMD_CFG_GUEST_VLAN
);
2056 cmd
.req
.arg
[1] = (enable
& 1) | vid
<< 16;
2058 qlcnic_sriov_cleanup_async_list(&sriov
->bc
);
2059 ret
= qlcnic_issue_cmd(adapter
, &cmd
);
2061 dev_err(&adapter
->pdev
->dev
,
2062 "Failed to configure guest VLAN, err=%d\n", ret
);
2064 netif_addr_lock_bh(netdev
);
2065 qlcnic_free_mac_list(adapter
);
2066 netif_addr_unlock_bh(netdev
);
2069 qlcnic_sriov_vlan_operation(vf
, vid
, QLC_VLAN_ADD
);
2071 qlcnic_sriov_vlan_operation(vf
, vid
, QLC_VLAN_DELETE
);
2073 netif_addr_lock_bh(netdev
);
2074 qlcnic_set_multi(netdev
);
2075 netif_addr_unlock_bh(netdev
);
2078 qlcnic_free_mbx_args(&cmd
);
2082 static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter
*adapter
)
2084 struct list_head
*head
= &adapter
->mac_list
;
2085 struct qlcnic_mac_vlan_list
*cur
;
2087 while (!list_empty(head
)) {
2088 cur
= list_entry(head
->next
, struct qlcnic_mac_vlan_list
, list
);
2089 qlcnic_sre_macaddr_change(adapter
, cur
->mac_addr
, cur
->vlan_id
,
2091 list_del(&cur
->list
);
2097 static int qlcnic_sriov_vf_shutdown(struct pci_dev
*pdev
)
2099 struct qlcnic_adapter
*adapter
= pci_get_drvdata(pdev
);
2100 struct net_device
*netdev
= adapter
->netdev
;
2103 netif_device_detach(netdev
);
2104 qlcnic_cancel_idc_work(adapter
);
2106 if (netif_running(netdev
))
2107 qlcnic_down(adapter
, netdev
);
2109 qlcnic_sriov_channel_cfg_cmd(adapter
, QLCNIC_BC_CMD_CHANNEL_TERM
);
2110 qlcnic_sriov_cfg_bc_intr(adapter
, 0);
2111 qlcnic_83xx_disable_mbx_intr(adapter
);
2112 cancel_delayed_work_sync(&adapter
->idc_aen_work
);
2114 retval
= pci_save_state(pdev
);
2121 static int qlcnic_sriov_vf_resume(struct qlcnic_adapter
*adapter
)
2123 struct qlc_83xx_idc
*idc
= &adapter
->ahw
->idc
;
2124 struct net_device
*netdev
= adapter
->netdev
;
2127 set_bit(QLC_83XX_MODULE_LOADED
, &idc
->status
);
2128 qlcnic_83xx_enable_mbx_interrupt(adapter
);
2129 err
= qlcnic_sriov_cfg_bc_intr(adapter
, 1);
2133 err
= qlcnic_sriov_channel_cfg_cmd(adapter
, QLCNIC_BC_CMD_CHANNEL_INIT
);
2135 if (netif_running(netdev
)) {
2136 err
= qlcnic_up(adapter
, netdev
);
2138 qlcnic_restore_indev_addr(netdev
, NETDEV_UP
);
2142 netif_device_attach(netdev
);
2143 qlcnic_schedule_work(adapter
, qlcnic_sriov_vf_poll_dev_state
,
2148 void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter
*adapter
)
2150 struct qlcnic_sriov
*sriov
= adapter
->ahw
->sriov
;
2151 struct qlcnic_vf_info
*vf
;
2154 for (i
= 0; i
< sriov
->num_vfs
; i
++) {
2155 vf
= &sriov
->vf_info
[i
];
2156 vf
->sriov_vlans
= kcalloc(sriov
->num_allowed_vlans
,
2157 sizeof(*vf
->sriov_vlans
), GFP_KERNEL
);
2161 void qlcnic_sriov_free_vlans(struct qlcnic_adapter
*adapter
)
2163 struct qlcnic_sriov
*sriov
= adapter
->ahw
->sriov
;
2164 struct qlcnic_vf_info
*vf
;
2167 for (i
= 0; i
< sriov
->num_vfs
; i
++) {
2168 vf
= &sriov
->vf_info
[i
];
2169 kfree(vf
->sriov_vlans
);
2170 vf
->sriov_vlans
= NULL
;
2174 void qlcnic_sriov_add_vlan_id(struct qlcnic_sriov
*sriov
,
2175 struct qlcnic_vf_info
*vf
, u16 vlan_id
)
2179 for (i
= 0; i
< sriov
->num_allowed_vlans
; i
++) {
2180 if (!vf
->sriov_vlans
[i
]) {
2181 vf
->sriov_vlans
[i
] = vlan_id
;
2188 void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov
*sriov
,
2189 struct qlcnic_vf_info
*vf
, u16 vlan_id
)
2193 for (i
= 0; i
< sriov
->num_allowed_vlans
; i
++) {
2194 if (vf
->sriov_vlans
[i
] == vlan_id
) {
2195 vf
->sriov_vlans
[i
] = 0;
2202 bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info
*vf
)
2206 spin_lock_bh(&vf
->vlan_list_lock
);
2211 spin_unlock_bh(&vf
->vlan_list_lock
);