/* bnx2x_sriov.c: QLogic Everest network driver.
 *
 * Copyright 2009-2013 Broadcom Corporation
 * Copyright 2014 QLogic Corporation
 * All rights reserved
 *
 * Unless you and QLogic execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other QLogic software provided under a
 * license other than the GPL, without QLogic's express prior written
 * consent.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Shmulik Ravid
 *	       Ariel Elior <ariel.elior@qlogic.com>
 *
 */
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sp.h"
#include <linux/crc32.h>
#include <linux/if_vlan.h>

static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx,
			    struct bnx2x_virtf **vf,
			    struct pf_vf_bulletin_content **bulletin,
			    bool test_queue);

/* General service functions */
static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
				  u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
				 u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

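/* Return the index of the VF whose absolute FID equals @abs_vfid; when no
 * such VF exists the loop runs to completion and the returned index equals
 * BNX2X_NR_VIRTFN(bp), which callers treat as "not found".
 */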
int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	int idx;

	for_each_vf(bp, idx)
		if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
			break;
	return idx;
}

static
struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
	return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
}

static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
				u8 igu_sb_id, u8 segment, u16 index, u8 op,
				u8 update)
{
	/* acking a VF sb through the PF - use the GRC */
	u32 ctl;
	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
	u32 func_encode = vf->abs_vfid;
	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
	struct igu_regular cmd_data = {0};

	cmd_data.sb_id_and_flags =
		((index << IGU_REGULAR_SB_INDEX_SHIFT) |
		 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
		 (update << IGU_REGULAR_BUPDATE_SHIFT) |
		 (op << IGU_REGULAR_ENABLE_INT_SHIFT));

	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
	      func_encode << IGU_CTRL_REG_FID_SHIFT |
	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   cmd_data.sb_id_and_flags, igu_addr_data);
	REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
	mmiowb();
	barrier();

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   ctl, igu_addr_ctl);
	REG_WR(bp, igu_addr_ctl, ctl);
	mmiowb();
	barrier();
}

static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp,
				      struct bnx2x_virtf *vf,
				      bool print_err)
{
	if (!bnx2x_leading_vfq(vf, sp_initialized)) {
		if (print_err)
			BNX2X_ERR("Slowpath objects not yet initialized!\n");
		else
			DP(BNX2X_MSG_IOV, "Slowpath objects not yet initialized!\n");
		return false;
	}
	return true;
}

/* VFOP operations states */
void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
{
	DP(BNX2X_MSG_IOV,
	   "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->tx.sb_cq_index,
	   init_params->tx.hc_rate,
	   setup_params->flags,
	   setup_params->txq_params.traffic_type);
}

void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
{
	struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params;

	DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n"
	   "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->rx.sb_cq_index,
	   init_params->rx.hc_rate,
	   setup_params->gen_params.mtu,
	   rxq_params->buf_sz,
	   rxq_params->sge_buf_sz,
	   rxq_params->max_sges_pkt,
	   rxq_params->tpa_agg_sz,
	   setup_params->flags,
	   rxq_params->drop_flags,
	   rxq_params->cache_line_log);
}

void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
			   struct bnx2x_virtf *vf,
			   struct bnx2x_vf_queue *q,
			   struct bnx2x_vf_queue_construct_params *p,
			   unsigned long q_type)
{
	struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
	struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;

	/* INIT */

	/* Enable host coalescing in the transition to INIT state */
	if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags);

	if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags);

	/* FW SB ID */
	init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);

	/* context */
	init_p->cxts[0] = q->cxt;

	/* SETUP */

	/* Setup-op general parameters */
	setup_p->gen_params.spcl_id = vf->sp_cl_id;
	setup_p->gen_params.stat_id = vfq_stat_id(vf, q);
	setup_p->gen_params.fp_hsi = vf->fp_hsi;

	/* Setup-op flags:
	 * collect statistics, zero statistics, local-switching, security,
	 * OV for Flex10, RSS and MCAST for leading
	 */
	if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags))
		__set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags);

	/* for VFs, enable tx switching, bd coherency, and mac address
	 * anti-spoofing
	 */
	__set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
	__set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
	__set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);

	/* Setup-op rx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
		struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;

		rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
		rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
		rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid);

		if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags))
			rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES;
	}

	/* Setup-op tx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) {
		setup_p->txq_params.tss_leading_cl_id = vf->leading_rss;
		setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	}
}

static int bnx2x_vf_queue_create(struct bnx2x *bp,
				 struct bnx2x_virtf *vf, int qid,
				 struct bnx2x_vf_queue_construct_params *qctor)
{
	struct bnx2x_queue_state_params *q_params;
	int rc = 0;

	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);

	/* Prepare ramrod information */
	q_params = &qctor->qstate;
	q_params->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
	set_bit(RAMROD_COMP_WAIT, &q_params->ramrod_flags);

	if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
	    BNX2X_Q_LOGICAL_STATE_ACTIVE) {
		DP(BNX2X_MSG_IOV, "queue was already up. Aborting gracefully\n");
		goto out;
	}

	/* Run Queue 'construction' ramrods */
	q_params->cmd = BNX2X_Q_CMD_INIT;
	rc = bnx2x_queue_state_change(bp, q_params);
	if (rc)
		goto out;

	memcpy(&q_params->params.setup, &qctor->prep_qsetup,
	       sizeof(struct bnx2x_queue_setup_params));
	q_params->cmd = BNX2X_Q_CMD_SETUP;
	rc = bnx2x_queue_state_change(bp, q_params);
	if (rc)
		goto out;

	/* enable interrupts */
	bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, bnx2x_vfq(vf, qid, sb_idx)),
			    USTORM_ID, 0, IGU_INT_ENABLE, 0);
out:
	return rc;
}

static int bnx2x_vf_queue_destroy(struct bnx2x *bp, struct bnx2x_virtf *vf,
				  int qid)
{
	enum bnx2x_queue_cmd cmds[] = {BNX2X_Q_CMD_HALT,
				       BNX2X_Q_CMD_TERMINATE,
				       BNX2X_Q_CMD_CFC_DEL};
	struct bnx2x_queue_state_params q_params;
	int rc, i;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	/* Prepare ramrod information */
	memset(&q_params, 0, sizeof(struct bnx2x_queue_state_params));
	q_params.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
	set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);

	if (bnx2x_get_q_logical_state(bp, q_params.q_obj) ==
	    BNX2X_Q_LOGICAL_STATE_STOPPED) {
		DP(BNX2X_MSG_IOV, "queue was already stopped. Aborting gracefully\n");
		goto out;
	}

	/* Run Queue 'destruction' ramrods */
	for (i = 0; i < ARRAY_SIZE(cmds); i++) {
		q_params.cmd = cmds[i];
		rc = bnx2x_queue_state_change(bp, &q_params);
		if (rc) {
			BNX2X_ERR("Failed to run Queue command %d\n", cmds[i]);
			return rc;
		}
	}
out:
	/* Clean Context */
	if (bnx2x_vfq(vf, qid, cxt)) {
		bnx2x_vfq(vf, qid, cxt)->ustorm_ag_context.cdu_usage = 0;
		bnx2x_vfq(vf, qid, cxt)->xstorm_ag_context.cdu_reserved = 0;
	}

	return 0;
}

static void
bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
{
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
	if (vf) {
		/* the first igu entry belonging to VFs of this PF */
		if (!BP_VFDB(bp)->first_vf_igu_entry)
			BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id;

		/* the first igu entry belonging to this VF */
		if (!vf_sb_count(vf))
			vf->igu_base_id = igu_sb_id;

		++vf_sb_count(vf);
		++vf->sb_count;
	}
	BP_VFDB(bp)->vf_sbs_pool++;
}

static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp,
					struct bnx2x_vlan_mac_obj *obj,
					atomic_t *counter)
{
	struct list_head *pos;
	int read_lock;
	int cnt = 0;

	read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
	if (read_lock)
		DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n");

	list_for_each(pos, &obj->head)
		cnt++;

	if (!read_lock)
		bnx2x_vlan_mac_h_read_unlock(bp, obj);

	atomic_set(counter, cnt);
}

static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf,
				   int qid, bool drv_only, int type)
{
	struct bnx2x_vlan_mac_ramrod_params ramrod;
	int rc;

	DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid,
	   (type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MACs" :
	   (type == BNX2X_VF_FILTER_MAC) ? "MACs" : "VLANs");

	/* Prepare ramrod params */
	memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
	if (type == BNX2X_VF_FILTER_VLAN_MAC) {
		set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_mac_obj);
	} else if (type == BNX2X_VF_FILTER_MAC) {
		set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
	} else {
		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
	}
	ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL;

	set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
	if (drv_only)
		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
	else
		set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);

	/* Start deleting */
	rc = ramrod.vlan_mac_obj->delete_all(bp,
					     ramrod.vlan_mac_obj,
					     &ramrod.user_req.vlan_mac_flags,
					     &ramrod.ramrod_flags);
	if (rc) {
		BNX2X_ERR("Failed to delete all %s\n",
			  (type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MACs" :
			  (type == BNX2X_VF_FILTER_MAC) ? "MACs" : "VLANs");
		return rc;
	}

	return 0;
}

static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
				    struct bnx2x_virtf *vf, int qid,
				    struct bnx2x_vf_mac_vlan_filter *filter,
				    bool drv_only)
{
	struct bnx2x_vlan_mac_ramrod_params ramrod;
	int rc;

	DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n",
	   vf->abs_vfid, filter->add ? "Adding" : "Deleting",
	   (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MAC" :
	   (filter->type == BNX2X_VF_FILTER_MAC) ? "MAC" : "VLAN");

	/* Prepare ramrod params */
	memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
	if (filter->type == BNX2X_VF_FILTER_VLAN_MAC) {
		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_mac_obj);
		ramrod.user_req.u.vlan.vlan = filter->vid;
		memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN);
		set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
	} else if (filter->type == BNX2X_VF_FILTER_VLAN) {
		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
		ramrod.user_req.u.vlan.vlan = filter->vid;
	} else {
		set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
		memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN);
	}
	ramrod.user_req.cmd = filter->add ? BNX2X_VLAN_MAC_ADD :
					    BNX2X_VLAN_MAC_DEL;

	set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
	if (drv_only)
		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
	else
		set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);

	/* Add/Remove the filter */
	rc = bnx2x_config_vlan_mac(bp, &ramrod);
	if (rc && rc != -EEXIST) {
		BNX2X_ERR("Failed to %s %s\n",
			  filter->add ? "add" : "delete",
			  (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ?
			  "VLAN-MAC" :
			  (filter->type == BNX2X_VF_FILTER_MAC) ?
			  "MAC" : "VLAN");
		return rc;
	}

	return 0;
}

int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
				  struct bnx2x_vf_mac_vlan_filters *filters,
				  int qid, bool drv_only)
{
	int rc = 0, i;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
		return -EINVAL;

	/* Prepare ramrod params */
	for (i = 0; i < filters->count; i++) {
		rc = bnx2x_vf_mac_vlan_config(bp, vf, qid,
					      &filters->filters[i], drv_only);
		if (rc)
			break;
	}

	/* Rollback if needed */
	if (i != filters->count) {
		BNX2X_ERR("Managed only %d/%d filters - rolling back\n",
			  i, filters->count + 1);
		while (--i >= 0) {
			filters->filters[i].add = !filters->filters[i].add;
			bnx2x_vf_mac_vlan_config(bp, vf, qid,
						 &filters->filters[i],
						 drv_only);
		}
	}

	/* It's our responsibility to free the filters */
	kfree(filters);

	return rc;
}

int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
			 struct bnx2x_vf_queue_construct_params *qctor)
{
	int rc;

	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);

	rc = bnx2x_vf_queue_create(bp, vf, qid, qctor);
	if (rc)
		goto op_err;

	/* Schedule the configuration of any pending vlan filters */
	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN,
			       BNX2X_MSG_IOV);
	return 0;
op_err:
	BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
	return rc;
}

static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      int qid)
{
	int rc;

	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);

	/* If needed, clean the filtering data base */
	if ((qid == LEADING_IDX) &&
	    bnx2x_validate_vf_sp_objs(bp, vf, false)) {
		rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
					     BNX2X_VF_FILTER_VLAN_MAC);
		if (rc)
			goto op_err;
		rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
					     BNX2X_VF_FILTER_VLAN);
		if (rc)
			goto op_err;
		rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
					     BNX2X_VF_FILTER_MAC);
		if (rc)
			goto op_err;
	}

	/* Terminate queue */
	if (bnx2x_vfq(vf, qid, sp_obj).state != BNX2X_Q_STATE_RESET) {
		struct bnx2x_queue_state_params qstate;

		memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
		qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
		qstate.q_obj->state = BNX2X_Q_STATE_STOPPED;
		qstate.cmd = BNX2X_Q_CMD_TERMINATE;
		set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);
		rc = bnx2x_queue_state_change(bp, &qstate);
		if (rc)
			goto op_err;
	}

	return 0;
op_err:
	BNX2X_ERR("vf[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
	return rc;
}

int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
		   bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only)
{
	struct bnx2x_mcast_list_elem *mc = NULL;
	struct bnx2x_mcast_ramrod_params mcast;
	int rc, i;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	/* Prepare Multicast command */
	memset(&mcast, 0, sizeof(struct bnx2x_mcast_ramrod_params));
	mcast.mcast_obj = &vf->mcast_obj;
	if (drv_only)
		set_bit(RAMROD_DRV_CLR_ONLY, &mcast.ramrod_flags);
	else
		set_bit(RAMROD_COMP_WAIT, &mcast.ramrod_flags);
	if (mc_num) {
		mc = kzalloc(mc_num * sizeof(struct bnx2x_mcast_list_elem),
			     GFP_KERNEL);
		if (!mc) {
			BNX2X_ERR("Cannot configure multicasts due to lack of memory\n");
			return -ENOMEM;
		}
	}

	if (mc_num) {
		INIT_LIST_HEAD(&mcast.mcast_list);
		for (i = 0; i < mc_num; i++) {
			mc[i].mac = mcasts[i];
			list_add_tail(&mc[i].link,
				      &mcast.mcast_list);
		}

		/* add new mcasts */
		mcast.mcast_list_len = mc_num;
		rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_SET);
		if (rc)
			BNX2X_ERR("Failed to set multicasts\n");
	} else {
		/* clear existing mcasts */
		rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL);
		if (rc)
			BNX2X_ERR("Failed to remove multicasts\n");
	}

	kfree(mc);

	return rc;
}

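/* Prepare an rx-mode ramrod for one VF queue: the same accept flags are used
 * for both the RX and TX directions, and BNX2X_FILTER_RX_MODE_PENDING is set
 * in the VF's filter state before the ramrod is submitted by the caller.
 */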
static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
				  struct bnx2x_rx_mode_ramrod_params *ramrod,
				  struct bnx2x_virtf *vf,
				  unsigned long accept_flags)
{
	struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);

	memset(ramrod, 0, sizeof(*ramrod));
	ramrod->cid = vfq->cid;
	ramrod->cl_id = vfq_cl_id(vf, vfq);
	ramrod->rx_mode_obj = &bp->rx_mode_obj;
	ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
	ramrod->rx_accept_flags = accept_flags;
	ramrod->tx_accept_flags = accept_flags;
	ramrod->pstate = &vf->filter_state;
	ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;

	set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
	set_bit(RAMROD_RX, &ramrod->ramrod_flags);
	set_bit(RAMROD_TX, &ramrod->ramrod_flags);

	ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
	ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
}

int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf,
		    int qid, unsigned long accept_flags)
{
	struct bnx2x_rx_mode_ramrod_params ramrod;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	bnx2x_vf_prep_rx_mode(bp, qid, &ramrod, vf, accept_flags);
	set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
	vfq_get(vf, qid)->accept_flags = ramrod.rx_accept_flags;
	return bnx2x_config_rx_mode(bp, &ramrod);
}

int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid)
{
	int rc;

	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);

	/* Remove all classification configuration for leading queue */
	if (qid == LEADING_IDX) {
		rc = bnx2x_vf_rxmode(bp, vf, qid, 0);
		if (rc)
			goto op_err;

		/* Remove filtering if feasible */
		if (bnx2x_validate_vf_sp_objs(bp, vf, true)) {
			rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
						     false,
						     BNX2X_VF_FILTER_VLAN_MAC);
			if (rc)
				goto op_err;
			rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
						     false,
						     BNX2X_VF_FILTER_VLAN);
			if (rc)
				goto op_err;
			rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
						     false,
						     BNX2X_VF_FILTER_MAC);
			if (rc)
				goto op_err;
			rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false);
			if (rc)
				goto op_err;
		}
	}

	/* Destroy queue */
	rc = bnx2x_vf_queue_destroy(bp, vf, qid);
	if (rc)
		goto op_err;
	return rc;
op_err:
	BNX2X_ERR("vf[%d:%d] error: rc %d\n",
		  vf->abs_vfid, qid, rc);
	return rc;
}

/* VF enable primitives
 * when pretend is required the caller is responsible
 * for calling pretend prior to calling these routines
 */

/* internal vf enable - until vf is enabled internally all transactions
 * are blocked. This routine should always be called last with pretend.
 */
static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
{
	REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
}

/* clears vf error in all semi blocks */
static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
}

static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
	u32 was_err_reg = 0;

	switch (was_err_group) {
	case 0:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
		break;
	case 1:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
		break;
	case 2:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
		break;
	case 3:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
		break;
	}
	REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
}

static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i;
	u32 val;

	/* Set VF masks and configuration - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));

	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);

	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
	val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
	val &= ~IGU_VF_CONF_PARENT_MASK;
	val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT;
	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);

	DP(BNX2X_MSG_IOV,
	   "value in IGU_REG_VF_CONFIGURATION of vf %d after write is 0x%08x\n",
	   vf->abs_vfid, val);

	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf_sb_count(vf); i++) {
		u8 igu_sb_id = vf_igu_sb(vf, i);

		/* zero prod memory */
		REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);

		/* clear sb state machine */
		bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
				       false /* VF */);

		/* disable + update */
		bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
				    IGU_INT_DISABLE, 1);
	}
}

void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
{
	/* set the VF-PF association in the FW */
	storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
	storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);

	/* clear vf errors */
	bnx2x_vf_semi_clear_err(bp, abs_vfid);
	bnx2x_vf_pglue_clear_err(bp, abs_vfid);

	/* internal vf-enable - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
	DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
	bnx2x_vf_enable_internal(bp, true);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	/* Reset vf in IGU - interrupts are still disabled */
	bnx2x_vf_igu_reset(bp, vf);

	/* pretend to enable the vf with the PBF */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	REG_WR(bp, PBF_REG_DISABLE_VF, 0);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
{
	struct pci_dev *dev;
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf)
		return false;

	dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
	if (dev)
		return bnx2x_is_pcie_pending(dev);
	return false;
}

int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
{
	/* Verify no pending pci transactions */
	if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
		BNX2X_ERR("PCIE Transactions still pending\n");

	return 0;
}

/* must be called after the number of PF queues and the number of VFs are
 * both known
 */
static void
bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct vf_pf_resc_request *resc = &vf->alloc_resc;

	/* will be set only during VF-ACQUIRE */
	resc->num_rxqs = 0;
	resc->num_txqs = 0;

	resc->num_mac_filters = VF_MAC_CREDIT_CNT;
	resc->num_vlan_filters = VF_VLAN_CREDIT_CNT;

	/* no real limitation */
	resc->num_mc_filters = 0;

	/* num_sbs already set */
	resc->num_sbs = vf->sb_count;
}

/* FLR routines: */
static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	/* reset the state variables */
	bnx2x_iov_static_resc(bp, vf);
	vf->state = VF_FREE;
}

static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);

	/* DQ usage counter */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT,
					"DQ VF usage counter timed out",
					poll_cnt);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* FW cleanup command - poll for the results */
	if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid),
				   poll_cnt))
		BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid);

	/* verify TX hw is flushed */
	bnx2x_tx_hw_flushed(bp, poll_cnt);
}

static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int rc, i;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	/* the cleanup operations are valid if and only if the VF
	 * was first acquired.
	 */
	for (i = 0; i < vf_rxq_count(vf); i++) {
		rc = bnx2x_vf_queue_flr(bp, vf, i);
		if (rc)
			goto out;
	}

	/* remove multicasts */
	bnx2x_vf_mcast(bp, vf, NULL, 0, true);

	/* dispatch final cleanup and wait for HW queues to flush */
	bnx2x_vf_flr_clnup_hw(bp, vf);

	/* release VF resources */
	bnx2x_vf_free_resc(bp, vf);

	/* re-open the mailbox */
	bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
	return;
out:
	BNX2X_ERR("vf[%d:%d] failed flr: rc %d\n",
		  vf->abs_vfid, i, rc);
}

static void bnx2x_vf_flr_clnup(struct bnx2x *bp)
{
	struct bnx2x_virtf *vf;
	int i;

	for (i = 0; i < BNX2X_NR_VIRTFN(bp); i++) {
		/* VF should be RESET & in FLR cleanup states */
		if (bnx2x_vf(bp, i, state) != VF_RESET ||
		    !bnx2x_vf(bp, i, flr_clnup_stage))
			continue;

		DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n",
		   i, BNX2X_NR_VIRTFN(bp));

		vf = BP_VF(bp, i);

		/* lock the vf pf channel */
		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);

		/* invoke the VF FLR SM */
		bnx2x_vf_flr(bp, vf);

		/* mark the VF to be ACKED and continue */
		vf->flr_clnup_stage = false;
		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
	}

	/* Acknowledge the handled VFs.
	 * We acknowledge all the VFs for which an FLR was requested, even
	 * those we never opened, since the mcp will interrupt us immediately
	 * again if we only ack some of the bits, resulting in an endless
	 * loop. This can happen for example in KVM where an 'all ones' flr
	 * request is sometimes given by the hypervisor.
	 */
	DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i],
			  bp->vfdb->flrd_vfs[i]);

	bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0);

	/* clear the acked bits - better yet if the MCP implemented
	 * write to clear semantics
	 */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0);
}

void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
{
	int i;

	/* Read FLR'd VFs */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]);

	DP(BNX2X_MSG_MCP,
	   "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);

	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);
		u32 reset = 0;

		if (vf->abs_vfid < 32)
			reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid);
		else
			reset = bp->vfdb->flrd_vfs[1] &
				(1 << (vf->abs_vfid - 32));

		if (reset) {
			/* set as reset and ready for cleanup */
			vf->state = VF_RESET;
			vf->flr_clnup_stage = true;

			DP(BNX2X_MSG_IOV,
			   "Initiating Final cleanup for VF %d\n",
			   vf->abs_vfid);
		}
	}

	/* do the FLR cleanup for all marked VFs */
	bnx2x_vf_flr_clnup(bp);
}

/* IOV global initialization routines */
void bnx2x_iov_init_dq(struct bnx2x *bp)
{
	if (!IS_SRIOV(bp))
		return;

	/* Set the DQ such that the CID reflect the abs_vfid */
	REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
	REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));

	/* Set VFs starting CID. If it's > 0 the preceding CIDs belong to
	 * the PF L2 queues
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);

	/* The VF window size is the log2 of the max number of CIDs per VF */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);

	/* The VF doorbell size 0 - *B, 4 - 128B. We set it here to match
	 * the Pf doorbell size although the 2 are independent.
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3);

	/* No security checks for now -
	 * configure single rule (out of 16) mask = 0x1, value = 0x0,
	 * CID range 0 - 0x1ffff
	 */
	REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
	REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);

	/* set the VF doorbell threshold. This threshold represents the amount
	 * of doorbells allowed in the main DORQ fifo for a specific VF.
	 */
	REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 64);
}

void bnx2x_iov_init_dmae(struct bnx2x *bp)
{
	if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
		REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
}

static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return dev->bus->number + ((dev->devfn + iov->offset +
				    iov->stride * vfid) >> 8);
}

static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
}

static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i, n;
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
		u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
		u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);

		size /= iov->total;
		vf->bars[n].bar = start + size * vf->abs_vfid;
		vf->bars[n].size = size;
	}
}

static int bnx2x_ari_enabled(struct pci_dev *dev)
{
	return dev->bus->self && dev->bus->self->ari_enabled;
}

static int
bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
{
	int sb_id;
	u32 val;
	u8 fid, current_pf = 0;

	/* IGU in normal mode - read CAM */
	for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
			continue;
		fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
		if (fid & IGU_FID_ENCODE_IS_PF)
			current_pf = fid & IGU_FID_PF_NUM_MASK;
		else if (current_pf == BP_FUNC(bp))
			bnx2x_vf_set_igu_info(bp, sb_id,
					      (fid & IGU_FID_VF_NUM_MASK));
		DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
		   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
		   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
		    (fid & IGU_FID_VF_NUM_MASK)), sb_id,
		   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
	}
	DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool);
	return BP_VFDB(bp)->vf_sbs_pool;
}

static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
{
	if (bp->vfdb) {
		kfree(bp->vfdb->vfqs);
		kfree(bp->vfdb->vfs);
		kfree(bp->vfdb);
	}
	bp->vfdb = NULL;
}

static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	int pos;
	struct pci_dev *dev = bp->pdev;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		BNX2X_ERR("failed to find SRIOV capability in device\n");
		return -ENODEV;
	}

	iov->pos = pos;
	DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
	pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	return 0;
}

static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	u32 val;

	/* read the SRIOV capability structure
	 * The fields can be read via configuration read or
	 * directly from the device (starting at offset PCICFG_OFFSET)
	 */
	if (bnx2x_sriov_pci_cfg_info(bp, iov))
		return -ENODEV;

	/* get the number of SRIOV bars */
	iov->nres = 0;

	/* read the first_vfid */
	val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
	iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
			       * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));

	DP(BNX2X_MSG_IOV,
	   "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
	   BP_FUNC(bp),
	   iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
	   iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	return 0;
}

/* must be called after PF bars are mapped */
int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
		       int num_vfs_param)
{
	int err, i;
	struct bnx2x_sriov *iov;
	struct pci_dev *dev = bp->pdev;

	bp->vfdb = NULL;

	/* verify is pf */
	if (IS_VF(bp))
		return 0;

	/* verify sriov capability is present in configuration space */
	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV))
		return 0;

	/* verify chip revision */
	if (CHIP_IS_E1x(bp))
		return 0;

	/* check if SRIOV support is turned off */
	if (!num_vfs_param)
		return 0;

	/* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
	if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
		BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
			  BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
		return 0;
	}

	/* SRIOV can be enabled only with MSIX */
	if (int_mode_param == BNX2X_INT_MODE_MSI ||
	    int_mode_param == BNX2X_INT_MODE_INTX) {
		BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
		return 0;
	}

	err = -EIO;
	/* verify ari is enabled */
	if (!bnx2x_ari_enabled(bp->pdev)) {
		BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV can not be enabled\n");
		return 0;
	}

	/* verify igu is in normal mode */
	if (CHIP_INT_MODE_IS_BC(bp)) {
		BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n");
		return 0;
	}

	/* allocate the vfs database */
	bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
	if (!bp->vfdb) {
		BNX2X_ERR("failed to allocate vf database\n");
		err = -ENOMEM;
		goto failed;
	}

	/* get the sriov info - Linux already collected all the pertinent
	 * information, however the sriov structure is for the private use
	 * of the pci module. Also we want this information regardless
	 * of the hypervisor.
	 */
	iov = &(bp->vfdb->sriov);
	err = bnx2x_sriov_info(bp, iov);
	if (err)
		goto failed;

	/* SR-IOV capability was enabled but there are no VFs */
	if (iov->total == 0)
		goto failed;

	iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param);

	DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n",
	   num_vfs_param, iov->nr_virtfn);

	/* allocate the vf array */
	bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
				BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
	if (!bp->vfdb->vfs) {
		BNX2X_ERR("failed to allocate vf array\n");
		err = -ENOMEM;
		goto failed;
	}

	/* Initial VF init - index and abs_vfid - nr_virtfn must be set */
	for_each_vf(bp, i) {
		bnx2x_vf(bp, i, index) = i;
		bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
		bnx2x_vf(bp, i, state) = VF_FREE;
		mutex_init(&bnx2x_vf(bp, i, op_mutex));
		bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
	}

	/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
	if (!bnx2x_get_vf_igu_cam_info(bp)) {
		BNX2X_ERR("No entries in IGU CAM for vfs\n");
		err = -EINVAL;
		goto failed;
	}

	/* allocate the queue arrays for all VFs */
	bp->vfdb->vfqs = kzalloc(
		BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue),
		GFP_KERNEL);

	if (!bp->vfdb->vfqs) {
		BNX2X_ERR("failed to allocate vf queue array\n");
		err = -ENOMEM;
		goto failed;
	}
	/* Prepare the VFs event synchronization mechanism */
	mutex_init(&bp->vfdb->event_mutex);

	mutex_init(&bp->vfdb->bulletin_mutex);

	if (SHMEM2_HAS(bp, sriov_switch_mode))
		SHMEM2_WR(bp, sriov_switch_mode, SRIOV_SWITCH_MODE_VEB);

	return 0;
failed:
	DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
	__bnx2x_iov_free_vfdb(bp);
	return err;
}

void bnx2x_iov_remove_one(struct bnx2x *bp)
{
	int vf_idx;

	/* if SRIOV is not enabled there's nothing to do */
	if (!IS_SRIOV(bp))
		return;

	bnx2x_disable_sriov(bp);

	/* disable access to all VFs */
	for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) {
		bnx2x_pretend_func(bp,
				   HW_VF_HANDLE(bp,
						bp->vfdb->sriov.first_vf_in_pf +
						vf_idx));
		DP(BNX2X_MSG_IOV, "disabling internal access for vf %d\n",
		   bp->vfdb->sriov.first_vf_in_pf + vf_idx);
		bnx2x_vf_enable_internal(bp, 0);
		bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
	}

	/* free vf database */
	__bnx2x_iov_free_vfdb(bp);
}

void bnx2x_iov_free_mem(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return;

	/* free vfs hw contexts */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = &bp->vfdb->context[i];
		BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
	}

	BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
		       BP_VFDB(bp)->sp_dma.mapping,
		       BP_VFDB(bp)->sp_dma.size);

	BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
		       BP_VF_MBX_DMA(bp)->mapping,
		       BP_VF_MBX_DMA(bp)->size);

	BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr,
		       BP_VF_BULLETIN_DMA(bp)->mapping,
		       BP_VF_BULLETIN_DMA(bp)->size);
}

int bnx2x_iov_alloc_mem(struct bnx2x *bp)
{
	size_t tot_size;
	int i, rc = 0;

	if (!IS_SRIOV(bp))
		return rc;

	/* allocate vfs hw contexts */
	tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
		BNX2X_CIDS_PER_VF * sizeof(union cdu_context);

	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
		cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);

		if (cxt->size) {
			cxt->addr = BNX2X_PCI_ALLOC(&cxt->mapping, cxt->size);
			if (!cxt->addr)
				goto alloc_mem_err;
		} else {
			cxt->addr = NULL;
			cxt->mapping = 0;
		}
		tot_size -= cxt->size;
	}

	/* allocate vfs ramrods dma memory - client_init and set_mac */
	tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
	BP_VFDB(bp)->sp_dma.addr = BNX2X_PCI_ALLOC(&BP_VFDB(bp)->sp_dma.mapping,
						   tot_size);
	if (!BP_VFDB(bp)->sp_dma.addr)
		goto alloc_mem_err;
	BP_VFDB(bp)->sp_dma.size = tot_size;

	/* allocate mailboxes */
	tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
	BP_VF_MBX_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_MBX_DMA(bp)->mapping,
						  tot_size);
	if (!BP_VF_MBX_DMA(bp)->addr)
		goto alloc_mem_err;

	BP_VF_MBX_DMA(bp)->size = tot_size;

	/* allocate local bulletin boards */
	tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE;
	BP_VF_BULLETIN_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_BULLETIN_DMA(bp)->mapping,
						       tot_size);
	if (!BP_VF_BULLETIN_DMA(bp)->addr)
		goto alloc_mem_err;

	BP_VF_BULLETIN_DMA(bp)->size = tot_size;

	return 0;

alloc_mem_err:
	return -ENOMEM;
}

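/* Initialize the queue state object of a single VF queue; sp_initialized
 * stays false until the queue's classification (vlan/mac) objects are set up.
 */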
static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
			   struct bnx2x_vf_queue *q)
{
	u8 cl_id = vfq_cl_id(vf, q);
	u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
	unsigned long q_type = 0;

	set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
	set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);

	/* Queue State object */
	bnx2x_init_queue_obj(bp, &q->sp_obj,
			     cl_id, &q->cid, 1, func_id,
			     bnx2x_vf_sp(bp, vf, q_data),
			     bnx2x_vf_sp_map(bp, vf, q_data),
			     q_type);

	/* sp indication is set only when vlan/mac/etc. are initialized */
	q->sp_initialized = false;

	DP(BNX2X_MSG_IOV,
	   "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n",
	   vf->abs_vfid, q->sp_obj.func_id, q->cid);
}

static int bnx2x_max_speed_cap(struct bnx2x *bp)
{
	u32 supported = bp->port.supported[bnx2x_get_link_cfg_idx(bp)];

	if (supported &
	    (SUPPORTED_20000baseMLD2_Full | SUPPORTED_20000baseKR2_Full))
		return 20000;

	return 10000; /* assume lowest supported speed is 10G */
}

int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx)
{
	struct bnx2x_link_report_data *state = &bp->last_reported_link;
	struct pf_vf_bulletin_content *bulletin;
	struct bnx2x_virtf *vf;
	bool update = true;
	int rc = 0;

	/* sanity and init */
	rc = bnx2x_vf_op_prep(bp, idx, &vf, &bulletin, false);
	if (rc)
		return rc;

	mutex_lock(&bp->vfdb->bulletin_mutex);

	if (vf->link_cfg == IFLA_VF_LINK_STATE_AUTO) {
		bulletin->valid_bitmap |= 1 << LINK_VALID;

		bulletin->link_speed = state->line_speed;
		bulletin->link_flags = 0;
		if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
			     &state->link_report_flags))
			bulletin->link_flags |= VFPF_LINK_REPORT_LINK_DOWN;
		if (test_bit(BNX2X_LINK_REPORT_FD,
			     &state->link_report_flags))
			bulletin->link_flags |= VFPF_LINK_REPORT_FULL_DUPLEX;
		if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
			     &state->link_report_flags))
			bulletin->link_flags |= VFPF_LINK_REPORT_RX_FC_ON;
		if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
			     &state->link_report_flags))
			bulletin->link_flags |= VFPF_LINK_REPORT_TX_FC_ON;
	} else if (vf->link_cfg == IFLA_VF_LINK_STATE_DISABLE &&
		   !(bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)) {
		bulletin->valid_bitmap |= 1 << LINK_VALID;
		bulletin->link_flags |= VFPF_LINK_REPORT_LINK_DOWN;
	} else if (vf->link_cfg == IFLA_VF_LINK_STATE_ENABLE &&
		   (bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)) {
		bulletin->valid_bitmap |= 1 << LINK_VALID;
		bulletin->link_speed = bnx2x_max_speed_cap(bp);
		bulletin->link_flags &= ~VFPF_LINK_REPORT_LINK_DOWN;
	} else {
		update = false;
	}

	if (update) {
		DP(NETIF_MSG_LINK | BNX2X_MSG_IOV,
		   "vf %d mode %u speed %d flags %x\n", idx,
		   vf->link_cfg, bulletin->link_speed, bulletin->link_flags);

		/* Post update on VF's bulletin board */
		rc = bnx2x_post_vf_bulletin(bp, idx);
		if (rc) {
			BNX2X_ERR("failed to update VF[%d] bulletin\n", idx);
			goto out;
		}
	}

out:
	mutex_unlock(&bp->vfdb->bulletin_mutex);
	return rc;
}

int bnx2x_set_vf_link_state(struct net_device *dev, int idx, int link_state)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_virtf *vf = BP_VF(bp, idx);

	if (!vf)
		return -EINVAL;

	if (vf->link_cfg == link_state)
		return 0; /* nothing to do */

	vf->link_cfg = link_state;

	return bnx2x_iov_link_update_vf(bp, idx);
}

void bnx2x_iov_link_update(struct bnx2x *bp)
{
	int vfid;

	if (!IS_SRIOV(bp))
		return;

	for_each_vf(bp, vfid)
		bnx2x_iov_link_update_vf(bp, vfid);
}

1534int bnx2x_iov_nic_init(struct bnx2x *bp)
1535{
b9871bcf 1536 int vfid;
b56e9670
AE
1537
1538 if (!IS_SRIOV(bp)) {
1539 DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
1540 return 0;
1541 }
1542
1543 DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);
1544
03c22ea3
AE
1545 /* let FLR complete ... */
1546 msleep(100);
1547
b56e9670
AE
1548 /* initialize vf database */
1549 for_each_vf(bp, vfid) {
1550 struct bnx2x_virtf *vf = BP_VF(bp, vfid);
1551
1552 int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
1553 BNX2X_CIDS_PER_VF;
1554
1555 union cdu_context *base_cxt = (union cdu_context *)
1556 BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
1557 (base_vf_cid & (ILT_PAGE_CIDS-1));
1558
1559 DP(BNX2X_MSG_IOV,
1560 "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
1561 vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
1562 BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);
1563
1564 /* init statically provisioned resources */
b9871bcf 1565 bnx2x_iov_static_resc(bp, vf);
b56e9670
AE
1566
1567 /* queues are initialized during VF-ACQUIRE */
b56e9670
AE
1568 vf->filter_state = 0;
1569 vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);
1570
05cc5a39
YM
1571 bnx2x_init_credit_pool(&vf->vf_vlans_pool, 0,
1572 vf_vlan_rules_cnt(vf));
1573 bnx2x_init_credit_pool(&vf->vf_macs_pool, 0,
1574 vf_mac_rules_cnt(vf));
1575
b56e9670
AE
1576 /* init mcast object - This object will be re-initialized
1577 * during VF-ACQUIRE with the proper cl_id and cid.
1578 * It needs to be initialized here so that it can be safely
1579 * handled by a subsequent FLR flow.
1580 */
1581 bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
1582 0xFF, 0xFF, 0xFF,
1583 bnx2x_vf_sp(bp, vf, mcast_rdata),
1584 bnx2x_vf_sp_map(bp, vf, mcast_rdata),
1585 BNX2X_FILTER_MCAST_PENDING,
1586 &vf->filter_state,
1587 BNX2X_OBJ_TYPE_RX_TX);
1588
1589 /* set the mailbox message addresses */
1590 BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
1591 (((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
1592 MBX_MSG_ALIGNED_SIZE);
1593
1594 BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
1595 vfid * MBX_MSG_ALIGNED_SIZE;
1596
1597 /* Enable vf mailbox */
1598 bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
1599 }
1600
1601 /* Final VF init */
b9871bcf
AE
1602 for_each_vf(bp, vfid) {
1603 struct bnx2x_virtf *vf = BP_VF(bp, vfid);
b56e9670
AE
1604
1605 /* fill in the BDF and bars */
b9871bcf
AE
1606 vf->bus = bnx2x_vf_bus(bp, vfid);
1607 vf->devfn = bnx2x_vf_devfn(bp, vfid);
b56e9670
AE
1608 bnx2x_vf_set_bars(bp, vf);
1609
1610 DP(BNX2X_MSG_IOV,
1611 "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
1612 vf->abs_vfid, vf->bus, vf->devfn,
1613 (unsigned)vf->bars[0].bar, vf->bars[0].size,
1614 (unsigned)vf->bars[1].bar, vf->bars[1].size,
1615 (unsigned)vf->bars[2].bar, vf->bars[2].size);
b56e9670
AE
1616 }
1617
1618 return 0;
1619}
290ca2bb 1620
f1929b01
AE
1621/* called by bnx2x_chip_cleanup */
1622int bnx2x_iov_chip_cleanup(struct bnx2x *bp)
1623{
1624 int i;
1625
1626 if (!IS_SRIOV(bp))
1627 return 0;
1628
1629 /* release all the VFs */
1630 for_each_vf(bp, i)
2dc33bbc 1631 bnx2x_vf_release(bp, BP_VF(bp, i));
f1929b01
AE
1632
1633 return 0;
1634}
1635
290ca2bb
AE
1636/* called by bnx2x_init_hw_func, returns the next ilt line */
1637int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
1638{
1639 int i;
1640 struct bnx2x_ilt *ilt = BP_ILT(bp);
1641
1642 if (!IS_SRIOV(bp))
1643 return line;
1644
1645 /* set vfs ilt lines */
1646 for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
1647 struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);
1648
1649 ilt->lines[line+i].page = hw_cxt->addr;
1650 ilt->lines[line+i].page_mapping = hw_cxt->mapping;
1651 ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
1652 }
1653 return line + i;
1654}
1655
fd1fc79d 1656static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid)
290ca2bb 1657{
fd1fc79d
AE
1658 return ((cid >= BNX2X_FIRST_VF_CID) &&
1659 ((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS));
1660}
1661
1662static
1663void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
1664 struct bnx2x_vf_queue *vfq,
1665 union event_ring_elem *elem)
1666{
1667 unsigned long ramrod_flags = 0;
1668 int rc = 0;
9cd753a1 1669 u32 echo = le32_to_cpu(elem->message.data.eth_event.echo);
fd1fc79d
AE
1670
1671 /* Always push next commands out, don't wait here */
1672 set_bit(RAMROD_CONT, &ramrod_flags);
1673
9cd753a1 1674 switch (echo >> BNX2X_SWCID_SHIFT) {
fd1fc79d
AE
1675 case BNX2X_FILTER_MAC_PENDING:
1676 rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
1677 &ramrod_flags);
1678 break;
1679 case BNX2X_FILTER_VLAN_PENDING:
1680 rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
1681 &ramrod_flags);
1682 break;
1683 default:
9cd753a1 1684 BNX2X_ERR("Unsupported classification command: 0x%x\n", echo);
fd1fc79d
AE
1685 return;
1686 }
1687 if (rc < 0)
1688 BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
1689 else if (rc > 0)
1690 DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
1691}
1692
1693static
1694void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
1695 struct bnx2x_virtf *vf)
1696{
1697 struct bnx2x_mcast_ramrod_params rparam = {NULL};
1698 int rc;
1699
1700 rparam.mcast_obj = &vf->mcast_obj;
1701 vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);
1702
1703 /* If there are pending mcast commands - send them */
1704 if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
1705 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1706 if (rc < 0)
1707 BNX2X_ERR("Failed to send pending mcast commands: %d\n",
1708 rc);
1709 }
1710}
1711
1712static
1713void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
1714 struct bnx2x_virtf *vf)
1715{
4e857c58 1716 smp_mb__before_atomic();
fd1fc79d 1717 clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
4e857c58 1718 smp_mb__after_atomic();
fd1fc79d
AE
1719}
1720
2dc33bbc
YM
1721static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp,
1722 struct bnx2x_virtf *vf)
1723{
1724 vf->rss_conf_obj.raw.clear_pending(&vf->rss_conf_obj.raw);
1725}
1726
fd1fc79d
AE
1727int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
1728{
1729 struct bnx2x_virtf *vf;
1730 int qidx = 0, abs_vfid;
1731 u8 opcode;
1732 u16 cid = 0xffff;
1733
1734 if (!IS_SRIOV(bp))
1735 return 1;
1736
1737 /* first get the cid - the only events we handle here are cfc-delete
1738 * and set-mac completion
1739 */
1740 opcode = elem->message.opcode;
1741
1742 switch (opcode) {
1743 case EVENT_RING_OPCODE_CFC_DEL:
da472731 1744 cid = SW_CID(elem->message.data.cfc_del_event.cid);
fd1fc79d
AE
1745 DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
1746 break;
1747 case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
1748 case EVENT_RING_OPCODE_MULTICAST_RULES:
1749 case EVENT_RING_OPCODE_FILTERS_RULES:
2dc33bbc 1750 case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
9cd753a1 1751 cid = SW_CID(elem->message.data.eth_event.echo);
fd1fc79d
AE
1752 DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
1753 break;
1754 case EVENT_RING_OPCODE_VF_FLR:
1755 abs_vfid = elem->message.data.vf_flr_event.vf_id;
1756 DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
1757 abs_vfid);
1758 goto get_vf;
1759 case EVENT_RING_OPCODE_MALICIOUS_VF:
1760 abs_vfid = elem->message.data.malicious_vf_event.vf_id;
076d1329
AE
1761 BNX2X_ERR("Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
1762 abs_vfid,
1763 elem->message.data.malicious_vf_event.err_id);
fd1fc79d
AE
1764 goto get_vf;
1765 default:
1766 return 1;
1767 }
1768
1769	/* check if the cid is in the VF range */
1770 if (!bnx2x_iov_is_vf_cid(bp, cid)) {
1771 DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);
1772 return 1;
1773 }
1774
1775 /* extract vf and rxq index from vf_cid - relies on the following:
1776 * 1. vfid on cid reflects the true abs_vfid
16a5fd92 1777 * 2. The max number of VFs (per path) is 64
fd1fc79d
AE
1778 */
1779 qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
1780 abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
1781get_vf:
1782 vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
1783
1784 if (!vf) {
1785 BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n",
1786 cid, abs_vfid);
1787 return 0;
1788 }
1789
1790 switch (opcode) {
1791 case EVENT_RING_OPCODE_CFC_DEL:
1792 DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n",
1793 vf->abs_vfid, qidx);
1794 vfq_get(vf, qidx)->sp_obj.complete_cmd(bp,
1795 &vfq_get(vf,
1796 qidx)->sp_obj,
1797 BNX2X_Q_CMD_CFC_DEL);
1798 break;
1799 case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
1800 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n",
1801 vf->abs_vfid, qidx);
1802 bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem);
1803 break;
1804 case EVENT_RING_OPCODE_MULTICAST_RULES:
1805 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n",
1806 vf->abs_vfid, qidx);
1807 bnx2x_vf_handle_mcast_eqe(bp, vf);
1808 break;
1809 case EVENT_RING_OPCODE_FILTERS_RULES:
1810 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n",
1811 vf->abs_vfid, qidx);
1812 bnx2x_vf_handle_filters_eqe(bp, vf);
1813 break;
2dc33bbc
YM
1814 case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
1815 DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n",
1816 vf->abs_vfid, qidx);
1817 bnx2x_vf_handle_rss_update_eqe(bp, vf);
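		/* fall through - no further handling is needed for these events */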
fd1fc79d 1818 case EVENT_RING_OPCODE_VF_FLR:
fd1fc79d 1819 case EVENT_RING_OPCODE_MALICIOUS_VF:
fd1fc79d 1820 /* Do nothing for now */
076d1329 1821 return 0;
fd1fc79d 1822 }
fd1fc79d
AE
1823
1824 return 0;
1825}
1826
1827static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
1828{
1829 /* extract the vf from vf_cid - relies on the following:
1830 * 1. vfid on cid reflects the true abs_vfid
16a5fd92 1831 * 2. The max number of VFs (per path) is 64
fd1fc79d
AE
1832 */
1833 int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
1834 return bnx2x_vf_by_abs_fid(bp, abs_vfid);
1835}
1836
1837void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
1838 struct bnx2x_queue_sp_obj **q_obj)
1839{
1840 struct bnx2x_virtf *vf;
1841
290ca2bb
AE
1842 if (!IS_SRIOV(bp))
1843 return;
1844
fd1fc79d
AE
1845 vf = bnx2x_vf_by_cid(bp, vf_cid);
1846
1847 if (vf) {
1848 /* extract queue index from vf_cid - relies on the following:
1849 * 1. vfid on cid reflects the true abs_vfid
16a5fd92 1850 * 2. The max number of VFs (per path) is 64
fd1fc79d
AE
1851 */
1852 int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
1853 *q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
1854 } else {
1855 BNX2X_ERR("No vf matching cid %d\n", vf_cid);
1856 }
1857}
1858
67c431a5
AE
1859void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
1860{
1861 int i;
1862 int first_queue_query_index, num_queues_req;
1863 dma_addr_t cur_data_offset;
1864 struct stats_query_entry *cur_query_entry;
1865 u8 stats_count = 0;
1866 bool is_fcoe = false;
1867
1868 if (!IS_SRIOV(bp))
1869 return;
1870
1871 if (!NO_FCOE(bp))
1872 is_fcoe = true;
1873
1874 /* fcoe adds one global request and one queue request */
1875 num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe;
1876 first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
1877 (is_fcoe ? 0 : 1);
1878
76ca70fa
YM
1879 DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
1880 "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
1881 BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
1882 first_queue_query_index + num_queues_req);
67c431a5
AE
1883
1884 cur_data_offset = bp->fw_stats_data_mapping +
1885 offsetof(struct bnx2x_fw_stats_data, queue_stats) +
1886 num_queues_req * sizeof(struct per_queue_stats);
1887
1888 cur_query_entry = &bp->fw_stats_req->
1889 query[first_queue_query_index + num_queues_req];
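	/* VF queue queries are appended right after the PF's own ETH (and FCoE) queue queries */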
1890
1891 for_each_vf(bp, i) {
1892 int j;
1893 struct bnx2x_virtf *vf = BP_VF(bp, i);
1894
1895 if (vf->state != VF_ENABLED) {
76ca70fa
YM
1896 DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
1897 "vf %d not enabled so no stats for it\n",
1898 vf->abs_vfid);
67c431a5
AE
1899 continue;
1900 }
1901
1902 DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid);
1903 for_each_vfq(vf, j) {
1904 struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
1905
b9871bcf
AE
1906 dma_addr_t q_stats_addr =
1907 vf->fw_stat_map + j * vf->stats_stride;
1908
67c431a5
AE
1909			/* collect stats for active queues only */
1910 if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
1911 BNX2X_Q_LOGICAL_STATE_STOPPED)
1912 continue;
1913
1914 /* create stats query entry for this queue */
1915 cur_query_entry->kind = STATS_TYPE_QUEUE;
b9871bcf 1916 cur_query_entry->index = vfq_stat_id(vf, rxq);
67c431a5
AE
1917 cur_query_entry->funcID =
1918 cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
1919 cur_query_entry->address.hi =
b9871bcf 1920 cpu_to_le32(U64_HI(q_stats_addr));
67c431a5 1921 cur_query_entry->address.lo =
b9871bcf 1922 cpu_to_le32(U64_LO(q_stats_addr));
67c431a5
AE
1923 DP(BNX2X_MSG_IOV,
1924 "added address %x %x for vf %d queue %d client %d\n",
1925 cur_query_entry->address.hi,
1926 cur_query_entry->address.lo, cur_query_entry->funcID,
1927 j, cur_query_entry->index);
1928 cur_query_entry++;
1929 cur_data_offset += sizeof(struct per_queue_stats);
1930 stats_count++;
b9871bcf
AE
1931
1932 /* all stats are coalesced to the leading queue */
1933 if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
1934 break;
67c431a5
AE
1935 }
1936 }
1937 bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
1938}
1939
67c431a5 1940/* VF API helpers */
b93288d5
AE
1941static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
1942 u8 enable)
1943{
1944 u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4;
1945 u32 val = enable ? (abs_vfid | (1 << 6)) : 0;
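	/* the low 6 bits carry the VF id; bit 6 evidently serves as a valid flag, since VF 0 must be distinguishable from a cleared entry */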
1946
1947 REG_WR(bp, reg, val);
1948}
8ca5e17e 1949
99e9d211
AE
1950static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf)
1951{
1952 int i;
1953
1954 for_each_vfq(vf, i)
1955 bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
1956 vfq_qzone_id(vf, vfq_get(vf, i)), false);
1957}
1958
1959static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf)
1960{
1961 u32 val;
1962
1963 /* clear the VF configuration - pretend */
1964 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
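	/* while pretending, the GRC accesses below execute in the VF's context; the PF context is restored afterwards */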
1965 val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
1966 val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN |
1967 IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK);
1968 REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
1969 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1970}
1971
8ca5e17e
AE
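/* a VF may use no more queues than it has status blocks, connection ids, or the ABI maximum allows */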
1972u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
1973{
1974 return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
1975 BNX2X_VF_MAX_QUEUES);
1976}
1977
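/* a zero rxq/txq count means the VF's share has not been set yet, so compare against the maximal queue count instead */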
1978static
1979int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
1980 struct vf_pf_resc_request *req_resc)
1981{
1982 u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
1983 u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
1984
1985 return ((req_resc->num_rxqs <= rxq_cnt) &&
1986 (req_resc->num_txqs <= txq_cnt) &&
1987 (req_resc->num_sbs <= vf_sb_count(vf)) &&
1988 (req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
05cc5a39 1989 (req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
8ca5e17e
AE
1990}
1991
1992/* CORE VF API */
1993int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
1994 struct vf_pf_resc_request *resc)
1995{
1996 int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
1997 BNX2X_CIDS_PER_VF;
1998
1999 union cdu_context *base_cxt = (union cdu_context *)
2000 BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
2001 (base_vf_cid & (ILT_PAGE_CIDS-1));
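	/* base_cxt now points at the CDU context of this VF's first CID: select the ILT page, then the offset within it */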
2002 int i;
2003
2004	/* if state is 'acquired' the VF was not released or FLR'd; in
2005	 * this case the returned resources match the previously
2006	 * acquired resources. Verify that the requested numbers do
2007	 * not exceed the already acquired numbers.
2008 */
2009 if (vf->state == VF_ACQUIRED) {
2010 DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
2011 vf->abs_vfid);
2012
2013 if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
2014 BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= then previously acquired numbers\n",
2015 vf->abs_vfid);
2016 return -EINVAL;
2017 }
2018 return 0;
2019 }
2020
2021 /* Otherwise vf state must be 'free' or 'reset' */
2022 if (vf->state != VF_FREE && vf->state != VF_RESET) {
2023 BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n",
2024 vf->abs_vfid, vf->state);
2025 return -EINVAL;
2026 }
2027
2028 /* static allocation:
16a5fd92 2029	 * the global maximum numbers are fixed per VF. Fail the request if
8ca5e17e
AE
2030	 * the requested numbers exceed these globals
2031 */
2032 if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
2033 DP(BNX2X_MSG_IOV,
2034 "cannot fulfill vf resource request. Placing maximal available values in response\n");
2035 /* set the max resource in the vf */
2036 return -ENOMEM;
2037 }
2038
2039	/* Set resource counters - a request of 0 means max available */
2040 vf_sb_count(vf) = resc->num_sbs;
2041 vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
2042 vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
8ca5e17e
AE
2043
2044 DP(BNX2X_MSG_IOV,
2045 "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
2046 vf_sb_count(vf), vf_rxq_count(vf),
2047 vf_txq_count(vf), vf_mac_rules_cnt(vf),
05cc5a39 2048 vf_vlan_rules_cnt(vf));
8ca5e17e
AE
2049
2050 /* Initialize the queues */
2051 if (!vf->vfqs) {
2052 DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n");
2053 return -EINVAL;
2054 }
2055
2056 for_each_vfq(vf, i) {
2057 struct bnx2x_vf_queue *q = vfq_get(vf, i);
2058
2059 if (!q) {
b9871bcf 2060 BNX2X_ERR("q number %d was not allocated\n", i);
8ca5e17e
AE
2061 return -EINVAL;
2062 }
2063
2064 q->index = i;
2065 q->cxt = &((base_cxt + i)->eth);
2066 q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;
2067
2068 DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n",
2069 vf->abs_vfid, i, q->index, q->cid, q->cxt);
2070
2071 /* init SP objects */
2072 bnx2x_vfq_init(bp, vf, q);
2073 }
2074 vf->state = VF_ACQUIRED;
2075 return 0;
2076}
2077
b93288d5
AE
2078int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
2079{
2080 struct bnx2x_func_init_params func_init = {0};
b93288d5
AE
2081 int i;
2082
2083 /* the sb resources are initialized at this point, do the
2084 * FW/HW initializations
2085 */
2086 for_each_vf_sb(vf, i)
2087 bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true,
2088 vf_igu_sb(vf, i), vf_igu_sb(vf, i));
2089
2090 /* Sanity checks */
2091 if (vf->state != VF_ACQUIRED) {
2092 DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n",
2093 vf->abs_vfid, vf->state);
2094 return -EINVAL;
2095 }
03c22ea3
AE
2096
2097 /* let FLR complete ... */
2098 msleep(100);
2099
b93288d5
AE
2100 /* FLR cleanup epilogue */
2101 if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid))
2102 return -EBUSY;
2103
2104 /* reset IGU VF statistics: MSIX */
2105	REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4, 0);
2106
b93288d5 2107 /* function setup */
b93288d5
AE
2108 func_init.pf_id = BP_FUNC(bp);
2109 func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
b93288d5
AE
2110 bnx2x_func_init(bp, &func_init);
2111
2112 /* Enable the vf */
2113 bnx2x_vf_enable_access(bp, vf->abs_vfid);
2114 bnx2x_vf_enable_traffic(bp, vf);
2115
2116 /* queue protection table */
2117 for_each_vfq(vf, i)
2118 bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
2119 vfq_qzone_id(vf, vfq_get(vf, i)), true);
2120
2121 vf->state = VF_ENABLED;
2122
abc5a021
AE
2123 /* update vf bulletin board */
2124 bnx2x_post_vf_bulletin(bp, vf->index);
2125
b93288d5
AE
2126 return 0;
2127}
2128
a3097bda
AE
2129struct set_vf_state_cookie {
2130 struct bnx2x_virtf *vf;
2131 u8 state;
2132};
2133
8e61777d 2134static void bnx2x_set_vf_state(void *cookie)
a3097bda
AE
2135{
2136 struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie;
2137
2138 p->vf->state = p->state;
2139}
2140
2dc33bbc 2141int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
99e9d211 2142{
2dc33bbc 2143 int rc = 0, i;
99e9d211 2144
2dc33bbc 2145 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
99e9d211 2146
2dc33bbc
YM
2147 /* Close all queues */
2148 for (i = 0; i < vf_rxq_count(vf); i++) {
2149 rc = bnx2x_vf_queue_teardown(bp, vf, i);
2150 if (rc)
2151 goto op_err;
2152 }
99e9d211 2153
2dc33bbc
YM
2154 /* disable the interrupts */
2155 DP(BNX2X_MSG_IOV, "disabling igu\n");
2156 bnx2x_vf_igu_disable(bp, vf);
99e9d211 2157
2dc33bbc
YM
2158 /* disable the VF */
2159 DP(BNX2X_MSG_IOV, "clearing qtbl\n");
2160 bnx2x_vf_clr_qtbl(bp, vf);
a3097bda
AE
2161
2162 /* need to make sure there are no outstanding stats ramrods which may
2163 * cause the device to access the VF's stats buffer which it will free
2164 * as soon as we return from the close flow.
2165 */
2166 {
2167 struct set_vf_state_cookie cookie;
2168
2169 cookie.vf = vf;
2170 cookie.state = VF_ACQUIRED;
dff173de
YM
2171 rc = bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
2172 if (rc)
2173 goto op_err;
a3097bda
AE
2174 }
2175
99e9d211 2176 DP(BNX2X_MSG_IOV, "set state to acquired\n");
99e9d211 2177
2dc33bbc
YM
2178 return 0;
2179op_err:
2180 BNX2X_ERR("vf[%d] CLOSE error: rc %d\n", vf->abs_vfid, rc);
2181 return rc;
99e9d211
AE
2182}
2183
16a5fd92 2184/* VF release can be called either: 1. The VF was acquired but
f1929b01
AE
2185 * not enabled 2. the vf was enabled or in the process of being
2186 * enabled
2187 */
2dc33bbc 2188int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf)
f1929b01 2189{
2dc33bbc 2190 int rc;
f1929b01
AE
2191
2192 DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid,
2193 vf->state == VF_FREE ? "Free" :
2194 vf->state == VF_ACQUIRED ? "Acquired" :
2195 vf->state == VF_ENABLED ? "Enabled" :
2196 vf->state == VF_RESET ? "Reset" :
2197 "Unknown");
2198
2199 switch (vf->state) {
2200 case VF_ENABLED:
2dc33bbc
YM
2201 rc = bnx2x_vf_close(bp, vf);
2202 if (rc)
f1929b01 2203 goto op_err;
2dc33bbc 2204 /* Fallthrough to release resources */
f1929b01
AE
2205 case VF_ACQUIRED:
2206 DP(BNX2X_MSG_IOV, "about to free resources\n");
2207 bnx2x_vf_free_resc(bp, vf);
2dc33bbc 2208 break;
f1929b01
AE
2209
2210 case VF_FREE:
2211 case VF_RESET:
f1929b01 2212 default:
2dc33bbc 2213 break;
b9871bcf 2214 }
2dc33bbc 2215 return 0;
b9871bcf 2216op_err:
2dc33bbc
YM
2217 BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, rc);
2218 return rc;
f1929b01
AE
2219}
2220
2dc33bbc
YM
2221int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
2222 struct bnx2x_config_rss_params *rss)
b9871bcf 2223{
2dc33bbc
YM
2224 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
2225 set_bit(RAMROD_COMP_WAIT, &rss->ramrod_flags);
2226 return bnx2x_config_rss(bp, rss);
b9871bcf
AE
2227}
2228
2dc33bbc
YM
2229int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
2230 struct vfpf_tpa_tlv *tlv,
2231 struct bnx2x_queue_update_tpa_params *params)
14a94ebd 2232{
2dc33bbc
YM
2233 aligned_u64 *sge_addr = tlv->tpa_client_info.sge_addr;
2234 struct bnx2x_queue_state_params qstate;
2235 int qid, rc = 0;
14a94ebd 2236
2dc33bbc 2237 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
14a94ebd 2238
2dc33bbc
YM
2239 /* Set ramrod params */
2240 memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
2241 memcpy(&qstate.params.update_tpa, params,
2242 sizeof(struct bnx2x_queue_update_tpa_params));
2243 qstate.cmd = BNX2X_Q_CMD_UPDATE_TPA;
2244 set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);
14a94ebd 2245
2dc33bbc
YM
2246 for (qid = 0; qid < vf_rxq_count(vf); qid++) {
2247 qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
2248 qstate.params.update_tpa.sge_map = sge_addr[qid];
2249 DP(BNX2X_MSG_IOV, "sge_addr[%d:%d] %08x:%08x\n",
2250 vf->abs_vfid, qid, U64_HI(sge_addr[qid]),
2251 U64_LO(sge_addr[qid]));
2252 rc = bnx2x_queue_state_change(bp, &qstate);
2253 if (rc) {
2254 BNX2X_ERR("Failed to configure sge_addr %08x:%08x for [%d:%d]\n",
2255 U64_HI(sge_addr[qid]), U64_LO(sge_addr[qid]),
2256 vf->abs_vfid, qid);
2257 return rc;
14a94ebd 2258 }
14a94ebd 2259 }
14a94ebd 2260
2dc33bbc 2261 return rc;
14a94ebd
MK
2262}
2263
f1929b01
AE
2264/* VF release ~ VF close + VF release-resources
2265 * Release is the ultimate SW shutdown and is called whenever an
2266 * irrecoverable error is encountered.
2267 */
2dc33bbc 2268int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
f1929b01 2269{
f1929b01 2270 int rc;
b9871bcf
AE
2271
2272 DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid);
f1929b01
AE
2273 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
2274
2dc33bbc 2275 rc = bnx2x_vf_free(bp, vf);
f1929b01
AE
2276 if (rc)
2277 WARN(rc,
2278 "VF[%d] Failed to allocate resources for release op- rc=%d\n",
2279 vf->abs_vfid, rc);
2dc33bbc
YM
2280 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
2281 return rc;
f1929b01
AE
2282}
2283
8ca5e17e
AE
2284void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
2285 enum channel_tlvs tlv)
2286{
b9871bcf
AE
2287 /* we don't lock the channel for unsupported tlvs */
2288 if (!bnx2x_tlv_supported(tlv)) {
2289 BNX2X_ERR("attempting to lock with unsupported tlv. Aborting\n");
2290 return;
2291 }
2292
8ca5e17e
AE
2293 /* lock the channel */
2294 mutex_lock(&vf->op_mutex);
2295
2296 /* record the locking op */
2297 vf->op_current = tlv;
2298
2299 /* log the lock */
2300 DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n",
2301 vf->abs_vfid, tlv);
2302}
2303
2304void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
2305 enum channel_tlvs expected_tlv)
2306{
b9871bcf
AE
2307 enum channel_tlvs current_tlv;
2308
2309 if (!vf) {
2310 BNX2X_ERR("VF was %p\n", vf);
2311 return;
2312 }
2313
2314 current_tlv = vf->op_current;
2315
2316 /* we don't unlock the channel for unsupported tlvs */
2317 if (!bnx2x_tlv_supported(expected_tlv))
2318 return;
2319
8ca5e17e
AE
2320 WARN(expected_tlv != vf->op_current,
2321 "lock mismatch: expected %d found %d", expected_tlv,
2322 vf->op_current);
2323
b9871bcf
AE
2324 /* record the locking op */
2325 vf->op_current = CHANNEL_TLV_NONE;
2326
8ca5e17e
AE
2327	/* unlock the channel */
2328 mutex_unlock(&vf->op_mutex);
2329
2330 /* log the unlock */
2331 DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
0c23ad37 2332 vf->abs_vfid, current_tlv);
8ca5e17e 2333}
6411280a 2334
c14db202
YM
2335static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable)
2336{
2337 struct bnx2x_queue_state_params q_params;
2338 u32 prev_flags;
2339 int i, rc;
2340
2341 /* Verify changes are needed and record current Tx switching state */
2342 prev_flags = bp->flags;
2343 if (enable)
2344 bp->flags |= TX_SWITCHING;
2345 else
2346 bp->flags &= ~TX_SWITCHING;
2347 if (prev_flags == bp->flags)
2348 return 0;
2349
2350 /* Verify state enables the sending of queue ramrods */
2351 if ((bp->state != BNX2X_STATE_OPEN) ||
2352 (bnx2x_get_q_logical_state(bp,
2353 &bnx2x_sp_obj(bp, &bp->fp[0]).q_obj) !=
2354 BNX2X_Q_LOGICAL_STATE_ACTIVE))
2355 return 0;
2356
2357 /* send q. update ramrod to configure Tx switching */
2358 memset(&q_params, 0, sizeof(q_params));
2359 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
2360 q_params.cmd = BNX2X_Q_CMD_UPDATE;
2361 __set_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
2362 &q_params.params.update.update_flags);
2363 if (enable)
2364 __set_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
2365 &q_params.params.update.update_flags);
2366 else
2367 __clear_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
2368 &q_params.params.update.update_flags);
2369
2370 /* send the ramrod on all the queues of the PF */
2371 for_each_eth_queue(bp, i) {
2372 struct bnx2x_fastpath *fp = &bp->fp[i];
2373
2374 /* Set the appropriate Queue object */
2375 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
2376
2377 /* Update the Queue state */
2378 rc = bnx2x_queue_state_change(bp, &q_params);
2379 if (rc) {
2380 BNX2X_ERR("Failed to configure Tx switching\n");
2381 return rc;
2382 }
2383 }
2384
2385 DP(BNX2X_MSG_IOV, "%s Tx Switching\n", enable ? "Enabled" : "Disabled");
2386 return 0;
2387}
2388
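/* the driver's .sriov_configure hook - reached when an administrator writes a VF count to the PF's sriov_numvfs sysfs attribute */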
3c76feff 2389int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
6411280a 2390{
3c76feff 2391 struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));
6411280a 2392
c8781cf4
MK
2393 if (!IS_SRIOV(bp)) {
2394 BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. Check dmesg for errors in probe stage\n");
2395 return -EINVAL;
2396 }
2397
3c76feff
AE
2398 DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
2399 num_vfs_param, BNX2X_NR_VIRTFN(bp));
2400
2401 /* HW channel is only operational when PF is up */
2402 if (bp->state != BNX2X_STATE_OPEN) {
6bf07b8e 2403 BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n");
3c76feff
AE
2404 return -EINVAL;
2405 }
2406
2407 /* we are always bound by the total_vfs in the configuration space */
2408 if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) {
2409 BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n",
2410 num_vfs_param, BNX2X_NR_VIRTFN(bp));
2411 num_vfs_param = BNX2X_NR_VIRTFN(bp);
2412 }
2413
2414 bp->requested_nr_virtfn = num_vfs_param;
2415 if (num_vfs_param == 0) {
c14db202 2416 bnx2x_set_pf_tx_switching(bp, false);
a345ce71 2417 bnx2x_disable_sriov(bp);
3c76feff
AE
2418 return 0;
2419 } else {
2420 return bnx2x_enable_sriov(bp);
2421 }
2422}
c14db202 2423
b9871bcf 2424#define IGU_ENTRY_SIZE 4
3c76feff
AE
2425
2426int bnx2x_enable_sriov(struct bnx2x *bp)
2427{
2428 int rc = 0, req_vfs = bp->requested_nr_virtfn;
b9871bcf
AE
2429 int vf_idx, sb_idx, vfq_idx, qcount, first_vf;
2430 u32 igu_entry, address;
2431 u16 num_vf_queues;
3c76feff 2432
b9871bcf
AE
2433 if (req_vfs == 0)
2434 return 0;
2435
2436 first_vf = bp->vfdb->sriov.first_vf_in_pf;
2437
2438 /* statically distribute vf sb pool between VFs */
2439 num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES,
2440 BP_VFDB(bp)->vf_sbs_pool / req_vfs);
2441
2442 /* zero previous values learned from igu cam */
2443 for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) {
2444 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
2445
2446 vf->sb_count = 0;
2447 vf_sb_count(BP_VF(bp, vf_idx)) = 0;
2448 }
2449 bp->vfdb->vf_sbs_pool = 0;
2450
2451 /* prepare IGU cam */
2452 sb_idx = BP_VFDB(bp)->first_vf_igu_entry;
2453 address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE;
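	/* each CAM entry binds one status block to a (VF function id, msix vector) pair and marks it valid */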
2454 for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
2455 for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) {
2456 igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT |
2457 vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT |
2458 IGU_REG_MAPPING_MEMORY_VALID;
2459 DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n",
2460 sb_idx, vf_idx);
2461 REG_WR(bp, address, igu_entry);
2462 sb_idx++;
2463 address += IGU_ENTRY_SIZE;
2464 }
2465 }
2466
2467 /* Reinitialize vf database according to igu cam */
2468 bnx2x_get_vf_igu_cam_info(bp);
2469
2470 DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n",
2471 BP_VFDB(bp)->vf_sbs_pool, num_vf_queues);
2472
2473 qcount = 0;
2474 for_each_vf(bp, vf_idx) {
2475 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
2476
2477 /* set local queue arrays */
2478 vf->vfqs = &bp->vfdb->vfqs[qcount];
2479 qcount += vf_sb_count(vf);
717fa2b9 2480 bnx2x_iov_static_resc(bp, vf);
b9871bcf
AE
2481 }
2482
89e18ae6
MK
2483 /* prepare msix vectors in VF configuration space - the value in the
2484 * PCI configuration space should be the index of the last entry,
2485 * namely one less than the actual size of the table
2486 */
b9871bcf
AE
2487 for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
2488 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
2489 REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
89e18ae6 2490 num_vf_queues - 1);
717fa2b9 2491 DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
89e18ae6 2492 vf_idx, num_vf_queues - 1);
b9871bcf
AE
2493 }
2494 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
2495
2496	/* enable sriov. This will probe all the VFs, and consequently cause
2497 * the "acquire" messages to appear on the VF PF channel.
2498 */
2499 DP(BNX2X_MSG_IOV, "about to call enable sriov\n");
826cb7b4 2500 bnx2x_disable_sriov(bp);
c14db202
YM
2501
2502 rc = bnx2x_set_pf_tx_switching(bp, true);
2503 if (rc)
2504 return rc;
2505
3c76feff
AE
2506 rc = pci_enable_sriov(bp->pdev, req_vfs);
2507 if (rc) {
6411280a 2508 BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
3c76feff
AE
2509 return rc;
2510 }
2511 DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs);
2512 return req_vfs;
6411280a
AE
2513}
2514
3ec9f9ca
AE
2515void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
2516{
2517 int vfidx;
2518 struct pf_vf_bulletin_content *bulletin;
2519
2520 DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n");
2521 for_each_vf(bp, vfidx) {
05cc5a39 2522 bulletin = BP_VF_BULLETIN(bp, vfidx);
c46309c7 2523 if (bulletin->valid_bitmap & (1 << VLAN_VALID))
3ec9f9ca
AE
2524 bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0);
2525 }
2526}
2527
3c76feff
AE
2528void bnx2x_disable_sriov(struct bnx2x *bp)
2529{
a345ce71
YM
2530 if (pci_vfs_assigned(bp->pdev)) {
2531 DP(BNX2X_MSG_IOV,
2532 "Unloading driver while VFs are assigned - VFs will not be deallocated\n");
2533 return;
2534 }
2535
3c76feff
AE
2536 pci_disable_sriov(bp->pdev);
2537}
2538
6495d15a
DK
2539static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx,
2540 struct bnx2x_virtf **vf,
2541 struct pf_vf_bulletin_content **bulletin,
2542 bool test_queue)
3ec9f9ca 2543{
af902ae4 2544 if (bp->state != BNX2X_STATE_OPEN) {
6495d15a 2545 BNX2X_ERR("PF is down - can't utilize iov-related functionality\n");
af902ae4
AE
2546 return -EINVAL;
2547 }
2548
3ec9f9ca 2549 if (!IS_SRIOV(bp)) {
0c23ad37 2550 BNX2X_ERR("sriov is disabled - can't utilize iov-related functionality\n");
3ec9f9ca
AE
2551 return -EINVAL;
2552 }
2553
2554 if (vfidx >= BNX2X_NR_VIRTFN(bp)) {
6495d15a 2555 BNX2X_ERR("VF is uninitialized - can't utilize iov-related functionality. vfidx was %d BNX2X_NR_VIRTFN was %d\n",
3ec9f9ca
AE
2556 vfidx, BNX2X_NR_VIRTFN(bp));
2557 return -EINVAL;
2558 }
2559
5ae30d78
AE
2560 /* init members */
2561 *vf = BP_VF(bp, vfidx);
2562 *bulletin = BP_VF_BULLETIN(bp, vfidx);
2563
2564 if (!*vf) {
6495d15a 2565 BNX2X_ERR("Unable to get VF structure for vfidx %d\n", vfidx);
b9871bcf
AE
2566 return -EINVAL;
2567 }
2568
6495d15a
DK
2569 if (test_queue && !(*vf)->vfqs) {
2570 BNX2X_ERR("vfqs struct is null. Was this invoked before dynamically enabling SR-IOV? vfidx was %d\n",
3ec9f9ca
AE
2571 vfidx);
2572 return -EINVAL;
2573 }
2574
5ae30d78 2575 if (!*bulletin) {
6495d15a 2576 BNX2X_ERR("Bulletin Board struct is null for vfidx %d\n",
5ae30d78
AE
2577 vfidx);
2578 return -EINVAL;
2579 }
2580
3ec9f9ca
AE
2581 return 0;
2582}
2583
2584int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
2585 struct ifla_vf_info *ivi)
2586{
2587 struct bnx2x *bp = netdev_priv(dev);
5ae30d78
AE
2588 struct bnx2x_virtf *vf = NULL;
2589 struct pf_vf_bulletin_content *bulletin = NULL;
2590 struct bnx2x_vlan_mac_obj *mac_obj;
2591 struct bnx2x_vlan_mac_obj *vlan_obj;
3ec9f9ca
AE
2592 int rc;
2593
5ae30d78 2594 /* sanity and init */
6495d15a 2595 rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
3ec9f9ca
AE
2596 if (rc)
2597 return rc;
6495d15a 2598
b9871bcf
AE
2599 mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
2600 vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
5ae30d78 2601 if (!mac_obj || !vlan_obj) {
3c76feff
AE
2602 BNX2X_ERR("VF partially initialized\n");
2603 return -EINVAL;
2604 }
3ec9f9ca
AE
2605
2606 ivi->vf = vfidx;
2607 ivi->qos = 0;
ed616689
SC
2608 ivi->max_tx_rate = 10000; /* always 10G. TBA take from link struct */
2609 ivi->min_tx_rate = 0;
3ec9f9ca
AE
2610 ivi->spoofchk = 1; /*always enabled */
2611 if (vf->state == VF_ENABLED) {
2612 /* mac and vlan are in vlan_mac objects */
3a3534ec 2613 if (bnx2x_validate_vf_sp_objs(bp, vf, false)) {
b9871bcf
AE
2614 mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
2615 0, ETH_ALEN);
b9871bcf
AE
2616 vlan_obj->get_n_elements(bp, vlan_obj, 1,
2617 (u8 *)&ivi->vlan, 0,
2618 VLAN_HLEN);
3a3534ec 2619 }
3ec9f9ca 2620 } else {
6495d15a 2621 mutex_lock(&bp->vfdb->bulletin_mutex);
3ec9f9ca
AE
2622 /* mac */
2623 if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
2624			/* mac configured by ndo so it's in the bulletin board */
2625 memcpy(&ivi->mac, bulletin->mac, ETH_ALEN);
2626 else
16a5fd92 2627 /* function has not been loaded yet. Show mac as 0s */
c7bf7169 2628 eth_zero_addr(ivi->mac);
3ec9f9ca
AE
2629
2630 /* vlan */
2631 if (bulletin->valid_bitmap & (1 << VLAN_VALID))
2632			/* vlan configured by ndo so it's in the bulletin board */
2633 memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN);
2634 else
16a5fd92 2635 /* function has not been loaded yet. Show vlans as 0s */
3ec9f9ca 2636 memset(&ivi->vlan, 0, VLAN_HLEN);
6495d15a
DK
2637
2638 mutex_unlock(&bp->vfdb->bulletin_mutex);
3ec9f9ca
AE
2639 }
2640
2641 return 0;
2642}
2643
6411280a
AE
2644/* New mac for VF. Consider these cases:
2645 * 1. VF hasn't been acquired yet - save the mac in local bulletin board and
2646 * supply at acquire.
2647 * 2. VF has already been acquired but has not yet initialized - store in local
2648 * bulletin board. mac will be posted on VF bulletin board after VF init. VF
2649 * will configure this mac when it is ready.
2650 * 3. VF has already initialized but has not yet setup a queue - post the new
2651 * mac on VF's bulletin board right now. VF will configure this mac when it
2652 * is ready.
2653 * 4. VF has already set a queue - delete any macs already configured for this
2654 * queue and manually config the new mac.
2655 * In any event, once this function has been called refuse any attempts by the
2656 * VF to configure any mac for itself except for this mac. In case of a race
2657 * where the VF fails to see the new post on its bulletin board before sending a
2658 * mac configuration request, the PF will simply fail the request and VF can try
3ec9f9ca 2659 * again after consulting its bulletin board.
6411280a 2660 */
3ec9f9ca 2661int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
6411280a
AE
2662{
2663 struct bnx2x *bp = netdev_priv(dev);
3ec9f9ca 2664 int rc, q_logical_state;
5ae30d78
AE
2665 struct bnx2x_virtf *vf = NULL;
2666 struct pf_vf_bulletin_content *bulletin = NULL;
6411280a 2667
6411280a
AE
2668 if (!is_valid_ether_addr(mac)) {
2669 BNX2X_ERR("mac address invalid\n");
2670 return -EINVAL;
2671 }
2672
6495d15a
DK
2673 /* sanity and init */
2674 rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
2675 if (rc)
2676 return rc;
2677
2678 mutex_lock(&bp->vfdb->bulletin_mutex);
2679
16a5fd92 2680 /* update PF's copy of the VF's bulletin. Will no longer accept mac
6411280a
AE
2681 * configuration requests from vf unless match this mac
2682 */
2683 bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
2684 memcpy(bulletin->mac, mac, ETH_ALEN);
2685
2686 /* Post update on VF's bulletin board */
2687 rc = bnx2x_post_vf_bulletin(bp, vfidx);
6495d15a
DK
2688
2689 /* release lock before checking return code */
2690 mutex_unlock(&bp->vfdb->bulletin_mutex);
2691
6411280a
AE
2692 if (rc) {
2693 BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
2694 return rc;
2695 }
2696
6411280a 2697 q_logical_state =
b9871bcf 2698 bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
6411280a
AE
2699 if (vf->state == VF_ENABLED &&
2700 q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
2701 /* configure the mac in device on this vf's queue */
3ec9f9ca 2702 unsigned long ramrod_flags = 0;
3a3534ec 2703 struct bnx2x_vlan_mac_obj *mac_obj;
b9871bcf 2704
3a3534ec
YM
2705 /* User should be able to see failure reason in system logs */
2706 if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
2707 return -EINVAL;
6411280a
AE
2708
2709 /* must lock vfpf channel to protect against vf flows */
2710 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
2711
2712 /* remove existing eth macs */
3a3534ec 2713 mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
6411280a
AE
2714 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
2715 if (rc) {
2716 BNX2X_ERR("failed to delete eth macs\n");
31329afd
AE
2717 rc = -EINVAL;
2718 goto out;
6411280a
AE
2719 }
2720
2721 /* remove existing uc list macs */
2722 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
2723 if (rc) {
2724 BNX2X_ERR("failed to delete uc_list macs\n");
31329afd
AE
2725 rc = -EINVAL;
2726 goto out;
6411280a
AE
2727 }
2728
2729 /* configure the new mac to device */
3ec9f9ca 2730 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
6411280a 2731 bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
3ec9f9ca 2732 BNX2X_ETH_MAC, &ramrod_flags);
6411280a 2733
31329afd 2734out:
6411280a
AE
2735 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
2736 }
2737
02948344 2738 return rc;
3ec9f9ca
AE
2739}
2740
05cc5a39
YM
2741static void bnx2x_set_vf_vlan_acceptance(struct bnx2x *bp,
2742 struct bnx2x_virtf *vf, bool accept)
2743{
2744 struct bnx2x_rx_mode_ramrod_params rx_ramrod;
2745 unsigned long accept_flags;
2746
2747 /* need to remove/add the VF's accept_any_vlan bit */
2748 accept_flags = bnx2x_leading_vfq(vf, accept_flags);
2749 if (accept)
2750 set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
2751 else
2752 clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
2753
2754 bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
2755 accept_flags);
2756 bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
2757 bnx2x_config_rx_mode(bp, &rx_ramrod);
2758}
2759
2760static int bnx2x_set_vf_vlan_filter(struct bnx2x *bp, struct bnx2x_virtf *vf,
2761 u16 vlan, bool add)
3ec9f9ca 2762{
e8379c79 2763 struct bnx2x_vlan_mac_ramrod_params ramrod_param;
05cc5a39
YM
2764 unsigned long ramrod_flags = 0;
2765 int rc = 0;
2766
2767 /* configure the new vlan to device */
2768 memset(&ramrod_param, 0, sizeof(ramrod_param));
2769 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2770 ramrod_param.vlan_mac_obj = &bnx2x_leading_vfq(vf, vlan_obj);
2771 ramrod_param.ramrod_flags = ramrod_flags;
2772 ramrod_param.user_req.u.vlan.vlan = vlan;
2773 ramrod_param.user_req.cmd = add ? BNX2X_VLAN_MAC_ADD
2774 : BNX2X_VLAN_MAC_DEL;
2775 rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
2776 if (rc) {
2777 BNX2X_ERR("failed to configure vlan\n");
2778 return -EINVAL;
2779 }
2780
2781 return 0;
2782}
2783
2784int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
2785{
e8379c79 2786 struct pf_vf_bulletin_content *bulletin = NULL;
3ec9f9ca 2787 struct bnx2x *bp = netdev_priv(dev);
e8379c79
YM
2788 struct bnx2x_vlan_mac_obj *vlan_obj;
2789 unsigned long vlan_mac_flags = 0;
2790 unsigned long ramrod_flags = 0;
5ae30d78 2791 struct bnx2x_virtf *vf = NULL;
05cc5a39 2792 int i, rc;
3ec9f9ca 2793
3ec9f9ca
AE
2794 if (vlan > 4095) {
2795 BNX2X_ERR("illegal vlan value %d\n", vlan);
2796 return -EINVAL;
2797 }
2798
2799 DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n",
2800 vfidx, vlan, 0);
2801
6495d15a
DK
2802 /* sanity and init */
2803 rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
2804 if (rc)
2805 return rc;
2806
3ec9f9ca
AE
2807 /* update PF's copy of the VF's bulletin. No point in posting the vlan
2808	 * to the VF since it doesn't have anything to do with it. But it is useful
2809 * to store it here in case the VF is not up yet and we can only
e8379c79
YM
2810 * configure the vlan later when it does. Treat vlan id 0 as remove the
2811 * Host tag.
3ec9f9ca 2812 */
6495d15a
DK
2813 mutex_lock(&bp->vfdb->bulletin_mutex);
2814
e8379c79
YM
2815 if (vlan > 0)
2816 bulletin->valid_bitmap |= 1 << VLAN_VALID;
2817 else
2818 bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
3ec9f9ca
AE
2819 bulletin->vlan = vlan;
2820
05cc5a39
YM
2821 /* Post update on VF's bulletin board */
2822 rc = bnx2x_post_vf_bulletin(bp, vfidx);
2823 if (rc)
2824 BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
6495d15a
DK
2825 mutex_unlock(&bp->vfdb->bulletin_mutex);
2826
3ec9f9ca 2827 /* is vf initialized and queue set up? */
e8379c79
YM
2828 if (vf->state != VF_ENABLED ||
2829 bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) !=
2830 BNX2X_Q_LOGICAL_STATE_ACTIVE)
2831 return rc;
3ec9f9ca 2832
3a3534ec
YM
2833 /* User should be able to see error in system logs */
2834 if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
2835 return -EINVAL;
3ec9f9ca 2836
e8379c79
YM
2837 /* must lock vfpf channel to protect against vf flows */
2838 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
3ec9f9ca 2839
e8379c79
YM
2840 /* remove existing vlans */
2841 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3a3534ec 2842 vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
e8379c79
YM
2843 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
2844 &ramrod_flags);
2845 if (rc) {
2846 BNX2X_ERR("failed to delete vlans\n");
2847 rc = -EINVAL;
2848 goto out;
2849 }
2850
05cc5a39
YM
2851 /* clear accept_any_vlan when HV forces vlan, otherwise
2852 * according to VF capabilities
2853 */
2854 if (vlan || !(vf->cfg_flags & VF_CFG_VLAN_FILTER))
2855 bnx2x_set_vf_vlan_acceptance(bp, vf, !vlan);
e8379c79 2856
05cc5a39
YM
2857 rc = bnx2x_set_vf_vlan_filter(bp, vf, vlan, true);
2858 if (rc)
e8379c79 2859 goto out;
3ec9f9ca 2860
05cc5a39
YM
2861 /* send queue update ramrods to configure default vlan and
2862 * silent vlan removal
e8379c79 2863 */
05cc5a39
YM
2864 for_each_vfq(vf, i) {
2865 struct bnx2x_queue_state_params q_params = {NULL};
2866 struct bnx2x_queue_update_params *update_params;
2867
2868 q_params.q_obj = &bnx2x_vfq(vf, i, sp_obj);
2869
2870 /* validate the Q is UP */
2871 if (bnx2x_get_q_logical_state(bp, q_params.q_obj) !=
2872 BNX2X_Q_LOGICAL_STATE_ACTIVE)
2873 continue;
2874
2875 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
2876 q_params.cmd = BNX2X_Q_CMD_UPDATE;
2877 update_params = &q_params.params.update;
2878 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
3ec9f9ca 2879 &update_params->update_flags);
05cc5a39 2880 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
3ec9f9ca 2881 &update_params->update_flags);
05cc5a39
YM
2882 if (vlan == 0) {
2883 /* if vlan is 0 then we want to leave the VF traffic
2884 * untagged, and leave the incoming traffic untouched
2885 * (i.e. do not remove any vlan tags).
2886 */
2887 __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
2888 &update_params->update_flags);
2889 __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
2890 &update_params->update_flags);
2891 } else {
2892 /* configure default vlan to vf queue and set silent
2893 * vlan removal (the vf remains unaware of this vlan).
2894 */
2895 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
2896 &update_params->update_flags);
2897 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
2898 &update_params->update_flags);
2899 update_params->def_vlan = vlan;
2900 update_params->silent_removal_value =
2901 vlan & VLAN_VID_MASK;
2902 update_params->silent_removal_mask = VLAN_VID_MASK;
2903 }
3ec9f9ca 2904
05cc5a39
YM
2905 /* Update the Queue state */
2906 rc = bnx2x_queue_state_change(bp, &q_params);
2907 if (rc) {
2908 BNX2X_ERR("Failed to configure default VLAN queue %d\n",
2909 i);
2910 goto out;
2911 }
e8379c79 2912 }
31329afd 2913out:
e8379c79
YM
2914 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
2915
05cc5a39
YM
2916	if (!rc)
2917 DP(BNX2X_MSG_IOV,
2918 "updated VF[%d] vlan configuration (vlan = %d)\n",
2919 vfidx, vlan);
2920
31329afd 2921 return rc;
6411280a
AE
2922}
2923
16a5fd92
YM
2924/* crc is the first field in the bulletin board. Compute the crc over the
2925 * entire bulletin board excluding the crc field itself. Use the length field
2926 * as the Bulletin Board was posted by a PF with possibly a different version
2927 * from the vf which will sample it. Therefore, the length is computed by the
6495d15a 2928 * PF and then used blindly by the VF.
6411280a 2929 */
6495d15a 2930u32 bnx2x_crc_vf_bulletin(struct pf_vf_bulletin_content *bulletin)
6411280a
AE
2931{
2932 return crc32(BULLETIN_CRC_SEED,
2933 ((u8 *)bulletin) + sizeof(bulletin->crc),
4c133c39 2934 bulletin->length - sizeof(bulletin->crc));
6411280a
AE
2935}
2936
2937/* Check for new posts on the bulletin board */
2938enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
2939{
6495d15a 2940 struct pf_vf_bulletin_content *bulletin;
6411280a
AE
2941 int attempts;
2942
6495d15a
DK
2943	/* sampling the structure mid-post may result in corrupted data;
2944 * validate crc to ensure coherency.
2945 */
2946 for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) {
2947 u32 crc;
6411280a 2948
6495d15a
DK
2949 /* sample the bulletin board */
2950 memcpy(&bp->shadow_bulletin, bp->pf2vf_bulletin,
2951 sizeof(union pf_vf_bulletin));
2952
2953 crc = bnx2x_crc_vf_bulletin(&bp->shadow_bulletin.content);
2954
2955 if (bp->shadow_bulletin.content.crc == crc)
2956 break;
2957
2958 BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n",
2959 bp->shadow_bulletin.content.crc, crc);
2960 }
2961
2962 if (attempts >= BULLETIN_ATTEMPTS) {
2963 BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n",
2964 attempts);
2965 return PFVF_BULLETIN_CRC_ERR;
6411280a 2966 }
6495d15a
DK
2967 bulletin = &bp->shadow_bulletin.content;
2968
2969 /* bulletin board hasn't changed since last sample */
2970 if (bp->old_bulletin.version == bulletin->version)
2971 return PFVF_BULLETIN_UNCHANGED;
6411280a
AE
2972
2973 /* the mac address in bulletin board is valid and is new */
6495d15a
DK
2974 if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID &&
2975 !ether_addr_equal(bulletin->mac, bp->old_bulletin.mac)) {
6411280a 2976 /* update new mac to net device */
6495d15a
DK
2977 memcpy(bp->dev->dev_addr, bulletin->mac, ETH_ALEN);
2978 }
2979
2980 if (bulletin->valid_bitmap & (1 << LINK_VALID)) {
2981 DP(BNX2X_MSG_IOV, "link update speed %d flags %x\n",
2982 bulletin->link_speed, bulletin->link_flags);
2983
2984 bp->vf_link_vars.line_speed = bulletin->link_speed;
2985 bp->vf_link_vars.link_report_flags = 0;
2986 /* Link is down */
2987 if (bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)
2988 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2989 &bp->vf_link_vars.link_report_flags);
2990 /* Full DUPLEX */
2991 if (bulletin->link_flags & VFPF_LINK_REPORT_FULL_DUPLEX)
2992 __set_bit(BNX2X_LINK_REPORT_FD,
2993 &bp->vf_link_vars.link_report_flags);
2994 /* Rx Flow Control is ON */
2995 if (bulletin->link_flags & VFPF_LINK_REPORT_RX_FC_ON)
2996 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
2997 &bp->vf_link_vars.link_report_flags);
2998 /* Tx Flow Control is ON */
2999 if (bulletin->link_flags & VFPF_LINK_REPORT_TX_FC_ON)
3000 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
3001 &bp->vf_link_vars.link_report_flags);
3002 __bnx2x_link_report(bp);
6411280a
AE
3003 }
3004
3005 /* copy new bulletin board to bp */
6495d15a
DK
3006 memcpy(&bp->old_bulletin, bulletin,
3007 sizeof(struct pf_vf_bulletin_content));
6411280a
AE
3008
3009 return PFVF_BULLETIN_UPDATED;
3010}
3011
37173488
YM
3012void bnx2x_timer_sriov(struct bnx2x *bp)
3013{
3014 bnx2x_sample_bulletin(bp);
3015
3016 /* if channel is down we need to self destruct */
230bb0f3
YM
3017 if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN)
3018 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
3019 BNX2X_MSG_IOV);
37173488
YM
3020}
3021
1d6f3cd8 3022void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
6411280a
AE
3023{
3024 /* vf doorbells are embedded within the regview */
1d6f3cd8 3025 return bp->regview + PXP_VF_ADDR_DB_START;
6411280a
AE
3026}
3027
e2a367f8
YM
3028void bnx2x_vf_pci_dealloc(struct bnx2x *bp)
3029{
3030 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
3031 sizeof(struct bnx2x_vf_mbx_msg));
3032 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping,
3033 sizeof(union pf_vf_bulletin));
3034}
3035
6411280a
AE
3036int bnx2x_vf_pci_alloc(struct bnx2x *bp)
3037{
8b49a4c7
DK
3038 mutex_init(&bp->vf2pf_mutex);
3039
6411280a 3040 /* allocate vf2pf mailbox for vf to pf channel */
cd2b0389
JP
3041 bp->vf2pf_mbox = BNX2X_PCI_ALLOC(&bp->vf2pf_mbox_mapping,
3042 sizeof(struct bnx2x_vf_mbx_msg));
3043 if (!bp->vf2pf_mbox)
3044 goto alloc_mem_err;
6411280a
AE
3045
3046 /* allocate pf 2 vf bulletin board */
cd2b0389
JP
3047 bp->pf2vf_bulletin = BNX2X_PCI_ALLOC(&bp->pf2vf_bulletin_mapping,
3048 sizeof(union pf_vf_bulletin));
3049 if (!bp->pf2vf_bulletin)
3050 goto alloc_mem_err;
6411280a 3051
6495d15a
DK
3052 bnx2x_vf_bulletin_finalize(&bp->pf2vf_bulletin->content, true);
3053
6411280a
AE
3054 return 0;
3055
3056alloc_mem_err:
e2a367f8 3057 bnx2x_vf_pci_dealloc(bp);
6411280a
AE
3058 return -ENOMEM;
3059}
3c76feff 3060
78c3bcc5
AE
3061void bnx2x_iov_channel_down(struct bnx2x *bp)
3062{
3063 int vf_idx;
3064 struct pf_vf_bulletin_content *bulletin;
3065
3066 if (!IS_SRIOV(bp))
3067 return;
3068
3069 for_each_vf(bp, vf_idx) {
3070 /* locate this VFs bulletin board and update the channel down
3071 * bit
3072 */
3073 bulletin = BP_VF_BULLETIN(bp, vf_idx);
3074 bulletin->valid_bitmap |= 1 << CHANNEL_DOWN;
3075
3076 /* update vf bulletin board */
3077 bnx2x_post_vf_bulletin(bp, vf_idx);
3078 }
3079}
370d4a26
YM
3080
3081void bnx2x_iov_task(struct work_struct *work)
3082{
3083 struct bnx2x *bp = container_of(work, struct bnx2x, iov_task.work);
3084
3085 if (!netif_running(bp->dev))
3086 return;
3087
3088 if (test_and_clear_bit(BNX2X_IOV_HANDLE_FLR,
3089 &bp->iov_task_state))
3090 bnx2x_vf_handle_flr_event(bp);
3091
370d4a26
YM
3092 if (test_and_clear_bit(BNX2X_IOV_HANDLE_VF_MSG,
3093 &bp->iov_task_state))
3094 bnx2x_vf_mbx(bp);
3095}
3096
3097void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag)
3098{
4e857c58 3099 smp_mb__before_atomic();
370d4a26 3100 set_bit(flag, &bp->iov_task_state);
4e857c58 3101 smp_mb__after_atomic();
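	/* the barriers around set_bit() ensure the flag is visible before the iov task runs */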
370d4a26
YM
3102 DP(BNX2X_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
3103 queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0);
3104}