/* bnx2x_vfpf.c: Broadcom Everest network driver.
 *
 * Copyright 2009-2013 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Shmulik Ravid <shmulikr@broadcom.com>
 *             Ariel Elior <ariele@broadcom.com>
 */

#include "bnx2x.h"
#include "bnx2x_cmn.h"
#include <linux/crc32.h>

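/* A VF-PF channel message is a chain of TLVs placed in the VF's mailbox
 * buffer: a request-specific first tlv followed by a CHANNEL_TLV_LIST_END
 * terminator. The PF writes its response into the same buffer at
 * first_tlv.resp_msg_offset.
 */
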
/* place a given tlv on the tlv buffer at a given offset */
void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,
                   u16 length)
{
        struct channel_tlv *tl =
                (struct channel_tlv *)(tlvs_list + offset);

        tl->type = type;
        tl->length = length;
}

/* Clear the mailbox and init the header of the first tlv */
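/* Note: this takes bp->vf2pf_mutex; every call must be paired with
 * bnx2x_vfpf_finalize(), including on error paths.
 */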
void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
                     u16 type, u16 length)
{
        mutex_lock(&bp->vf2pf_mutex);

        DP(BNX2X_MSG_IOV, "preparing to send %d tlv over vf pf channel\n",
           type);

        /* Clear mailbox */
        memset(bp->vf2pf_mbox, 0, sizeof(struct bnx2x_vf_mbx_msg));

        /* init type and length */
        bnx2x_add_tlv(bp, &first_tlv->tl, 0, type, length);

        /* init first tlv header */
        first_tlv->resp_msg_offset = sizeof(bp->vf2pf_mbox->req);
}

/* releases the mailbox */
void bnx2x_vfpf_finalize(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv)
{
        DP(BNX2X_MSG_IOV, "done sending [%d] tlv over vf pf channel\n",
           first_tlv->tl.type);

        mutex_unlock(&bp->vf2pf_mutex);
}

/* list the types and lengths of the tlvs on the buffer */
void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
{
        int i = 1;
        struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;

        while (tlv->type != CHANNEL_TLV_LIST_END) {
                /* output tlv */
                DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
                   tlv->type, tlv->length);

                /* advance to next tlv */
                tlvs_list += tlv->length;

                /* cast general tlv list pointer to channel tlv header */
                tlv = (struct channel_tlv *)tlvs_list;

                i++;

                /* break condition for this loop */
                if (i > MAX_TLVS_IN_LIST) {
                        WARN(true, "corrupt tlvs");
                        return;
                }
        }

        /* output last tlv */
        DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
           tlv->type, tlv->length);
}

/* test whether we support a tlv type */
bool bnx2x_tlv_supported(u16 tlvtype)
{
        return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
}

static inline int bnx2x_pfvf_status_codes(int rc)
{
        switch (rc) {
        case 0:
                return PFVF_STATUS_SUCCESS;
        case -ENOMEM:
                return PFVF_STATUS_NO_RESOURCE;
        default:
                return PFVF_STATUS_FAILURE;
        }
}

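/* Post a request to the PF: write the request's DMA address into the VF's
 * cstorm zone, trigger the PF via the addr_valid byte, then poll the 'done'
 * field (filled in by the PF's response DMAE) for up to 10 seconds.
 */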
static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping)
{
        struct cstorm_vf_zone_data __iomem *zone_data =
                REG_ADDR(bp, PXP_VF_ADDR_CSDM_GLOBAL_START);
        int tout = 100, interval = 100; /* wait for 10 seconds */

        if (*done) {
                BNX2X_ERR("done was non-zero before message to pf was sent\n");
                WARN_ON(true);
                return -EINVAL;
        }

        /* if the PF indicated the channel is down, avoid sending a message.
         * Return success so the calling flow can continue.
         */
        bnx2x_sample_bulletin(bp);
        if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) {
                DP(BNX2X_MSG_IOV, "detecting channel down. Aborting message\n");
                *done = PFVF_STATUS_SUCCESS;
                return 0;
        }

        /* Write message address */
        writel(U64_LO(msg_mapping),
               &zone_data->non_trigger.vf_pf_channel.msg_addr_lo);
        writel(U64_HI(msg_mapping),
               &zone_data->non_trigger.vf_pf_channel.msg_addr_hi);

        /* make sure the address is written before FW accesses it */
        wmb();

        /* Trigger the PF FW */
        writeb(1, &zone_data->trigger.vf_pf_channel.addr_valid);

        /* Wait for PF to complete */
        while ((tout >= 0) && (!*done)) {
                msleep(interval);
                tout -= 1;

                /* progress indicator - HV can take its own sweet time in
                 * answering VFs...
                 */
                DP_CONT(BNX2X_MSG_IOV, ".");
        }

        if (!*done) {
                BNX2X_ERR("PF response has timed out\n");
                return -EAGAIN;
        }
        DP(BNX2X_MSG_SP, "Got a response from PF\n");
        return 0;
}

static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id)
{
        u32 me_reg;
        int tout = 10, interval = 100; /* Wait for 1 sec */

        do {
                /* pxp traps vf read of doorbells and returns me reg value */
                me_reg = readl(bp->doorbells);
                if (GOOD_ME_REG(me_reg))
                        break;

                msleep(interval);

                BNX2X_ERR("Invalid ME register value: 0x%08x. Is pf driver up?\n",
                          me_reg);
        } while (tout-- > 0);

        if (!GOOD_ME_REG(me_reg)) {
                BNX2X_ERR("Invalid ME register value: 0x%08x\n", me_reg);
                return -EINVAL;
        }

        DP(BNX2X_MSG_IOV, "valid ME register value: 0x%08x\n", me_reg);

        *vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;

        return 0;
}

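/* Negotiate resources with the PF: request tx/rx queues, SBs and filters;
 * if the PF answers NO_RESOURCE, lower the request to the amounts the PF
 * recommends in its response and retry, up to VF_ACQUIRE_THRESH attempts.
 */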
int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
{
        int rc = 0, attempts = 0;
        struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire;
        struct pfvf_acquire_resp_tlv *resp = &bp->vf2pf_mbox->resp.acquire_resp;
        u32 vf_id;
        bool resources_acquired = false;

        /* clear mailbox and prep first tlv */
        bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_ACQUIRE, sizeof(*req));

        if (bnx2x_get_vf_id(bp, &vf_id)) {
                rc = -EAGAIN;
                goto out;
        }

        req->vfdev_info.vf_id = vf_id;
        req->vfdev_info.vf_os = 0;

        req->resc_request.num_rxqs = rx_count;
        req->resc_request.num_txqs = tx_count;
        req->resc_request.num_sbs = bp->igu_sb_cnt;
        req->resc_request.num_mac_filters = VF_ACQUIRE_MAC_FILTERS;
        req->resc_request.num_mc_filters = VF_ACQUIRE_MC_FILTERS;

        /* pf 2 vf bulletin board address */
        req->bulletin_addr = bp->pf2vf_bulletin_mapping;

        /* add list termination tlv */
        bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
                      sizeof(struct channel_list_end_tlv));

        /* output tlvs list */
        bnx2x_dp_tlv_list(bp, req);

        while (!resources_acquired) {
                DP(BNX2X_MSG_SP, "attempting to acquire resources\n");

                /* send acquire request */
                rc = bnx2x_send_msg2pf(bp,
                                       &resp->hdr.status,
                                       bp->vf2pf_mbox_mapping);

                /* PF timeout */
                if (rc)
                        goto out;

                /* copy acquire response from buffer to bp */
                memcpy(&bp->acquire_resp, resp, sizeof(bp->acquire_resp));

                attempts++;

                /* test whether the PF accepted our request. If not, humble
                 * the request and try again.
                 */
                if (bp->acquire_resp.hdr.status == PFVF_STATUS_SUCCESS) {
                        DP(BNX2X_MSG_SP, "resources acquired\n");
                        resources_acquired = true;
                } else if (bp->acquire_resp.hdr.status ==
                           PFVF_STATUS_NO_RESOURCE &&
                           attempts < VF_ACQUIRE_THRESH) {
                        DP(BNX2X_MSG_SP,
                           "PF unwilling to fulfill resource request. Try PF recommended amount\n");

                        /* humble our request */
                        req->resc_request.num_txqs =
                                min(req->resc_request.num_txqs,
                                    bp->acquire_resp.resc.num_txqs);
                        req->resc_request.num_rxqs =
                                min(req->resc_request.num_rxqs,
                                    bp->acquire_resp.resc.num_rxqs);
                        req->resc_request.num_sbs =
                                min(req->resc_request.num_sbs,
                                    bp->acquire_resp.resc.num_sbs);
                        req->resc_request.num_mac_filters =
                                min(req->resc_request.num_mac_filters,
                                    bp->acquire_resp.resc.num_mac_filters);
                        req->resc_request.num_vlan_filters =
                                min(req->resc_request.num_vlan_filters,
                                    bp->acquire_resp.resc.num_vlan_filters);
                        req->resc_request.num_mc_filters =
                                min(req->resc_request.num_mc_filters,
                                    bp->acquire_resp.resc.num_mc_filters);

                        /* Clear response buffer */
                        memset(&bp->vf2pf_mbox->resp, 0,
                               sizeof(union pfvf_tlvs));
                } else {
                        /* PF reports error */
                        BNX2X_ERR("Failed to get the requested amount of resources: %d. Breaking...\n",
                                  bp->acquire_resp.hdr.status);
                        rc = -EAGAIN;
                        goto out;
                }
        }

        /* get HW info */
        bp->common.chip_id |= (bp->acquire_resp.pfdev_info.chip_num & 0xffff);
        bp->link_params.chip_id = bp->common.chip_id;
        bp->db_size = bp->acquire_resp.pfdev_info.db_size;
        bp->common.int_block = INT_BLOCK_IGU;
        bp->common.chip_port_mode = CHIP_2_PORT_MODE;
        bp->igu_dsb_id = -1;
        bp->mf_ov = 0;
        bp->mf_mode = 0;
        bp->common.flash_size = 0;
        bp->flags |=
                NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG;
        bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs;
        bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
        strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
                sizeof(bp->fw_ver));

        if (is_valid_ether_addr(bp->acquire_resp.resc.current_mac_addr))
                memcpy(bp->dev->dev_addr,
                       bp->acquire_resp.resc.current_mac_addr,
                       ETH_ALEN);

out:
        bnx2x_vfpf_finalize(bp, &req->first_tlv);
        return rc;
}

int bnx2x_vfpf_release(struct bnx2x *bp)
{
        struct vfpf_release_tlv *req = &bp->vf2pf_mbox->req.release;
        struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
        int rc;
        u32 vf_id;

        /* clear mailbox and prep first tlv */
        bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_RELEASE, sizeof(*req));

        if (bnx2x_get_vf_id(bp, &vf_id)) {
                rc = -EAGAIN;
                goto out;
        }

        req->vf_id = vf_id;

        /* add list termination tlv */
        bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
                      sizeof(struct channel_list_end_tlv));

        /* output tlvs list */
        bnx2x_dp_tlv_list(bp, req);

        /* send release request */
        rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);

        if (rc)
                /* PF timeout */
                goto out;

        if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
                /* PF released us */
                DP(BNX2X_MSG_SP, "vf released\n");
        } else {
                /* PF reports error */
                BNX2X_ERR("PF failed our release request - are we out of sync? Response status: %d\n",
                          resp->hdr.status);
                rc = -EAGAIN;
                goto out;
        }
out:
        bnx2x_vfpf_finalize(bp, &req->first_tlv);

        return rc;
}

/* Tell PF about SB addresses */
int bnx2x_vfpf_init(struct bnx2x *bp)
{
        struct vfpf_init_tlv *req = &bp->vf2pf_mbox->req.init;
        struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
        int rc, i;

        /* clear mailbox and prep first tlv */
        bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_INIT, sizeof(*req));

        /* status blocks */
        for_each_eth_queue(bp, i)
                req->sb_addr[i] = (dma_addr_t)bnx2x_fp(bp, i,
                                                       status_blk_mapping);

        /* statistics - the request only supports a single queue for now */
        req->stats_addr = bp->fw_stats_data_mapping +
                          offsetof(struct bnx2x_fw_stats_data, queue_stats);

        req->stats_stride = sizeof(struct per_queue_stats);

        /* add list termination tlv */
        bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
                      sizeof(struct channel_list_end_tlv));

        /* output tlvs list */
        bnx2x_dp_tlv_list(bp, req);

        rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
        if (rc)
                goto out;

        if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
                BNX2X_ERR("INIT VF failed: %d. Breaking...\n",
                          resp->hdr.status);
                rc = -EAGAIN;
                goto out;
        }

        DP(BNX2X_MSG_SP, "INIT VF Succeeded\n");
out:
        bnx2x_vfpf_finalize(bp, &req->first_tlv);

        return rc;
}

/* CLOSE VF - opposite to INIT_VF */
void bnx2x_vfpf_close_vf(struct bnx2x *bp)
{
        struct vfpf_close_tlv *req = &bp->vf2pf_mbox->req.close;
        struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
        int i, rc;
        u32 vf_id;

        /* If we haven't got a valid VF id, there is no point in
         * continuing to send messages
         */
        if (bnx2x_get_vf_id(bp, &vf_id))
                goto free_irq;

        /* Close the queues */
        for_each_queue(bp, i)
                bnx2x_vfpf_teardown_queue(bp, i);

        /* remove mac */
        bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index, false);

        /* clear mailbox and prep first tlv */
        bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_CLOSE, sizeof(*req));

        req->vf_id = vf_id;

        /* add list termination tlv */
        bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
                      sizeof(struct channel_list_end_tlv));

        /* output tlvs list */
        bnx2x_dp_tlv_list(bp, req);

        rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);

        if (rc)
                BNX2X_ERR("Sending CLOSE failed. rc was: %d\n", rc);

        else if (resp->hdr.status != PFVF_STATUS_SUCCESS)
                BNX2X_ERR("Sending CLOSE failed: pf response was %d\n",
                          resp->hdr.status);

        bnx2x_vfpf_finalize(bp, &req->first_tlv);

free_irq:
        /* Disable HW interrupts, NAPI */
        bnx2x_netif_stop(bp, 0);
        /* Delete all NAPI objects */
        bnx2x_del_all_napi(bp);

        /* Release IRQs */
        bnx2x_free_irq(bp);
}

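/* Set up the classification objects (mac, vlan, mcast, rss) that the PF
 * manages on behalf of the VF's leading queue.
 */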
static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                   struct bnx2x_vf_queue *q)
{
        u8 cl_id = vfq_cl_id(vf, q);
        u8 func_id = FW_VF_HANDLE(vf->abs_vfid);

        /* mac */
        bnx2x_init_mac_obj(bp, &q->mac_obj,
                           cl_id, q->cid, func_id,
                           bnx2x_vf_sp(bp, vf, mac_rdata),
                           bnx2x_vf_sp_map(bp, vf, mac_rdata),
                           BNX2X_FILTER_MAC_PENDING,
                           &vf->filter_state,
                           BNX2X_OBJ_TYPE_RX_TX,
                           &bp->macs_pool);
        /* vlan */
        bnx2x_init_vlan_obj(bp, &q->vlan_obj,
                            cl_id, q->cid, func_id,
                            bnx2x_vf_sp(bp, vf, vlan_rdata),
                            bnx2x_vf_sp_map(bp, vf, vlan_rdata),
                            BNX2X_FILTER_VLAN_PENDING,
                            &vf->filter_state,
                            BNX2X_OBJ_TYPE_RX_TX,
                            &bp->vlans_pool);

        /* mcast */
        bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
                             q->cid, func_id, func_id,
                             bnx2x_vf_sp(bp, vf, mcast_rdata),
                             bnx2x_vf_sp_map(bp, vf, mcast_rdata),
                             BNX2X_FILTER_MCAST_PENDING,
                             &vf->filter_state,
                             BNX2X_OBJ_TYPE_RX_TX);

        /* rss */
        bnx2x_init_rss_config_obj(bp, &vf->rss_conf_obj, cl_id, q->cid,
                                  func_id, func_id,
                                  bnx2x_vf_sp(bp, vf, rss_rdata),
                                  bnx2x_vf_sp_map(bp, vf, rss_rdata),
                                  BNX2X_FILTER_RSS_CONF_PENDING,
                                  &vf->filter_state,
                                  BNX2X_OBJ_TYPE_RX_TX);

        vf->leading_rss = cl_id;
        q->is_leading = true;
}

/* ask the pf to open a queue for the vf */
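/* Note: the queue flags word is computed once below and applied to both the
 * rx and tx sides of the queue pair.
 */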
int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                       bool is_leading)
{
        struct vfpf_setup_q_tlv *req = &bp->vf2pf_mbox->req.setup_q;
        struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
        u8 fp_idx = fp->index;
        u16 tpa_agg_size = 0, flags = 0;
        int rc;

        /* clear mailbox and prep first tlv */
        bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req));

        /* select tpa mode to request */
        if (!fp->disable_tpa) {
                flags |= VFPF_QUEUE_FLG_TPA;
                flags |= VFPF_QUEUE_FLG_TPA_IPV6;
                if (fp->mode == TPA_MODE_GRO)
                        flags |= VFPF_QUEUE_FLG_TPA_GRO;
                tpa_agg_size = TPA_AGG_SIZE;
        }

        if (is_leading)
                flags |= VFPF_QUEUE_FLG_LEADING_RSS;

        /* calculate queue flags */
        flags |= VFPF_QUEUE_FLG_STATS;
        flags |= VFPF_QUEUE_FLG_CACHE_ALIGN;
        flags |= VFPF_QUEUE_FLG_VLAN;
        DP(NETIF_MSG_IFUP, "vlan removal enabled\n");

        /* Common */
        req->vf_qid = fp_idx;
        req->param_valid = VFPF_RXQ_VALID | VFPF_TXQ_VALID;

        /* Rx */
        req->rxq.rcq_addr = fp->rx_comp_mapping;
        req->rxq.rcq_np_addr = fp->rx_comp_mapping + BCM_PAGE_SIZE;
        req->rxq.rxq_addr = fp->rx_desc_mapping;
        req->rxq.sge_addr = fp->rx_sge_mapping;
        req->rxq.vf_sb = fp_idx;
        req->rxq.sb_index = HC_INDEX_ETH_RX_CQ_CONS;
        req->rxq.hc_rate = bp->rx_ticks ? 1000000/bp->rx_ticks : 0;
        req->rxq.mtu = bp->dev->mtu;
        req->rxq.buf_sz = fp->rx_buf_size;
        req->rxq.sge_buf_sz = BCM_PAGE_SIZE * PAGES_PER_SGE;
        req->rxq.tpa_agg_sz = tpa_agg_size;
        req->rxq.max_sge_pkt = SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
        req->rxq.max_sge_pkt = ((req->rxq.max_sge_pkt + PAGES_PER_SGE - 1) &
                               (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
        req->rxq.flags = flags;
        req->rxq.drop_flags = 0;
        req->rxq.cache_line_log = BNX2X_RX_ALIGN_SHIFT;
        req->rxq.stat_id = -1; /* No stats at the moment */

        /* Tx */
        req->txq.txq_addr = fp->txdata_ptr[FIRST_TX_COS_INDEX]->tx_desc_mapping;
        req->txq.vf_sb = fp_idx;
        req->txq.sb_index = HC_INDEX_ETH_TX_CQ_CONS_COS0;
        req->txq.hc_rate = bp->tx_ticks ? 1000000/bp->tx_ticks : 0;
        req->txq.flags = flags;
        req->txq.traffic_type = LLFC_TRAFFIC_TYPE_NW;

        /* add list termination tlv */
        bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
                      sizeof(struct channel_list_end_tlv));

        /* output tlvs list */
        bnx2x_dp_tlv_list(bp, req);

        rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
        if (rc)
                BNX2X_ERR("Sending SETUP_Q message for queue[%d] failed!\n",
                          fp_idx);

        if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
                BNX2X_ERR("Status of SETUP_Q for queue[%d] is %d\n",
                          fp_idx, resp->hdr.status);
                rc = -EINVAL;
        }

        bnx2x_vfpf_finalize(bp, &req->first_tlv);

        return rc;
}

int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
{
        struct vfpf_q_op_tlv *req = &bp->vf2pf_mbox->req.q_op;
        struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
        int rc;

        /* clear mailbox and prep first tlv */
        bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_TEARDOWN_Q,
                        sizeof(*req));

        req->vf_qid = qidx;

        /* add list termination tlv */
        bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
                      sizeof(struct channel_list_end_tlv));

        /* output tlvs list */
        bnx2x_dp_tlv_list(bp, req);

        rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);

        if (rc) {
                BNX2X_ERR("Sending TEARDOWN for queue %d failed: %d\n", qidx,
                          rc);
                goto out;
        }

        /* PF failed the transaction */
        if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
                BNX2X_ERR("TEARDOWN for queue %d failed: %d\n", qidx,
                          resp->hdr.status);
                rc = -EINVAL;
        }

out:
        bnx2x_vfpf_finalize(bp, &req->first_tlv);
        return rc;
}

/* request pf to add a mac for the vf */
int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
{
        struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
        struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
        struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
        int rc = 0;

        /* clear mailbox and prep first tlv */
        bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
                        sizeof(*req));

        req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
        req->vf_qid = vf_qid;
        req->n_mac_vlan_filters = 1;

        req->filters[0].flags = VFPF_Q_FILTER_DEST_MAC_VALID;
        if (set)
                req->filters[0].flags |= VFPF_Q_FILTER_SET_MAC;

        /* sample bulletin board for new mac */
        bnx2x_sample_bulletin(bp);

        /* copy mac from device to request */
        memcpy(req->filters[0].mac, addr, ETH_ALEN);

        /* add list termination tlv */
        bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
                      sizeof(struct channel_list_end_tlv));

        /* output tlvs list */
        bnx2x_dp_tlv_list(bp, req);

        /* send message to pf */
        rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
        if (rc) {
                BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
                goto out;
        }

        /* failure may mean PF was configured with a new mac for us */
        while (resp->hdr.status == PFVF_STATUS_FAILURE) {
                DP(BNX2X_MSG_IOV,
                   "vfpf SET MAC failed. Check bulletin board for new posts\n");

                /* copy mac from bulletin to device */
                memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);

                /* check if bulletin board was updated */
                if (bnx2x_sample_bulletin(bp) == PFVF_BULLETIN_UPDATED) {
                        /* copy mac from device to request */
                        memcpy(req->filters[0].mac, bp->dev->dev_addr,
                               ETH_ALEN);

                        /* send message to pf */
                        rc = bnx2x_send_msg2pf(bp, &resp->hdr.status,
                                               bp->vf2pf_mbox_mapping);
                } else {
                        /* no new info in bulletin */
                        break;
                }
        }

        if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
                BNX2X_ERR("vfpf SET MAC failed: %d\n", resp->hdr.status);
                rc = -EINVAL;
        }
out:
        bnx2x_vfpf_finalize(bp, &req->first_tlv);

        return rc;
}

/* request pf to config rss table for vf queues */
int bnx2x_vfpf_config_rss(struct bnx2x *bp,
                          struct bnx2x_config_rss_params *params)
{
        struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
        struct vfpf_rss_tlv *req = &bp->vf2pf_mbox->req.update_rss;
        int rc = 0;

        /* clear mailbox and prep first tlv */
        bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_UPDATE_RSS,
                        sizeof(*req));

        /* add list termination tlv */
        bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
                      sizeof(struct channel_list_end_tlv));

        memcpy(req->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
        memcpy(req->rss_key, params->rss_key, sizeof(params->rss_key));
        req->ind_table_size = T_ETH_INDIRECTION_TABLE_SIZE;
        req->rss_key_size = T_ETH_RSS_KEY;
        req->rss_result_mask = params->rss_result_mask;

        /* flags handled individually for backward/forward compatibility */
        if (params->rss_flags & (1 << BNX2X_RSS_MODE_DISABLED))
                req->rss_flags |= VFPF_RSS_MODE_DISABLED;
        if (params->rss_flags & (1 << BNX2X_RSS_MODE_REGULAR))
                req->rss_flags |= VFPF_RSS_MODE_REGULAR;
        if (params->rss_flags & (1 << BNX2X_RSS_SET_SRCH))
                req->rss_flags |= VFPF_RSS_SET_SRCH;
        if (params->rss_flags & (1 << BNX2X_RSS_IPV4))
                req->rss_flags |= VFPF_RSS_IPV4;
        if (params->rss_flags & (1 << BNX2X_RSS_IPV4_TCP))
                req->rss_flags |= VFPF_RSS_IPV4_TCP;
        if (params->rss_flags & (1 << BNX2X_RSS_IPV4_UDP))
                req->rss_flags |= VFPF_RSS_IPV4_UDP;
        if (params->rss_flags & (1 << BNX2X_RSS_IPV6))
                req->rss_flags |= VFPF_RSS_IPV6;
        if (params->rss_flags & (1 << BNX2X_RSS_IPV6_TCP))
                req->rss_flags |= VFPF_RSS_IPV6_TCP;
        if (params->rss_flags & (1 << BNX2X_RSS_IPV6_UDP))
                req->rss_flags |= VFPF_RSS_IPV6_UDP;

        DP(BNX2X_MSG_IOV, "rss flags %x\n", req->rss_flags);

        /* output tlvs list */
        bnx2x_dp_tlv_list(bp, req);

        /* send message to pf */
        rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
        if (rc) {
                BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
                goto out;
        }

        if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
                BNX2X_ERR("failed to send rss message to PF over VF-PF channel: %d\n",
                          resp->hdr.status);
                rc = -EINVAL;
        }
out:
        bnx2x_vfpf_finalize(bp, &req->first_tlv);

        return rc;
}

int bnx2x_vfpf_set_mcast(struct net_device *dev)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
        struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
        int rc, i = 0;
        struct netdev_hw_addr *ha;

        if (bp->state != BNX2X_STATE_OPEN) {
                DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
                return -EINVAL;
        }

        /* clear mailbox and prep first tlv */
        bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
                        sizeof(*req));

        /* Get Rx mode requested */
        DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

        /* We support PFVF_MAX_MULTICAST_PER_VF mcast addresses tops */
        netdev_for_each_mc_addr(ha, dev) {
                if (i >= PFVF_MAX_MULTICAST_PER_VF) {
                        DP(NETIF_MSG_IFUP,
                           "VF supports not more than %d multicast MAC addresses\n",
                           PFVF_MAX_MULTICAST_PER_VF);
                        rc = -EINVAL;
                        goto out;
                }
                DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
                   bnx2x_mc_addr(ha));
                memcpy(req->multicast[i], bnx2x_mc_addr(ha), ETH_ALEN);
                i++;
        }

        req->n_multicast = i;
        req->flags |= VFPF_SET_Q_FILTERS_MULTICAST_CHANGED;
        req->vf_qid = 0;

        /* add list termination tlv */
        bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
                      sizeof(struct channel_list_end_tlv));

        /* output tlvs list */
        bnx2x_dp_tlv_list(bp, req);
        rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
        if (rc) {
                BNX2X_ERR("Sending a message failed: %d\n", rc);
                goto out;
        }

        if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
                BNX2X_ERR("Set Rx mode/multicast failed: %d\n",
                          resp->hdr.status);
                rc = -EINVAL;
        }
out:
        bnx2x_vfpf_finalize(bp, &req->first_tlv);

        return rc;
}

int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
{
        int mode = bp->rx_mode;
        struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
        struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
        int rc;

        /* clear mailbox and prep first tlv */
        bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
                        sizeof(*req));

        DP(NETIF_MSG_IFUP, "Rx mode is %d\n", mode);

        switch (mode) {
        case BNX2X_RX_MODE_NONE: /* no Rx */
                req->rx_mask = VFPF_RX_MASK_ACCEPT_NONE;
                break;
        case BNX2X_RX_MODE_NORMAL:
                req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
                req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
                req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
                break;
        case BNX2X_RX_MODE_ALLMULTI:
                req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
                req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
                req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
                break;
        case BNX2X_RX_MODE_PROMISC:
                req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_UNICAST;
                req->rx_mask |= VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
                req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
                break;
        default:
                BNX2X_ERR("BAD rx mode (%d)\n", mode);
                rc = -EINVAL;
                goto out;
        }

        req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
        req->vf_qid = 0;

        /* add list termination tlv */
        bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
                      sizeof(struct channel_list_end_tlv));

        /* output tlvs list */
        bnx2x_dp_tlv_list(bp, req);

        rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
        if (rc)
                BNX2X_ERR("Sending a message failed: %d\n", rc);

        if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
                BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status);
                rc = -EINVAL;
        }
out:
        bnx2x_vfpf_finalize(bp, &req->first_tlv);

        return rc;
}

/* General service functions */
static void storm_memset_vf_mbx_ack(struct bnx2x *bp, u16 abs_fid)
{
        u32 addr = BAR_CSTRORM_INTMEM +
                   CSTORM_VF_PF_CHANNEL_STATE_OFFSET(abs_fid);

        REG_WR8(bp, addr, VF_PF_CHANNEL_STATE_READY);
}

static void storm_memset_vf_mbx_valid(struct bnx2x *bp, u16 abs_fid)
{
        u32 addr = BAR_CSTRORM_INTMEM +
                   CSTORM_VF_PF_CHANNEL_VALID_OFFSET(abs_fid);

        REG_WR8(bp, addr, 1);
}

static inline void bnx2x_set_vf_mbxs_valid(struct bnx2x *bp)
{
        int i;

        for_each_vf(bp, i)
                storm_memset_vf_mbx_valid(bp, bnx2x_vf(bp, i, abs_vfid));
}

/* enable vf_pf mailbox (aka vf-pf-channel) */
void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid)
{
        bnx2x_vf_flr_clnup_epilog(bp, abs_vfid);

        /* enable the mailbox in the FW */
        storm_memset_vf_mbx_ack(bp, abs_vfid);
        storm_memset_vf_mbx_valid(bp, abs_vfid);

        /* enable the VF access to the mailbox */
        bnx2x_vf_enable_access(bp, abs_vfid);
}

/* this works only on !E1h */
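/* Copy len32 dwords between PF memory and VF memory with the DMAE engine;
 * from_vf selects the direction and sets the VF id on the matching side of
 * the transaction.
 */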
static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf,
                                dma_addr_t pf_addr, u8 vfid, u32 vf_addr_hi,
                                u32 vf_addr_lo, u32 len32)
{
        struct dmae_command dmae;

        if (CHIP_IS_E1x(bp)) {
                BNX2X_ERR("Chip revision does not support VFs\n");
                return DMAE_NOT_RDY;
        }

        if (!bp->dmae_ready) {
                BNX2X_ERR("DMAE is not ready, can not copy\n");
                return DMAE_NOT_RDY;
        }

        /* set opcode and fixed command fields */
        bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_PCI);

        if (from_vf) {
                dmae.opcode_iov = (vfid << DMAE_COMMAND_SRC_VFID_SHIFT) |
                        (DMAE_SRC_VF << DMAE_COMMAND_SRC_VFPF_SHIFT) |
                        (DMAE_DST_PF << DMAE_COMMAND_DST_VFPF_SHIFT);

                dmae.opcode |= (DMAE_C_DST << DMAE_COMMAND_C_FUNC_SHIFT);

                dmae.src_addr_lo = vf_addr_lo;
                dmae.src_addr_hi = vf_addr_hi;
                dmae.dst_addr_lo = U64_LO(pf_addr);
                dmae.dst_addr_hi = U64_HI(pf_addr);
        } else {
                dmae.opcode_iov = (vfid << DMAE_COMMAND_DST_VFID_SHIFT) |
                        (DMAE_DST_VF << DMAE_COMMAND_DST_VFPF_SHIFT) |
                        (DMAE_SRC_PF << DMAE_COMMAND_SRC_VFPF_SHIFT);

                dmae.opcode |= (DMAE_C_SRC << DMAE_COMMAND_C_FUNC_SHIFT);

                dmae.src_addr_lo = U64_LO(pf_addr);
                dmae.src_addr_hi = U64_HI(pf_addr);
                dmae.dst_addr_lo = vf_addr_lo;
                dmae.dst_addr_hi = vf_addr_hi;
        }
        dmae.len = len32;

        /* issue the command and wait for completion */
        return bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
}

static void bnx2x_vf_mbx_resp(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
        struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
        u64 vf_addr;
        dma_addr_t pf_addr;
        u16 length, type;
        int rc;
        struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;

        /* prepare response */
        type = mbx->first_tlv.tl.type;
        length = type == CHANNEL_TLV_ACQUIRE ?
                sizeof(struct pfvf_acquire_resp_tlv) :
                sizeof(struct pfvf_general_resp_tlv);
        bnx2x_add_tlv(bp, resp, 0, type, length);
        resp->hdr.status = bnx2x_pfvf_status_codes(vf->op_rc);
        bnx2x_add_tlv(bp, resp, length, CHANNEL_TLV_LIST_END,
                      sizeof(struct channel_list_end_tlv));
        bnx2x_dp_tlv_list(bp, resp);
        DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
           mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);

        /* send response */
        vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) +
                  mbx->first_tlv.resp_msg_offset;
        pf_addr = mbx->msg_mapping +
                  offsetof(struct bnx2x_vf_mbx_msg, resp);

        /* copy the response body, if there is one, before the header, as the vf
         * is sensitive to the header being written
         */
        if (resp->hdr.tl.length > sizeof(u64)) {
                length = resp->hdr.tl.length - sizeof(u64);
                vf_addr += sizeof(u64);
                pf_addr += sizeof(u64);
                rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
                                          U64_HI(vf_addr),
                                          U64_LO(vf_addr),
                                          length/4);
                if (rc) {
                        BNX2X_ERR("Failed to copy response body to VF %d\n",
                                  vf->abs_vfid);
                        goto mbx_error;
                }
                vf_addr -= sizeof(u64);
                pf_addr -= sizeof(u64);
        }

        /* ack the FW */
        storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
        mmiowb();

        /* initiate dmae to send the response */
        mbx->flags &= ~VF_MSG_INPROCESS;

        /* copy the response header including status-done field,
         * must be last dmae, must be after FW is acked
         */
        rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
                                  U64_HI(vf_addr),
                                  U64_LO(vf_addr),
                                  sizeof(u64)/4);

        /* unlock channel mutex */
        bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);

        if (rc) {
                BNX2X_ERR("Failed to copy response status to VF %d\n",
                          vf->abs_vfid);
                goto mbx_error;
        }
        return;

mbx_error:
        bnx2x_vf_release(bp, vf, false); /* non blocking */
}

static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                      struct bnx2x_vf_mbx *mbx, int vfop_status)
{
        int i;
        struct pfvf_acquire_resp_tlv *resp = &mbx->msg->resp.acquire_resp;
        struct pf_vf_resc *resc = &resp->resc;
        u8 status = bnx2x_pfvf_status_codes(vfop_status);

        memset(resp, 0, sizeof(*resp));

        /* fill in pfdev info */
        resp->pfdev_info.chip_num = bp->common.chip_id;
        resp->pfdev_info.db_size = bp->db_size;
        resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
        resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
                                   /* PFVF_CAP_DHC |*/ PFVF_CAP_TPA);
        bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
                          sizeof(resp->pfdev_info.fw_ver));

        if (status == PFVF_STATUS_NO_RESOURCE ||
            status == PFVF_STATUS_SUCCESS) {
                /* set resources numbers, if status equals NO_RESOURCE these
                 * are max possible numbers
                 */
                resc->num_rxqs = vf_rxq_count(vf) ? :
                        bnx2x_vf_max_queue_cnt(bp, vf);
                resc->num_txqs = vf_txq_count(vf) ? :
                        bnx2x_vf_max_queue_cnt(bp, vf);
                resc->num_sbs = vf_sb_count(vf);
                resc->num_mac_filters = vf_mac_rules_cnt(vf);
                resc->num_vlan_filters = vf_vlan_rules_cnt(vf);
                resc->num_mc_filters = 0;

                if (status == PFVF_STATUS_SUCCESS) {
                        /* fill in the allocated resources */
                        struct pf_vf_bulletin_content *bulletin =
                                BP_VF_BULLETIN(bp, vf->index);

                        for_each_vfq(vf, i)
                                resc->hw_qid[i] =
                                        vfq_qzone_id(vf, vfq_get(vf, i));

                        for_each_vf_sb(vf, i) {
                                resc->hw_sbs[i].hw_sb_id = vf_igu_sb(vf, i);
                                resc->hw_sbs[i].sb_qid = vf_hc_qzone(vf, i);
                        }

                        /* if a mac has been set for this vf, supply it */
                        if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
                                memcpy(resc->current_mac_addr, bulletin->mac,
                                       ETH_ALEN);
                        }
                }
        }

        DP(BNX2X_MSG_IOV, "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%x\n"
           "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d, fw_ver: '%s'\n",
           vf->abs_vfid,
           resp->pfdev_info.chip_num,
           resp->pfdev_info.db_size,
           resp->pfdev_info.indices_per_sb,
           resp->pfdev_info.pf_cap,
           resc->num_rxqs,
           resc->num_txqs,
           resc->num_sbs,
           resc->num_mac_filters,
           resc->num_vlan_filters,
           resc->num_mc_filters,
           resp->pfdev_info.fw_ver);

        DP_CONT(BNX2X_MSG_IOV, "hw_qids- [ ");
        for (i = 0; i < vf_rxq_count(vf); i++)
                DP_CONT(BNX2X_MSG_IOV, "%d ", resc->hw_qid[i]);
        DP_CONT(BNX2X_MSG_IOV, "], sb_info- [ ");
        for (i = 0; i < vf_sb_count(vf); i++)
                DP_CONT(BNX2X_MSG_IOV, "%d:%d ",
                        resc->hw_sbs[i].hw_sb_id,
                        resc->hw_sbs[i].sb_qid);
        DP_CONT(BNX2X_MSG_IOV, "]\n");

        /* send the response */
        vf->op_rc = vfop_status;
        bnx2x_vf_mbx_resp(bp, vf);
}

static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                 struct bnx2x_vf_mbx *mbx)
{
        int rc;
        struct vfpf_acquire_tlv *acquire = &mbx->msg->req.acquire;

        /* log vfdev info */
        DP(BNX2X_MSG_IOV,
           "VF[%d] ACQUIRE: vfdev_info- vf_id %d, vf_os %d resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d\n",
           vf->abs_vfid, acquire->vfdev_info.vf_id, acquire->vfdev_info.vf_os,
           acquire->resc_request.num_rxqs, acquire->resc_request.num_txqs,
           acquire->resc_request.num_sbs, acquire->resc_request.num_mac_filters,
           acquire->resc_request.num_vlan_filters,
           acquire->resc_request.num_mc_filters);

        /* acquire the resources */
        rc = bnx2x_vf_acquire(bp, vf, &acquire->resc_request);

        /* store address of vf's bulletin board */
        vf->bulletin_map = acquire->bulletin_addr;

        /* response */
        bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc);
}

static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                 struct bnx2x_vf_mbx *mbx)
{
        struct vfpf_init_tlv *init = &mbx->msg->req.init;

        /* record ghost addresses from vf message */
        vf->spq_map = init->spq_addr;
        vf->fw_stat_map = init->stats_addr;
        vf->stats_stride = init->stats_stride;
        vf->op_rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);

        /* set VF multiqueue statistics collection mode */
        if (init->flags & VFPF_INIT_FLG_STATS_COALESCE)
                vf->cfg_flags |= VF_CFG_STATS_COALESCE;

        /* response */
        bnx2x_vf_mbx_resp(bp, vf);
}

/* convert MBX queue-flags to standard SP queue-flags */
static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags,
                                     unsigned long *sp_q_flags)
{
        if (mbx_q_flags & VFPF_QUEUE_FLG_TPA)
                __set_bit(BNX2X_Q_FLG_TPA, sp_q_flags);
        if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_IPV6)
                __set_bit(BNX2X_Q_FLG_TPA_IPV6, sp_q_flags);
        if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_GRO)
                __set_bit(BNX2X_Q_FLG_TPA_GRO, sp_q_flags);
        if (mbx_q_flags & VFPF_QUEUE_FLG_STATS)
                __set_bit(BNX2X_Q_FLG_STATS, sp_q_flags);
        if (mbx_q_flags & VFPF_QUEUE_FLG_VLAN)
                __set_bit(BNX2X_Q_FLG_VLAN, sp_q_flags);
        if (mbx_q_flags & VFPF_QUEUE_FLG_COS)
                __set_bit(BNX2X_Q_FLG_COS, sp_q_flags);
        if (mbx_q_flags & VFPF_QUEUE_FLG_HC)
                __set_bit(BNX2X_Q_FLG_HC, sp_q_flags);
        if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
                __set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);
        if (mbx_q_flags & VFPF_QUEUE_FLG_LEADING_RSS)
                __set_bit(BNX2X_Q_FLG_LEADING_RSS, sp_q_flags);

        /* outer vlan removal is set according to PF's multi function mode */
        if (IS_MF_SD(bp))
                __set_bit(BNX2X_Q_FLG_OV, sp_q_flags);
}

static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                 struct bnx2x_vf_mbx *mbx)
{
        struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q;
        struct bnx2x_vfop_cmd cmd = {
                .done = bnx2x_vf_mbx_resp,
                .block = false,
        };

        /* verify vf_qid */
        if (setup_q->vf_qid >= vf_rxq_count(vf)) {
                BNX2X_ERR("vf_qid %d invalid, max queue count is %d\n",
                          setup_q->vf_qid, vf_rxq_count(vf));
                vf->op_rc = -EINVAL;
                goto response;
        }

        /* tx queues must be setup alongside rx queues thus if the rx queue
         * is not marked as valid there's nothing to do.
         */
        if (setup_q->param_valid & (VFPF_RXQ_VALID|VFPF_TXQ_VALID)) {
                struct bnx2x_vf_queue *q = vfq_get(vf, setup_q->vf_qid);
                unsigned long q_type = 0;

                struct bnx2x_queue_init_params *init_p;
                struct bnx2x_queue_setup_params *setup_p;

                if (bnx2x_vfq_is_leading(q))
                        bnx2x_leading_vfq_init(bp, vf, q);

                /* re-init the VF operation context */
                memset(&vf->op_params.qctor, 0, sizeof(vf->op_params.qctor));
                setup_p = &vf->op_params.qctor.prep_qsetup;
                init_p = &vf->op_params.qctor.qstate.params.init;

                /* activate immediately */
                __set_bit(BNX2X_Q_FLG_ACTIVE, &setup_p->flags);

                if (setup_q->param_valid & VFPF_TXQ_VALID) {
                        struct bnx2x_txq_setup_params *txq_params =
                                &setup_p->txq_params;

                        __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

                        /* save sb resource index */
                        q->sb_idx = setup_q->txq.vf_sb;

                        /* tx init */
                        init_p->tx.hc_rate = setup_q->txq.hc_rate;
                        init_p->tx.sb_cq_index = setup_q->txq.sb_index;

                        bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
                                                 &init_p->tx.flags);

                        /* tx setup - flags */
                        bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
                                                 &setup_p->flags);

                        /* tx setup - general, nothing */

                        /* tx setup - tx */
                        txq_params->dscr_map = setup_q->txq.txq_addr;
                        txq_params->sb_cq_index = setup_q->txq.sb_index;
                        txq_params->traffic_type = setup_q->txq.traffic_type;

                        bnx2x_vfop_qctor_dump_tx(bp, vf, init_p, setup_p,
                                                 q->index, q->sb_idx);
                }

                if (setup_q->param_valid & VFPF_RXQ_VALID) {
                        struct bnx2x_rxq_setup_params *rxq_params =
                                &setup_p->rxq_params;

                        __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);

                        /* Note: there is no support for different SBs
                         * for TX and RX
                         */
                        q->sb_idx = setup_q->rxq.vf_sb;

                        /* rx init */
                        init_p->rx.hc_rate = setup_q->rxq.hc_rate;
                        init_p->rx.sb_cq_index = setup_q->rxq.sb_index;
                        bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
                                                 &init_p->rx.flags);

                        /* rx setup - flags */
                        bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
                                                 &setup_p->flags);

                        /* rx setup - general */
                        setup_p->gen_params.mtu = setup_q->rxq.mtu;

                        /* rx setup - rx */
                        rxq_params->drop_flags = setup_q->rxq.drop_flags;
                        rxq_params->dscr_map = setup_q->rxq.rxq_addr;
                        rxq_params->sge_map = setup_q->rxq.sge_addr;
                        rxq_params->rcq_map = setup_q->rxq.rcq_addr;
                        rxq_params->rcq_np_map = setup_q->rxq.rcq_np_addr;
                        rxq_params->buf_sz = setup_q->rxq.buf_sz;
                        rxq_params->tpa_agg_sz = setup_q->rxq.tpa_agg_sz;
                        rxq_params->max_sges_pkt = setup_q->rxq.max_sge_pkt;
                        rxq_params->sge_buf_sz = setup_q->rxq.sge_buf_sz;
                        rxq_params->cache_line_log =
                                setup_q->rxq.cache_line_log;
                        rxq_params->sb_cq_index = setup_q->rxq.sb_index;

                        bnx2x_vfop_qctor_dump_rx(bp, vf, init_p, setup_p,
                                                 q->index, q->sb_idx);
                }
                /* complete the preparations */
                bnx2x_vfop_qctor_prep(bp, vf, q, &vf->op_params.qctor, q_type);

                vf->op_rc = bnx2x_vfop_qsetup_cmd(bp, vf, &cmd, q->index);
                if (vf->op_rc)
                        goto response;
                return;
        }
response:
        bnx2x_vf_mbx_resp(bp, vf);
}

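/* The q_filters handler runs as a staged vfop state machine: MACS ->
 * VLANS -> RXMODE -> MCAST -> DONE. Each stage that has work queues an
 * asynchronous command whose completion callback re-enters
 * bnx2x_vfop_mbx_qfilters() at the next state.
 */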
enum bnx2x_vfop_filters_state {
        BNX2X_VFOP_MBX_Q_FILTERS_MACS,
        BNX2X_VFOP_MBX_Q_FILTERS_VLANS,
        BNX2X_VFOP_MBX_Q_FILTERS_RXMODE,
        BNX2X_VFOP_MBX_Q_FILTERS_MCAST,
        BNX2X_VFOP_MBX_Q_FILTERS_DONE
};

static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
                                     struct bnx2x_virtf *vf,
                                     struct vfpf_set_q_filters_tlv *tlv,
                                     struct bnx2x_vfop_filters **pfl,
                                     u32 type_flag)
{
        int i, j;
        struct bnx2x_vfop_filters *fl = NULL;
        size_t fsz;

        fsz = tlv->n_mac_vlan_filters * sizeof(struct bnx2x_vfop_filter) +
              sizeof(struct bnx2x_vfop_filters);

        fl = kzalloc(fsz, GFP_KERNEL);
        if (!fl)
                return -ENOMEM;

        INIT_LIST_HEAD(&fl->head);

        for (i = 0, j = 0; i < tlv->n_mac_vlan_filters; i++) {
                struct vfpf_q_mac_vlan_filter *msg_filter = &tlv->filters[i];

                if ((msg_filter->flags & type_flag) != type_flag)
                        continue;
                if (type_flag == VFPF_Q_FILTER_DEST_MAC_VALID) {
                        fl->filters[j].mac = msg_filter->mac;
                        fl->filters[j].type = BNX2X_VFOP_FILTER_MAC;
                } else {
                        fl->filters[j].vid = msg_filter->vlan_tag;
                        fl->filters[j].type = BNX2X_VFOP_FILTER_VLAN;
                }
                fl->filters[j].add =
                        (msg_filter->flags & VFPF_Q_FILTER_SET_MAC) ?
                        true : false;
                list_add_tail(&fl->filters[j++].link, &fl->head);
        }
        if (list_empty(&fl->head))
                kfree(fl);
        else
                *pfl = fl;

        return 0;
}

static void bnx2x_vf_mbx_dp_q_filter(struct bnx2x *bp, int msglvl, int idx,
                                     struct vfpf_q_mac_vlan_filter *filter)
{
        DP(msglvl, "MAC-VLAN[%d] -- flags=0x%x\n", idx, filter->flags);
        if (filter->flags & VFPF_Q_FILTER_VLAN_TAG_VALID)
                DP_CONT(msglvl, ", vlan=%d", filter->vlan_tag);
        if (filter->flags & VFPF_Q_FILTER_DEST_MAC_VALID)
                DP_CONT(msglvl, ", MAC=%pM", filter->mac);
        DP_CONT(msglvl, "\n");
}

static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl,
                                      struct vfpf_set_q_filters_tlv *filters)
{
        int i;

        if (filters->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED)
                for (i = 0; i < filters->n_mac_vlan_filters; i++)
                        bnx2x_vf_mbx_dp_q_filter(bp, msglvl, i,
                                                 &filters->filters[i]);

        if (filters->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED)
                DP(msglvl, "RX-MASK=0x%x\n", filters->rx_mask);

        if (filters->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED)
                for (i = 0; i < filters->n_multicast; i++)
                        DP(msglvl, "MULTICAST=%pM\n", filters->multicast[i]);
}

#define VFPF_MAC_FILTER		VFPF_Q_FILTER_DEST_MAC_VALID
#define VFPF_VLAN_FILTER	VFPF_Q_FILTER_VLAN_TAG_VALID

static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
        int rc;

        struct vfpf_set_q_filters_tlv *msg =
                &BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters;

        struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
        enum bnx2x_vfop_filters_state state = vfop->state;

        struct bnx2x_vfop_cmd cmd = {
                .done = bnx2x_vfop_mbx_qfilters,
                .block = false,
        };

        DP(BNX2X_MSG_IOV, "STATE: %d\n", state);

        if (vfop->rc < 0)
                goto op_err;

        switch (state) {
        case BNX2X_VFOP_MBX_Q_FILTERS_MACS:
                /* next state */
                vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_VLANS;

                /* check for any vlan/mac changes */
                if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
                        /* build mac list */
                        struct bnx2x_vfop_filters *fl = NULL;

                        vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
                                                             VFPF_MAC_FILTER);
                        if (vfop->rc)
                                goto op_err;

                        if (fl) {
                                /* set mac list */
                                rc = bnx2x_vfop_mac_list_cmd(bp, vf, &cmd, fl,
                                                             msg->vf_qid,
                                                             false);
                                if (rc) {
                                        vfop->rc = rc;
                                        goto op_err;
                                }
                                return;
                        }
                }
                /* fall through */

        case BNX2X_VFOP_MBX_Q_FILTERS_VLANS:
                /* next state */
                vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_RXMODE;

                /* check for any vlan/mac changes */
                if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
                        /* build vlan list */
                        struct bnx2x_vfop_filters *fl = NULL;

                        vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
                                                             VFPF_VLAN_FILTER);
                        if (vfop->rc)
                                goto op_err;

                        if (fl) {
                                /* set vlan list */
                                rc = bnx2x_vfop_vlan_list_cmd(bp, vf, &cmd, fl,
                                                              msg->vf_qid,
                                                              false);
                                if (rc) {
                                        vfop->rc = rc;
                                        goto op_err;
                                }
                                return;
                        }
                }
                /* fall through */

        case BNX2X_VFOP_MBX_Q_FILTERS_RXMODE:
                /* next state */
                vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_MCAST;

                if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
                        unsigned long accept = 0;

                        /* convert VF-PF rx mask to bnx2x accept flags */
                        if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST)
                                __set_bit(BNX2X_ACCEPT_UNICAST, &accept);

                        if (msg->rx_mask &
                            VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST)
                                __set_bit(BNX2X_ACCEPT_MULTICAST, &accept);

                        if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_UNICAST)
                                __set_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept);

                        if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_MULTICAST)
                                __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept);

                        if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_BROADCAST)
                                __set_bit(BNX2X_ACCEPT_BROADCAST, &accept);

                        /* A packet arriving at the vf's mac should be accepted
                         * with any vlan
                         */
                        __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);

                        /* set rx-mode */
                        rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd,
                                                   msg->vf_qid, accept);
                        if (rc) {
                                vfop->rc = rc;
                                goto op_err;
                        }
                        return;
                }
                /* fall through */

        case BNX2X_VFOP_MBX_Q_FILTERS_MCAST:
                /* next state */
                vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_DONE;

                if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) {
                        /* set mcasts */
                        rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, msg->multicast,
                                                  msg->n_multicast, false);
                        if (rc) {
                                vfop->rc = rc;
                                goto op_err;
                        }
                        return;
                }
                /* fall through */
op_done:
        case BNX2X_VFOP_MBX_Q_FILTERS_DONE:
                bnx2x_vfop_end(bp, vf, vfop);
                return;
op_err:
                BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n",
                          vf->abs_vfid, msg->vf_qid, vfop->rc);
                goto op_done;

        default:
                bnx2x_vfop_default(state);
        }
}

static int bnx2x_vfop_mbx_qfilters_cmd(struct bnx2x *bp,
                                       struct bnx2x_virtf *vf,
                                       struct bnx2x_vfop_cmd *cmd)
{
        struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

        if (vfop) {
                bnx2x_vfop_opset(BNX2X_VFOP_MBX_Q_FILTERS_MACS,
                                 bnx2x_vfop_mbx_qfilters, cmd->done);
                return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mbx_qfilters,
                                             cmd->block);
        }
        return -ENOMEM;
}

static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
                                       struct bnx2x_virtf *vf,
                                       struct bnx2x_vf_mbx *mbx)
{
        struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
        struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
        struct bnx2x_vfop_cmd cmd = {
                .done = bnx2x_vf_mbx_resp,
                .block = false,
        };

        /* if a mac was already set for this VF via the set vf mac ndo, we only
         * accept mac configurations of that mac. Why accept them at all?
         * because PF may have been unable to configure the mac at the time
         * since queue was not set up.
         */
        if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
                /* once a mac was set by ndo can only accept a single mac... */
                if (filters->n_mac_vlan_filters > 1) {
                        BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n",
                                  vf->abs_vfid);
                        vf->op_rc = -EPERM;
                        goto response;
                }

                /* ...and only the mac set by the ndo */
                if (filters->n_mac_vlan_filters == 1 &&
                    memcmp(filters->filters->mac, bulletin->mac, ETH_ALEN)) {
                        BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
                                  vf->abs_vfid);

                        vf->op_rc = -EPERM;
                        goto response;
                }
        }

        /* verify vf_qid */
        if (filters->vf_qid > vf_rxq_count(vf))
                goto response;

        DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n",
           vf->abs_vfid,
           filters->vf_qid);

        /* print q_filter message */
        bnx2x_vf_mbx_dp_q_filters(bp, BNX2X_MSG_IOV, filters);

        vf->op_rc = bnx2x_vfop_mbx_qfilters_cmd(bp, vf, &cmd);
        if (vf->op_rc)
                goto response;
        return;

response:
        bnx2x_vf_mbx_resp(bp, vf);
}

static void bnx2x_vf_mbx_teardown_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                    struct bnx2x_vf_mbx *mbx)
{
        int qid = mbx->msg->req.q_op.vf_qid;
        struct bnx2x_vfop_cmd cmd = {
                .done = bnx2x_vf_mbx_resp,
                .block = false,
        };

        DP(BNX2X_MSG_IOV, "VF[%d] Q_TEARDOWN: vf_qid=%d\n",
           vf->abs_vfid, qid);

        vf->op_rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qid);
        if (vf->op_rc)
                bnx2x_vf_mbx_resp(bp, vf);
}

static void bnx2x_vf_mbx_close_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                  struct bnx2x_vf_mbx *mbx)
{
        struct bnx2x_vfop_cmd cmd = {
                .done = bnx2x_vf_mbx_resp,
                .block = false,
        };

        DP(BNX2X_MSG_IOV, "VF[%d] VF_CLOSE\n", vf->abs_vfid);

        vf->op_rc = bnx2x_vfop_close_cmd(bp, vf, &cmd);
        if (vf->op_rc)
                bnx2x_vf_mbx_resp(bp, vf);
}

static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                    struct bnx2x_vf_mbx *mbx)
{
        struct bnx2x_vfop_cmd cmd = {
                .done = bnx2x_vf_mbx_resp,
                .block = false,
        };

        DP(BNX2X_MSG_IOV, "VF[%d] VF_RELEASE\n", vf->abs_vfid);

        vf->op_rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
        if (vf->op_rc)
                bnx2x_vf_mbx_resp(bp, vf);
}

static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                    struct bnx2x_vf_mbx *mbx)
{
        struct bnx2x_vfop_cmd cmd = {
                .done = bnx2x_vf_mbx_resp,
                .block = false,
        };
        struct bnx2x_config_rss_params *vf_op_params = &vf->op_params.rss;
        struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss;

        if (rss_tlv->ind_table_size != T_ETH_INDIRECTION_TABLE_SIZE ||
            rss_tlv->rss_key_size != T_ETH_RSS_KEY) {
                BNX2X_ERR("failing rss configuration of vf %d due to size mismatch\n",
                          vf->index);
                vf->op_rc = -EINVAL;
                goto mbx_resp;
        }

        /* set vfop params according to rss tlv */
        memcpy(vf_op_params->ind_table, rss_tlv->ind_table,
               T_ETH_INDIRECTION_TABLE_SIZE);
        memcpy(vf_op_params->rss_key, rss_tlv->rss_key,
               sizeof(rss_tlv->rss_key));
        vf_op_params->rss_obj = &vf->rss_conf_obj;
        vf_op_params->rss_result_mask = rss_tlv->rss_result_mask;

        /* flags handled individually for backward/forward compatibility */
        if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
                __set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags);
        if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
                __set_bit(BNX2X_RSS_MODE_REGULAR, &vf_op_params->rss_flags);
        if (rss_tlv->rss_flags & VFPF_RSS_SET_SRCH)
                __set_bit(BNX2X_RSS_SET_SRCH, &vf_op_params->rss_flags);
        if (rss_tlv->rss_flags & VFPF_RSS_IPV4)
                __set_bit(BNX2X_RSS_IPV4, &vf_op_params->rss_flags);
        if (rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP)
                __set_bit(BNX2X_RSS_IPV4_TCP, &vf_op_params->rss_flags);
        if (rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP)
                __set_bit(BNX2X_RSS_IPV4_UDP, &vf_op_params->rss_flags);
        if (rss_tlv->rss_flags & VFPF_RSS_IPV6)
                __set_bit(BNX2X_RSS_IPV6, &vf_op_params->rss_flags);
        if (rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP)
                __set_bit(BNX2X_RSS_IPV6_TCP, &vf_op_params->rss_flags);
        if (rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)
                __set_bit(BNX2X_RSS_IPV6_UDP, &vf_op_params->rss_flags);

        if ((!(rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) &&
             rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) ||
            (!(rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) &&
             rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)) {
                BNX2X_ERR("about to hit a FW assert. aborting...\n");
                vf->op_rc = -EINVAL;
                goto mbx_resp;
        }

        vf->op_rc = bnx2x_vfop_rss_cmd(bp, vf, &cmd);

mbx_resp:
        if (vf->op_rc)
                bnx2x_vf_mbx_resp(bp, vf);
}

/* dispatch request */
static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                 struct bnx2x_vf_mbx *mbx)
{
        int i;

        /* check if tlv type is known */
        if (bnx2x_tlv_supported(mbx->first_tlv.tl.type)) {
                /* Lock the per vf op mutex and note the locker's identity.
                 * The unlock will take place in mbx response.
                 */
                bnx2x_lock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);

                /* switch on the opcode */
                switch (mbx->first_tlv.tl.type) {
                case CHANNEL_TLV_ACQUIRE:
                        bnx2x_vf_mbx_acquire(bp, vf, mbx);
                        return;
                case CHANNEL_TLV_INIT:
                        bnx2x_vf_mbx_init_vf(bp, vf, mbx);
                        return;
                case CHANNEL_TLV_SETUP_Q:
                        bnx2x_vf_mbx_setup_q(bp, vf, mbx);
                        return;
                case CHANNEL_TLV_SET_Q_FILTERS:
                        bnx2x_vf_mbx_set_q_filters(bp, vf, mbx);
                        return;
                case CHANNEL_TLV_TEARDOWN_Q:
                        bnx2x_vf_mbx_teardown_q(bp, vf, mbx);
                        return;
                case CHANNEL_TLV_CLOSE:
                        bnx2x_vf_mbx_close_vf(bp, vf, mbx);
                        return;
                case CHANNEL_TLV_RELEASE:
                        bnx2x_vf_mbx_release_vf(bp, vf, mbx);
                        return;
                case CHANNEL_TLV_UPDATE_RSS:
                        bnx2x_vf_mbx_update_rss(bp, vf, mbx);
                        return;
                }

        } else {
                /* unknown TLV - this may belong to a VF driver from the future
                 * - a version written after this PF driver was written, which
                 * supports features unknown as of yet. Too bad since we don't
                 * support them. Or this may be because someone wrote a crappy
                 * VF driver and is sending garbage over the channel.
                 */
                BNX2X_ERR("unknown TLV. type %d length %d vf->state was %d. first 20 bytes of mailbox buffer:\n",
                          mbx->first_tlv.tl.type, mbx->first_tlv.tl.length,
                          vf->state);
                for (i = 0; i < 20; i++)
                        DP_CONT(BNX2X_MSG_IOV, "%x ",
                                mbx->msg->req.tlv_buf_size.tlv_buffer[i]);
        }

        /* can we respond to VF (do we have an address for it?) */
        if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) {
                /* mbx_resp uses the op_rc of the VF */
                vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;

                /* notify the VF that we do not support this request */
                bnx2x_vf_mbx_resp(bp, vf);
        } else {
                /* can't send a response since this VF is unknown to us
                 * just ack the FW to release the mailbox and unlock
                 * the channel.
                 */
                storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
                /* Firmware ack should be written before unlocking channel */
                mmiowb();
                bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
        }
}

/* handle new vf-pf message */
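/* Entry point from the slowpath event queue: validate the vf_id, DMAE the
 * VF's request into the PF-side mailbox copy, then dispatch it. On a DMAE
 * failure the VF is released (see mbx_error below).
 */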
void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event)
{
        struct bnx2x_virtf *vf;
        struct bnx2x_vf_mbx *mbx;
        u8 vf_idx;
        int rc;

        DP(BNX2X_MSG_IOV,
           "vf pf event received: vfid %d, address_hi %x, address_lo %x\n",
           vfpf_event->vf_id, vfpf_event->msg_addr_hi, vfpf_event->msg_addr_lo);
        /* Sanity checks consider removing later */

        /* check if the vf_id is valid */
        if (vfpf_event->vf_id - BP_VFDB(bp)->sriov.first_vf_in_pf >=
            BNX2X_NR_VIRTFN(bp)) {
                BNX2X_ERR("Illegal vf_id %d max allowed: %d\n",
                          vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp));
                goto mbx_done;
        }
        vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id);
        mbx = BP_VF_MBX(bp, vf_idx);

        /* verify an event is not currently being processed -
         * debug failsafe only
         */
        if (mbx->flags & VF_MSG_INPROCESS) {
                BNX2X_ERR("Previous message is still being processed, vf_id %d\n",
                          vfpf_event->vf_id);
                goto mbx_done;
        }
        vf = BP_VF(bp, vf_idx);

        /* save the VF message address */
        mbx->vf_addr_hi = vfpf_event->msg_addr_hi;
        mbx->vf_addr_lo = vfpf_event->msg_addr_lo;
        DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
           mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);

        /* dmae to get the VF request */
        rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping, vf->abs_vfid,
                                  mbx->vf_addr_hi, mbx->vf_addr_lo,
                                  sizeof(union vfpf_tlvs)/4);
        if (rc) {
                BNX2X_ERR("Failed to copy request VF %d\n", vf->abs_vfid);
                goto mbx_error;
        }

        /* process the VF message header */
        mbx->first_tlv = mbx->msg->req.first_tlv;

        /* dispatch the request (will prepare the response) */
        bnx2x_vf_mbx_request(bp, vf, mbx);
        goto mbx_done;

mbx_error:
        bnx2x_vf_release(bp, vf, false); /* non blocking */
mbx_done:
        return;
}

/* propagate local bulletin board to vf */
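/* The bulletin's version is bumped and its crc recomputed on every post so
 * that the VF side (see bnx2x_sample_bulletin) can detect a consistent
 * update.
 */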
int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf)
{
        struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf);
        dma_addr_t pf_addr = BP_VF_BULLETIN_DMA(bp)->mapping +
                             vf * BULLETIN_CONTENT_SIZE;
        dma_addr_t vf_addr = bnx2x_vf(bp, vf, bulletin_map);
        int rc;

        /* can only update vf after init took place */
        if (bnx2x_vf(bp, vf, state) != VF_ENABLED &&
            bnx2x_vf(bp, vf, state) != VF_ACQUIRED)
                return 0;

        /* increment bulletin board version and compute crc */
        bulletin->version++;
        bulletin->length = BULLETIN_CONTENT_SIZE;
        bulletin->crc = bnx2x_crc_vf_bulletin(bp, bulletin);

        /* propagate bulletin board via dmae to vm memory */
        rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr,
                                  bnx2x_vf(bp, vf, abs_vfid), U64_HI(vf_addr),
                                  U64_LO(vf_addr), bulletin->length / 4);
        return rc;
}