drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
1 /*
2 * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
3 * driver for Linux.
4 *
5 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
35
36 #include <linux/pci.h>
37
38 #include "t4vf_common.h"
39 #include "t4vf_defs.h"
40
41 #include "../cxgb4/t4_regs.h"
42 #include "../cxgb4/t4_values.h"
43 #include "../cxgb4/t4fw_api.h"
44
45 /*
46 * Wait for the device to become ready (signified by our "who am I" register
47 * returning a value other than all 1's). Return an error if it doesn't
48 * become ready ...
49 */
50 int t4vf_wait_dev_ready(struct adapter *adapter)
51 {
52 const u32 whoami = T4VF_PL_BASE_ADDR + PL_VF_WHOAMI;
53 const u32 notready1 = 0xffffffff;
54 const u32 notready2 = 0xeeeeeeee;
55 u32 val;
56
57 val = t4_read_reg(adapter, whoami);
58 if (val != notready1 && val != notready2)
59 return 0;
60 msleep(500);
61 val = t4_read_reg(adapter, whoami);
62 if (val != notready1 && val != notready2)
63 return 0;
64 else
65 return -EIO;
66 }
67
68 /*
69 * Get the reply to a mailbox command and store it in @rpl in big-endian order
70 * (since the firmware data structures are specified in a big-endian layout).
71 */
72 static void get_mbox_rpl(struct adapter *adapter, __be64 *rpl, int size,
73 u32 mbox_data)
74 {
75 for ( ; size; size -= 8, mbox_data += 8)
76 *rpl++ = cpu_to_be64(t4_read_reg64(adapter, mbox_data));
77 }
78
79 /**
80 * t4vf_record_mbox - record a Firmware Mailbox Command/Reply in the log
81 * @adapter: the adapter
82 * @cmd: the Firmware Mailbox Command or Reply
83 * @size: command length in bytes
84 * @access: the time (ms) needed to access the Firmware Mailbox
85 * @execute: the time (ms) the command spent being executed
86 */
87 static void t4vf_record_mbox(struct adapter *adapter, const __be64 *cmd,
88 int size, int access, int execute)
89 {
90 struct mbox_cmd_log *log = adapter->mbox_log;
91 struct mbox_cmd *entry;
92 int i;
93
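	/* The mailbox command log is a fixed-size ring buffer: take the slot
	 * at the current cursor, advance it, and wrap back to the start once
	 * we run past the end.
	 */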
94 entry = mbox_cmd_log_entry(log, log->cursor++);
95 if (log->cursor == log->size)
96 log->cursor = 0;
97
98 for (i = 0; i < size / 8; i++)
99 entry->cmd[i] = be64_to_cpu(cmd[i]);
100 while (i < MBOX_LEN / 8)
101 entry->cmd[i++] = 0;
102 entry->timestamp = jiffies;
103 entry->seqno = log->seqno++;
104 entry->access = access;
105 entry->execute = execute;
106 }
107
108 /**
109 * t4vf_wr_mbox_core - send a command to FW through the mailbox
110 * @adapter: the adapter
111 * @cmd: the command to write
112 * @size: command length in bytes
113 * @rpl: where to optionally store the reply
114 * @sleep_ok: if true we may sleep while awaiting command completion
115 *
116 * Sends the given command to FW through the mailbox and waits for the
117 * FW to execute the command. If @rpl is not %NULL it is used to store
118 * the FW's reply to the command. The command and its optional reply
119 * are of the same length. FW can take up to 500 ms to respond.
120 * @sleep_ok determines whether we may sleep while awaiting the response.
121 * If sleeping is allowed we use progressive backoff otherwise we spin.
122 *
123 * The return value is 0 on success or a negative errno on failure. A
124 * failure can happen either because we are not able to execute the
125 * command or FW executes it but signals an error. In the latter case
126 * the return value is the error code indicated by FW (negated).
127 */
128 int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
129 void *rpl, bool sleep_ok)
130 {
131 static const int delay[] = {
132 1, 1, 3, 5, 10, 10, 20, 50, 100
133 };
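	/* Illustrative note (not from the original source): one full pass
	 * through the table above waits roughly 1+1+3+5+10+10+20+50+100 =
	 * 200 ms; after that the final 100 ms entry simply repeats until
	 * FW_CMD_MAX_TIMEOUT is reached.
	 */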
134
135 u16 access = 0, execute = 0;
136 u32 v, mbox_data;
137 int i, ms, delay_idx, ret;
138 const __be64 *p;
139 u32 mbox_ctl = T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL;
140 u32 cmd_op = FW_CMD_OP_G(be32_to_cpu(((struct fw_cmd_hdr *)cmd)->hi));
141 __be64 cmd_rpl[MBOX_LEN / 8];
142 struct mbox_list entry;
143
144 /* In T6, mailbox size is changed to 128 bytes to avoid
145 * invalidating the entire prefetch buffer.
146 */
147 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
148 mbox_data = T4VF_MBDATA_BASE_ADDR;
149 else
150 mbox_data = T6VF_MBDATA_BASE_ADDR;
151
152 /*
153 * Commands must be multiples of 16 bytes in length and may not be
154 * larger than the size of the Mailbox Data register array.
155 */
156 if ((size % 16) != 0 ||
157 size > NUM_CIM_VF_MAILBOX_DATA_INSTANCES * 4)
158 return -EINVAL;
159
160 /* Queue ourselves onto the mailbox access list. When our entry is at
161 * the front of the list, we have rights to access the mailbox. So we
162 * wait [for a while] till we're at the front [or bail out with an
163 * EBUSY] ...
164 */
165 spin_lock(&adapter->mbox_lock);
166 list_add_tail(&entry.list, &adapter->mlist.list);
167 spin_unlock(&adapter->mbox_lock);
168
169 delay_idx = 0;
170 ms = delay[0];
171
172 for (i = 0; ; i += ms) {
173 /* If we've waited too long, return a busy indication. This
174 * really ought to be based on our initial position in the
175 * mailbox access list but this is a start. We very rarely
176 * contend on access to the mailbox ...
177 */
178 if (i > FW_CMD_MAX_TIMEOUT) {
179 spin_lock(&adapter->mbox_lock);
180 list_del(&entry.list);
181 spin_unlock(&adapter->mbox_lock);
182 ret = -EBUSY;
183 t4vf_record_mbox(adapter, cmd, size, access, ret);
184 return ret;
185 }
186
187 /* If we're at the head, break out and start the mailbox
188 * protocol.
189 */
190 if (list_first_entry(&adapter->mlist.list, struct mbox_list,
191 list) == &entry)
192 break;
193
194 /* Delay for a bit before checking again ... */
195 if (sleep_ok) {
196 ms = delay[delay_idx]; /* last element may repeat */
197 if (delay_idx < ARRAY_SIZE(delay) - 1)
198 delay_idx++;
199 msleep(ms);
200 } else {
201 mdelay(ms);
202 }
203 }
204
205 /*
206 * Loop trying to get ownership of the mailbox. Return an error
207 * if we can't gain ownership.
208 */
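	/* The mailbox owner can read as MBOX_OWNER_NONE (free), MBOX_OWNER_FW
	 * (firmware holds it) or MBOX_OWNER_DRV (we hold it); we re-read a few
	 * times while it still reports no owner before giving up.
	 */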
209 v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
210 for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
211 v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
212 if (v != MBOX_OWNER_DRV) {
213 spin_lock(&adapter->mbox_lock);
214 list_del(&entry.list);
215 spin_unlock(&adapter->mbox_lock);
216 ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
217 t4vf_record_mbox(adapter, cmd, size, access, ret);
218 return ret;
219 }
220
221 /*
222 * Write the command array into the Mailbox Data register array and
223 * transfer ownership of the mailbox to the firmware.
224 *
225 * For the VFs, the Mailbox Data "registers" are actually backed by
226 * T4's "MA" interface rather than PL Registers (as is the case for
227 * the PFs). Because these are in different coherency domains, the
228 * write to the VF's PL-register-backed Mailbox Control can race in
229 * front of the writes to the MA-backed VF Mailbox Data "registers".
230 * So we need to do a read-back on at least one byte of the VF Mailbox
231 * Data registers before doing the write to the VF Mailbox Control
232 * register.
233 */
234 if (cmd_op != FW_VI_STATS_CMD)
235 t4vf_record_mbox(adapter, cmd, size, access, 0);
236 for (i = 0, p = cmd; i < size; i += 8)
237 t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));
238 t4_read_reg(adapter, mbox_data); /* flush write */
239
240 t4_write_reg(adapter, mbox_ctl,
241 MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
242 t4_read_reg(adapter, mbox_ctl); /* flush write */
243
244 /*
245 * Spin waiting for firmware to acknowledge processing our command.
246 */
247 delay_idx = 0;
248 ms = delay[0];
249
250 for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
251 if (sleep_ok) {
252 ms = delay[delay_idx];
253 if (delay_idx < ARRAY_SIZE(delay) - 1)
254 delay_idx++;
255 msleep(ms);
256 } else
257 mdelay(ms);
258
259 /*
260 * If we're the owner, see if this is the reply we wanted.
261 */
262 v = t4_read_reg(adapter, mbox_ctl);
263 if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
264 /*
265 * If the Message Valid bit isn't on, revoke ownership
266 * of the mailbox and continue waiting for our reply.
267 */
268 if ((v & MBMSGVALID_F) == 0) {
269 t4_write_reg(adapter, mbox_ctl,
270 MBOWNER_V(MBOX_OWNER_NONE));
271 continue;
272 }
273
274 /*
275 * We now have our reply. Extract the command return
276 * value, copy the reply back to our caller's buffer
277 * (if specified) and revoke ownership of the mailbox.
278 * We return the (negated) firmware command return
279 * code (this depends on FW_SUCCESS == 0).
280 */
281 get_mbox_rpl(adapter, cmd_rpl, size, mbox_data);
282
283 /* return value in low-order little-endian word */
284 v = be64_to_cpu(cmd_rpl[0]);
285
286 if (rpl) {
287 /* request bit in high-order BE word */
288 WARN_ON((be32_to_cpu(*(const __be32 *)cmd)
289 & FW_CMD_REQUEST_F) == 0);
290 memcpy(rpl, cmd_rpl, size);
291 WARN_ON((be32_to_cpu(*(__be32 *)rpl)
292 & FW_CMD_REQUEST_F) != 0);
293 }
294 t4_write_reg(adapter, mbox_ctl,
295 MBOWNER_V(MBOX_OWNER_NONE));
296 execute = i + ms;
297 if (cmd_op != FW_VI_STATS_CMD)
298 t4vf_record_mbox(adapter, cmd_rpl, size, access,
299 execute);
300 spin_lock(&adapter->mbox_lock);
301 list_del(&entry.list);
302 spin_unlock(&adapter->mbox_lock);
303 return -FW_CMD_RETVAL_G(v);
304 }
305 }
306
307 /* We timed out. Return the error ... */
308 ret = -ETIMEDOUT;
309 t4vf_record_mbox(adapter, cmd, size, access, ret);
310 spin_lock(&adapter->mbox_lock);
311 list_del(&entry.list);
312 spin_unlock(&adapter->mbox_lock);
313 return ret;
314 }
315
316 #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
317 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
318 FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)
319
320 /**
321 * init_link_config - initialize a link's SW state
322 * @lc: structure holding the link state
323 * @caps: link capabilities
324 *
325 * Initializes the SW state maintained for each link, including the link's
326 * capabilities and default speed/flow-control/autonegotiation settings.
327 */
328 static void init_link_config(struct link_config *lc, unsigned int caps)
329 {
330 lc->supported = caps;
331 lc->lp_advertising = 0;
332 lc->requested_speed = 0;
333 lc->speed = 0;
334 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
335 if (lc->supported & FW_PORT_CAP_ANEG) {
336 lc->advertising = lc->supported & ADVERT_MASK;
337 lc->autoneg = AUTONEG_ENABLE;
338 lc->requested_fc |= PAUSE_AUTONEG;
339 } else {
340 lc->advertising = 0;
341 lc->autoneg = AUTONEG_DISABLE;
342 }
343 }
344
345 /**
346 * t4vf_port_init - initialize port hardware/software state
347 * @adapter: the adapter
348 * @pidx: the adapter port index
349 */
350 int t4vf_port_init(struct adapter *adapter, int pidx)
351 {
352 struct port_info *pi = adap2pinfo(adapter, pidx);
353 struct fw_vi_cmd vi_cmd, vi_rpl;
354 struct fw_port_cmd port_cmd, port_rpl;
355 int v;
356
357 /*
358 * Execute a VI Read command to get our Virtual Interface information
359 * like MAC address, etc.
360 */
361 memset(&vi_cmd, 0, sizeof(vi_cmd));
362 vi_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
363 FW_CMD_REQUEST_F |
364 FW_CMD_READ_F);
365 vi_cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(vi_cmd));
366 vi_cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(pi->viid));
367 v = t4vf_wr_mbox(adapter, &vi_cmd, sizeof(vi_cmd), &vi_rpl);
368 if (v)
369 return v;
370
371 BUG_ON(pi->port_id != FW_VI_CMD_PORTID_G(vi_rpl.portid_pkd));
372 pi->rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(vi_rpl.rsssize_pkd));
373 t4_os_set_hw_addr(adapter, pidx, vi_rpl.mac);
374
375 /*
376 * If we don't have read access to our port information, we're done
377 * now. Otherwise, execute a PORT Read command to get it ...
378 */
379 if (!(adapter->params.vfres.r_caps & FW_CMD_CAP_PORT))
380 return 0;
381
382 memset(&port_cmd, 0, sizeof(port_cmd));
383 port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
384 FW_CMD_REQUEST_F |
385 FW_CMD_READ_F |
386 FW_PORT_CMD_PORTID_V(pi->port_id));
387 port_cmd.action_to_len16 =
388 cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
389 FW_LEN16(port_cmd));
390 v = t4vf_wr_mbox(adapter, &port_cmd, sizeof(port_cmd), &port_rpl);
391 if (v)
392 return v;
393
394 v = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype);
395 pi->mdio_addr = (v & FW_PORT_CMD_MDIOCAP_F) ?
396 FW_PORT_CMD_MDIOADDR_G(v) : -1;
397 pi->port_type = FW_PORT_CMD_PTYPE_G(v);
398 pi->mod_type = FW_PORT_MOD_TYPE_NA;
399
400 init_link_config(&pi->link_cfg, be16_to_cpu(port_rpl.u.info.pcap));
401
402 return 0;
403 }
404
405 /**
406 * t4vf_fw_reset - issue a reset to FW
407 * @adapter: the adapter
408 *
409 * Issues a reset command to FW. For a Physical Function this would
410 * result in the Firmware resetting all of its state. For a Virtual
411 * Function this just resets the state associated with the VF.
412 */
413 int t4vf_fw_reset(struct adapter *adapter)
414 {
415 struct fw_reset_cmd cmd;
416
417 memset(&cmd, 0, sizeof(cmd));
418 cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RESET_CMD) |
419 FW_CMD_WRITE_F);
420 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
421 return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
422 }
423
424 /**
425 * t4vf_query_params - query FW or device parameters
426 * @adapter: the adapter
427 * @nparams: the number of parameters
428 * @params: the parameter names
429 * @vals: the parameter values
430 *
431 * Reads the values of firmware or device parameters. Up to 7 parameters
432 * can be queried at once.
433 */
434 static int t4vf_query_params(struct adapter *adapter, unsigned int nparams,
435 const u32 *params, u32 *vals)
436 {
437 int i, ret;
438 struct fw_params_cmd cmd, rpl;
439 struct fw_params_param *p;
440 size_t len16;
441
442 if (nparams > 7)
443 return -EINVAL;
444
445 memset(&cmd, 0, sizeof(cmd));
446 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
447 FW_CMD_REQUEST_F |
448 FW_CMD_READ_F);
449 len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
450 param[nparams].mnem), 16);
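	/* len16 is the command length in 16-byte units: the fixed header plus
	 * one mnemonic/value pair per requested parameter, rounded up.
	 */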
451 cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
452 for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++)
453 p->mnem = htonl(*params++);
454
455 ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
456 if (ret == 0)
457 for (i = 0, p = &rpl.param[0]; i < nparams; i++, p++)
458 *vals++ = be32_to_cpu(p->val);
459 return ret;
460 }
461
462 /**
463 * t4vf_set_params - sets FW or device parameters
464 * @adapter: the adapter
465 * @nparams: the number of parameters
466 * @params: the parameter names
467 * @vals: the parameter values
468 *
469 * Sets the values of firmware or device parameters. Up to 7 parameters
470 * can be specified at once.
471 */
472 int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
473 const u32 *params, const u32 *vals)
474 {
475 int i;
476 struct fw_params_cmd cmd;
477 struct fw_params_param *p;
478 size_t len16;
479
480 if (nparams > 7)
481 return -EINVAL;
482
483 memset(&cmd, 0, sizeof(cmd));
484 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
485 FW_CMD_REQUEST_F |
486 FW_CMD_WRITE_F);
487 len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
488 param[nparams]), 16);
489 cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
490 for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) {
491 p->mnem = cpu_to_be32(*params++);
492 p->val = cpu_to_be32(*vals++);
493 }
494
495 return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
496 }
497
498 /**
499 * t4vf_fl_pkt_align - return the fl packet alignment
500 * @adapter: the adapter
501 *
502 * T4 has a single field to specify the packing and padding boundary.
503 * T5 onwards has separate fields for these and hence the alignment for
504 * the next packet offset is the maximum of the two. And T6 changes the
505 * Ingress Padding Boundary Shift, so it's all a mess and it's best
506 * if we put this in low-level Common Code ...
507 *
508 */
509 int t4vf_fl_pkt_align(struct adapter *adapter)
510 {
511 u32 sge_control, sge_control2;
512 unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;
513
514 sge_control = adapter->params.sge.sge_control;
515
516 /* T4 uses a single control field to specify both the PCIe Padding and
517 * Packing Boundary. T5 introduced the ability to specify these
518 * separately. The actual Ingress Packet Data alignment boundary
519 * within Packed Buffer Mode is the maximum of these two
520 * specifications. (Note that it makes no real practical sense to
521 * have the Padding Boundary be larger than the Packing Boundary but you
522 * could set the chip up that way and, in fact, legacy T4 code would
523 * end up doing this because it would initialize the Padding Boundary and
524 * leave the Packing Boundary initialized to 0 (16 bytes).)
525 * Padding Boundary values in T6 start from 8B,
526 * whereas it is 32B for T4 and T5.
527 */
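	/* Worked example (illustrative only, hypothetical register values):
	 * assuming the usual 32-byte minimum padding boundary (shift of 5),
	 * an INGPADBOUNDARY field of 2 gives a Padding Boundary of
	 * 1 << (2 + 5) = 128 bytes; if the Packing Boundary decodes to 64
	 * bytes, the returned alignment is max(128, 64) = 128 bytes.
	 */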
528 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
529 ingpad_shift = INGPADBOUNDARY_SHIFT_X;
530 else
531 ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X;
532
533 ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) + ingpad_shift);
534
535 fl_align = ingpadboundary;
536 if (!is_t4(adapter->params.chip)) {
537 /* T5 has a different interpretation of one of the PCIe Packing
538 * Boundary values.
539 */
540 sge_control2 = adapter->params.sge.sge_control2;
541 ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
542 if (ingpackboundary == INGPACKBOUNDARY_16B_X)
543 ingpackboundary = 16;
544 else
545 ingpackboundary = 1 << (ingpackboundary +
546 INGPACKBOUNDARY_SHIFT_X);
547
548 fl_align = max(ingpadboundary, ingpackboundary);
549 }
550 return fl_align;
551 }
552
553 /**
554 * t4vf_bar2_sge_qregs - return BAR2 SGE Queue register information
555 * @adapter: the adapter
556 * @qid: the Queue ID
557 * @qtype: the Ingress or Egress type for @qid
558 * @pbar2_qoffset: BAR2 Queue Offset
559 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
560 *
561 * Returns the BAR2 SGE Queue Registers information associated with the
562 * indicated Absolute Queue ID. These are passed back in return value
563 * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
564 * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
565 *
566 * This may return an error which indicates that BAR2 SGE Queue
567 * registers aren't available. If an error is not returned, then the
568 * following values are returned:
569 *
570 * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
571 * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
572 *
573 * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
574 * require the "Inferred Queue ID" ability may be used. E.g. the
575 * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
576 * then these "Inferred Queue ID" registers may not be used.
577 */
578 int t4vf_bar2_sge_qregs(struct adapter *adapter,
579 unsigned int qid,
580 enum t4_bar2_qtype qtype,
581 u64 *pbar2_qoffset,
582 unsigned int *pbar2_qid)
583 {
584 unsigned int page_shift, page_size, qpp_shift, qpp_mask;
585 u64 bar2_page_offset, bar2_qoffset;
586 unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
587
588 /* T4 doesn't support BAR2 SGE Queue registers.
589 */
590 if (is_t4(adapter->params.chip))
591 return -EINVAL;
592
593 /* Get our SGE Page Size parameters.
594 */
595 page_shift = adapter->params.sge.sge_vf_hps + 10;
596 page_size = 1 << page_shift;
597
598 /* Get the right Queues per Page parameters for our Queue.
599 */
600 qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
601 ? adapter->params.sge.sge_vf_eq_qpp
602 : adapter->params.sge.sge_vf_iq_qpp);
603 qpp_mask = (1 << qpp_shift) - 1;
604
605 /* Calculate the basics of the BAR2 SGE Queue register area:
606 * o The BAR2 page the Queue registers will be in.
607 * o The BAR2 Queue ID.
608 * o The BAR2 Queue ID Offset into the BAR2 page.
609 */
610 bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
611 bar2_qid = qid & qpp_mask;
612 bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
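	/* Worked example (illustrative only): with a hypothetical 4KB SGE page
	 * (page_shift = 12), 4 queues per page (qpp_shift = 2) and qid = 10:
	 * bar2_page_offset = (10 >> 2) << 12 = 0x2000, bar2_qid = 10 & 3 = 2
	 * and, assuming SGE_UDB_SIZE is 128 bytes, bar2_qid_offset = 256 --
	 * small enough to take the "Inferred Queue ID" path below.
	 */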
613
614 /* If the BAR2 Queue ID Offset is less than the Page Size, then the
615 * hardware will infer the Absolute Queue ID simply from the writes to
616 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
617 * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply
618 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
619 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
620 * from the BAR2 Page and BAR2 Queue ID.
621 *
622 * One important consequence of this is that some BAR2 SGE registers
623 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
624 * there. But other registers synthesize the SGE Queue ID purely
625 * from the writes to the registers -- the Write Combined Doorbell
626 * Buffer is a good example. These BAR2 SGE Registers are only
627 * available for those BAR2 SGE Register areas where the SGE Absolute
628 * Queue ID can be inferred from simple writes.
629 */
630 bar2_qoffset = bar2_page_offset;
631 bar2_qinferred = (bar2_qid_offset < page_size);
632 if (bar2_qinferred) {
633 bar2_qoffset += bar2_qid_offset;
634 bar2_qid = 0;
635 }
636
637 *pbar2_qoffset = bar2_qoffset;
638 *pbar2_qid = bar2_qid;
639 return 0;
640 }
641
642 unsigned int t4vf_get_pf_from_vf(struct adapter *adapter)
643 {
644 u32 whoami;
645
646 whoami = t4_read_reg(adapter, T4VF_PL_BASE_ADDR + PL_VF_WHOAMI_A);
647 return (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
648 SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami));
649 }
650
651 /**
652 * t4vf_get_sge_params - retrieve adapter Scatter Gather Engine parameters
653 * @adapter: the adapter
654 *
655 * Retrieves various core SGE parameters in the form of hardware SGE
656 * register values. The caller is responsible for decoding these as
657 * needed. The SGE parameters are stored in @adapter->params.sge.
658 */
659 int t4vf_get_sge_params(struct adapter *adapter)
660 {
661 struct sge_params *sge_params = &adapter->params.sge;
662 u32 params[7], vals[7];
663 int v;
664
665 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
666 FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL_A));
667 params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
668 FW_PARAMS_PARAM_XYZ_V(SGE_HOST_PAGE_SIZE_A));
669 params[2] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
670 FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE0_A));
671 params[3] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
672 FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE1_A));
673 params[4] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
674 FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_0_AND_1_A));
675 params[5] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
676 FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_2_AND_3_A));
677 params[6] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
678 FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_4_AND_5_A));
679 v = t4vf_query_params(adapter, 7, params, vals);
680 if (v)
681 return v;
682 sge_params->sge_control = vals[0];
683 sge_params->sge_host_page_size = vals[1];
684 sge_params->sge_fl_buffer_size[0] = vals[2];
685 sge_params->sge_fl_buffer_size[1] = vals[3];
686 sge_params->sge_timer_value_0_and_1 = vals[4];
687 sge_params->sge_timer_value_2_and_3 = vals[5];
688 sge_params->sge_timer_value_4_and_5 = vals[6];
689
690 /* T4 uses a single control field to specify both the PCIe Padding and
691 * Packing Boundary. T5 introduced the ability to specify these
692 * separately with the Padding Boundary in SGE_CONTROL and the Packing
693 * Boundary in SGE_CONTROL2. So for T5 and later we need to grab
694 * SGE_CONTROL2 in order to determine how ingress packet data will be
695 * laid out in Packed Buffer Mode. Unfortunately, older versions of
696 * the firmware won't let us retrieve SGE_CONTROL2 so if we get a
697 * failure grabbing it we throw an error since we can't figure out the
698 * right value.
699 */
700 if (!is_t4(adapter->params.chip)) {
701 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
702 FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL2_A));
703 v = t4vf_query_params(adapter, 1, params, vals);
704 if (v != FW_SUCCESS) {
705 dev_err(adapter->pdev_dev,
706 "Unable to get SGE Control2; "
707 "probably old firmware.\n");
708 return v;
709 }
710 sge_params->sge_control2 = vals[0];
711 }
712
713 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
714 FW_PARAMS_PARAM_XYZ_V(SGE_INGRESS_RX_THRESHOLD_A));
715 params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
716 FW_PARAMS_PARAM_XYZ_V(SGE_CONM_CTRL_A));
717 v = t4vf_query_params(adapter, 2, params, vals);
718 if (v)
719 return v;
720 sge_params->sge_ingress_rx_threshold = vals[0];
721 sge_params->sge_congestion_control = vals[1];
722
723 /* For T5 and later we want to use the new BAR2 Doorbells.
724 * Unfortunately, older firmware didn't allow these registers to be
725 * read.
726 */
727 if (!is_t4(adapter->params.chip)) {
728 unsigned int pf, s_hps, s_qpp;
729
730 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
731 FW_PARAMS_PARAM_XYZ_V(
732 SGE_EGRESS_QUEUES_PER_PAGE_VF_A));
733 params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
734 FW_PARAMS_PARAM_XYZ_V(
735 SGE_INGRESS_QUEUES_PER_PAGE_VF_A));
736 v = t4vf_query_params(adapter, 2, params, vals);
737 if (v != FW_SUCCESS) {
738 dev_warn(adapter->pdev_dev,
739 "Unable to get VF SGE Queues/Page; "
740 "probably old firmware.\n");
741 return v;
742 }
743 sge_params->sge_egress_queues_per_page = vals[0];
744 sge_params->sge_ingress_queues_per_page = vals[1];
745
746 /* We need the Queues/Page for our VF. This is based on the
747 * PF from which we're instantiated and is indexed in the
748 * register we just read. Do it once here so other code in
749 * the driver can just use it.
750 */
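		/* Illustrative example (hypothetical field layout): if the
		 * per-PF fields are 4 bits wide and we're instantiated from
		 * PF 2, the shift is 2 * 4 = 8, so our value occupies bits
		 * 11:8 of the register we just read.
		 */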
751 pf = t4vf_get_pf_from_vf(adapter);
752 s_hps = (HOSTPAGESIZEPF0_S +
753 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * pf);
754 sge_params->sge_vf_hps =
755 ((sge_params->sge_host_page_size >> s_hps)
756 & HOSTPAGESIZEPF0_M);
757
758 s_qpp = (QUEUESPERPAGEPF0_S +
759 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * pf);
760 sge_params->sge_vf_eq_qpp =
761 ((sge_params->sge_egress_queues_per_page >> s_qpp)
762 & QUEUESPERPAGEPF0_M);
763 sge_params->sge_vf_iq_qpp =
764 ((sge_params->sge_ingress_queues_per_page >> s_qpp)
765 & QUEUESPERPAGEPF0_M);
766 }
767
768 return 0;
769 }
770
771 /**
772 * t4vf_get_vpd_params - retrieve device VPD parameters
773 * @adapter: the adapter
774 *
775 * Retrieves various device Vital Product Data parameters. The parameters
776 * are stored in @adapter->params.vpd.
777 */
778 int t4vf_get_vpd_params(struct adapter *adapter)
779 {
780 struct vpd_params *vpd_params = &adapter->params.vpd;
781 u32 params[7], vals[7];
782 int v;
783
784 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
785 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
786 v = t4vf_query_params(adapter, 1, params, vals);
787 if (v)
788 return v;
789 vpd_params->cclk = vals[0];
790
791 return 0;
792 }
793
794 /**
795 * t4vf_get_dev_params - retrieve device parameters
796 * @adapter: the adapter
797 *
798 * Retrieves various device parameters. The parameters are stored in
799 * @adapter->params.dev.
800 */
801 int t4vf_get_dev_params(struct adapter *adapter)
802 {
803 struct dev_params *dev_params = &adapter->params.dev;
804 u32 params[7], vals[7];
805 int v;
806
807 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
808 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWREV));
809 params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
810 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_TPREV));
811 v = t4vf_query_params(adapter, 2, params, vals);
812 if (v)
813 return v;
814 dev_params->fwrev = vals[0];
815 dev_params->tprev = vals[1];
816
817 return 0;
818 }
819
820 /**
821 * t4vf_get_rss_glb_config - retrieve adapter RSS Global Configuration
822 * @adapter: the adapter
823 *
824 * Retrieves global RSS mode and parameters with which we have to live
825 * and stores them in the @adapter's RSS parameters.
826 */
827 int t4vf_get_rss_glb_config(struct adapter *adapter)
828 {
829 struct rss_params *rss = &adapter->params.rss;
830 struct fw_rss_glb_config_cmd cmd, rpl;
831 int v;
832
833 /*
834 * Execute an RSS Global Configuration read command to retrieve
835 * our RSS configuration.
836 */
837 memset(&cmd, 0, sizeof(cmd));
838 cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
839 FW_CMD_REQUEST_F |
840 FW_CMD_READ_F);
841 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
842 v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
843 if (v)
844 return v;
845
846 /*
847 * Translate the big-endian RSS Global Configuration into our
848 * cpu-endian format based on the RSS mode. We also do first level
849 * filtering at this point to weed out modes which don't support
850 * VF Drivers ...
851 */
852 rss->mode = FW_RSS_GLB_CONFIG_CMD_MODE_G(
853 be32_to_cpu(rpl.u.manual.mode_pkd));
854 switch (rss->mode) {
855 case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
856 u32 word = be32_to_cpu(
857 rpl.u.basicvirtual.synmapen_to_hashtoeplitz);
858
859 rss->u.basicvirtual.synmapen =
860 ((word & FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_F) != 0);
861 rss->u.basicvirtual.syn4tupenipv6 =
862 ((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_F) != 0);
863 rss->u.basicvirtual.syn2tupenipv6 =
864 ((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_F) != 0);
865 rss->u.basicvirtual.syn4tupenipv4 =
866 ((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_F) != 0);
867 rss->u.basicvirtual.syn2tupenipv4 =
868 ((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_F) != 0);
869
870 rss->u.basicvirtual.ofdmapen =
871 ((word & FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_F) != 0);
872
873 rss->u.basicvirtual.tnlmapen =
874 ((word & FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F) != 0);
875 rss->u.basicvirtual.tnlalllookup =
876 ((word & FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F) != 0);
877
878 rss->u.basicvirtual.hashtoeplitz =
879 ((word & FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_F) != 0);
880
881 /* we need at least Tunnel Map Enable to be set */
882 if (!rss->u.basicvirtual.tnlmapen)
883 return -EINVAL;
884 break;
885 }
886
887 default:
888 /* all unknown/unsupported RSS modes result in an error */
889 return -EINVAL;
890 }
891
892 return 0;
893 }
894
895 /**
896 * t4vf_get_vfres - retrieve VF resource limits
897 * @adapter: the adapter
898 *
899 * Retrieves configured resource limits and capabilities for a virtual
900 * function. The results are stored in @adapter->vfres.
901 */
902 int t4vf_get_vfres(struct adapter *adapter)
903 {
904 struct vf_resources *vfres = &adapter->params.vfres;
905 struct fw_pfvf_cmd cmd, rpl;
906 int v;
907 u32 word;
908
909 /*
910 * Execute PFVF Read command to get VF resource limits; bail out early
911 * with error on command failure.
912 */
913 memset(&cmd, 0, sizeof(cmd));
914 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
915 FW_CMD_REQUEST_F |
916 FW_CMD_READ_F);
917 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
918 v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
919 if (v)
920 return v;
921
922 /*
923 * Extract VF resource limits and return success.
924 */
925 word = be32_to_cpu(rpl.niqflint_niq);
926 vfres->niqflint = FW_PFVF_CMD_NIQFLINT_G(word);
927 vfres->niq = FW_PFVF_CMD_NIQ_G(word);
928
929 word = be32_to_cpu(rpl.type_to_neq);
930 vfres->neq = FW_PFVF_CMD_NEQ_G(word);
931 vfres->pmask = FW_PFVF_CMD_PMASK_G(word);
932
933 word = be32_to_cpu(rpl.tc_to_nexactf);
934 vfres->tc = FW_PFVF_CMD_TC_G(word);
935 vfres->nvi = FW_PFVF_CMD_NVI_G(word);
936 vfres->nexactf = FW_PFVF_CMD_NEXACTF_G(word);
937
938 word = be32_to_cpu(rpl.r_caps_to_nethctrl);
939 vfres->r_caps = FW_PFVF_CMD_R_CAPS_G(word);
940 vfres->wx_caps = FW_PFVF_CMD_WX_CAPS_G(word);
941 vfres->nethctrl = FW_PFVF_CMD_NETHCTRL_G(word);
942
943 return 0;
944 }
945
946 /**
947 * t4vf_read_rss_vi_config - read a VI's RSS configuration
948 * @adapter: the adapter
949 * @viid: Virtual Interface ID
950 * @config: pointer to host-native VI RSS Configuration buffer
951 *
952 * Reads the Virtual Interface's RSS configuration information and
953 * translates it into CPU-native format.
954 */
955 int t4vf_read_rss_vi_config(struct adapter *adapter, unsigned int viid,
956 union rss_vi_config *config)
957 {
958 struct fw_rss_vi_config_cmd cmd, rpl;
959 int v;
960
961 memset(&cmd, 0, sizeof(cmd));
962 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
963 FW_CMD_REQUEST_F |
964 FW_CMD_READ_F |
965 FW_RSS_VI_CONFIG_CMD_VIID(viid));
966 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
967 v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
968 if (v)
969 return v;
970
971 switch (adapter->params.rss.mode) {
972 case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
973 u32 word = be32_to_cpu(rpl.u.basicvirtual.defaultq_to_udpen);
974
975 config->basicvirtual.ip6fourtupen =
976 ((word & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) != 0);
977 config->basicvirtual.ip6twotupen =
978 ((word & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) != 0);
979 config->basicvirtual.ip4fourtupen =
980 ((word & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) != 0);
981 config->basicvirtual.ip4twotupen =
982 ((word & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) != 0);
983 config->basicvirtual.udpen =
984 ((word & FW_RSS_VI_CONFIG_CMD_UDPEN_F) != 0);
985 config->basicvirtual.defaultq =
986 FW_RSS_VI_CONFIG_CMD_DEFAULTQ_G(word);
987 break;
988 }
989
990 default:
991 return -EINVAL;
992 }
993
994 return 0;
995 }
996
997 /**
998 * t4vf_write_rss_vi_config - write a VI's RSS configuration
999 * @adapter: the adapter
1000 * @viid: Virtual Interface ID
1001 * @config: pointer to host-native VI RSS Configuration buffer
1002 *
1003 * Write the Virtual Interface's RSS configuration information
1004 * (translating it into firmware-native format before writing).
1005 */
1006 int t4vf_write_rss_vi_config(struct adapter *adapter, unsigned int viid,
1007 union rss_vi_config *config)
1008 {
1009 struct fw_rss_vi_config_cmd cmd, rpl;
1010
1011 memset(&cmd, 0, sizeof(cmd));
1012 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
1013 FW_CMD_REQUEST_F |
1014 FW_CMD_WRITE_F |
1015 FW_RSS_VI_CONFIG_CMD_VIID(viid));
1016 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
1017 switch (adapter->params.rss.mode) {
1018 case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
1019 u32 word = 0;
1020
1021 if (config->basicvirtual.ip6fourtupen)
1022 word |= FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F;
1023 if (config->basicvirtual.ip6twotupen)
1024 word |= FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F;
1025 if (config->basicvirtual.ip4fourtupen)
1026 word |= FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F;
1027 if (config->basicvirtual.ip4twotupen)
1028 word |= FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F;
1029 if (config->basicvirtual.udpen)
1030 word |= FW_RSS_VI_CONFIG_CMD_UDPEN_F;
1031 word |= FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(
1032 config->basicvirtual.defaultq);
1033 cmd.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(word);
1034 break;
1035 }
1036
1037 default:
1038 return -EINVAL;
1039 }
1040
1041 return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
1042 }
1043
1044 /**
1045 * t4vf_config_rss_range - configure a portion of the RSS mapping table
1046 * @adapter: the adapter
1047 * @viid: Virtual Interface of RSS Table Slice
1048 * @start: starting entry in the table to write
1049 * @n: how many table entries to write
1050 * @rspq: values for the "Response Queue" (Ingress Queue) lookup table
1051 * @nrspq: number of values in @rspq
1052 *
1053 * Programs the selected part of the VI's RSS mapping table with the
1054 * provided values. If @nrspq < @n the supplied values are used repeatedly
1055 * until the full table range is populated.
1056 *
1057 * The caller must ensure the values in @rspq are in the range 0..1023.
1058 */
1059 int t4vf_config_rss_range(struct adapter *adapter, unsigned int viid,
1060 int start, int n, const u16 *rspq, int nrspq)
1061 {
1062 const u16 *rsp = rspq;
1063 const u16 *rsp_end = rspq+nrspq;
1064 struct fw_rss_ind_tbl_cmd cmd;
1065
1066 /*
1067 * Initialize firmware command template to write the RSS table.
1068 */
1069 memset(&cmd, 0, sizeof(cmd));
1070 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
1071 FW_CMD_REQUEST_F |
1072 FW_CMD_WRITE_F |
1073 FW_RSS_IND_TBL_CMD_VIID_V(viid));
1074 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
1075
1076 /*
1077 * Each firmware RSS command can accommodate up to 32 RSS Ingress
1078 * Queue Identifiers. These Ingress Queue IDs are packed three to
1079 * a 32-bit word as 10-bit values with the upper remaining 2 bits
1080 * reserved.
1081 */
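	/* Illustrative example: writing 5 entries {2, 4, 6, 8, 10} packs them
	 * into two 32-bit words, (IQ0=2, IQ1=4, IQ2=6) and (IQ0=8, IQ1=10,
	 * IQ2=0), with the unused third slot of the last word left at zero.
	 */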
1082 while (n > 0) {
1083 __be32 *qp = &cmd.iq0_to_iq2;
1084 int nq = min(n, 32);
1085 int ret;
1086
1087 /*
1088 * Set up the firmware RSS command header to send the next
1089 * "nq" Ingress Queue IDs to the firmware.
1090 */
1091 cmd.niqid = cpu_to_be16(nq);
1092 cmd.startidx = cpu_to_be16(start);
1093
1094 /*
1095 * Advance the start index and remaining count for the next command.
1096 */
1097 start += nq;
1098 n -= nq;
1099
1100 /*
1101 * While there are still Ingress Queue IDs to stuff into the
1102 * current firmware RSS command, retrieve them from the
1103 * Ingress Queue ID array and insert them into the command.
1104 */
1105 while (nq > 0) {
1106 /*
1107 * Grab up to the next 3 Ingress Queue IDs (wrapping
1108 * around the Ingress Queue ID array if necessary) and
1109 * insert them into the firmware RSS command at the
1110 * current 3-tuple position within the command.
1111 */
1112 u16 qbuf[3];
1113 u16 *qbp = qbuf;
1114 int nqbuf = min(3, nq);
1115
1116 nq -= nqbuf;
1117 qbuf[0] = qbuf[1] = qbuf[2] = 0;
1118 while (nqbuf) {
1119 nqbuf--;
1120 *qbp++ = *rsp++;
1121 if (rsp >= rsp_end)
1122 rsp = rspq;
1123 }
1124 *qp++ = cpu_to_be32(FW_RSS_IND_TBL_CMD_IQ0_V(qbuf[0]) |
1125 FW_RSS_IND_TBL_CMD_IQ1_V(qbuf[1]) |
1126 FW_RSS_IND_TBL_CMD_IQ2_V(qbuf[2]));
1127 }
1128
1129 /*
1130 * Send this portion of the RSS table update to the firmware;
1131 * bail out on any errors.
1132 */
1133 ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
1134 if (ret)
1135 return ret;
1136 }
1137 return 0;
1138 }
1139
1140 /**
1141 * t4vf_alloc_vi - allocate a virtual interface on a port
1142 * @adapter: the adapter
1143 * @port_id: physical port associated with the VI
1144 *
1145 * Allocate a new Virtual Interface and bind it to the indicated
1146 * physical port. Return the new Virtual Interface Identifier on
1147 * success, or a [negative] error number on failure.
1148 */
1149 int t4vf_alloc_vi(struct adapter *adapter, int port_id)
1150 {
1151 struct fw_vi_cmd cmd, rpl;
1152 int v;
1153
1154 /*
1155 * Execute a VI command to allocate Virtual Interface and return its
1156 * VIID.
1157 */
1158 memset(&cmd, 0, sizeof(cmd));
1159 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
1160 FW_CMD_REQUEST_F |
1161 FW_CMD_WRITE_F |
1162 FW_CMD_EXEC_F);
1163 cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
1164 FW_VI_CMD_ALLOC_F);
1165 cmd.portid_pkd = FW_VI_CMD_PORTID_V(port_id);
1166 v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
1167 if (v)
1168 return v;
1169
1170 return FW_VI_CMD_VIID_G(be16_to_cpu(rpl.type_viid));
1171 }
1172
1173 /**
1174 * t4vf_free_vi -- free a virtual interface
1175 * @adapter: the adapter
1176 * @viid: the virtual interface identifier
1177 *
1178 * Free a previously allocated Virtual Interface. Return an error on
1179 * failure.
1180 */
1181 int t4vf_free_vi(struct adapter *adapter, int viid)
1182 {
1183 struct fw_vi_cmd cmd;
1184
1185 /*
1186 * Execute a VI command to free the Virtual Interface.
1187 */
1188 memset(&cmd, 0, sizeof(cmd));
1189 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
1190 FW_CMD_REQUEST_F |
1191 FW_CMD_EXEC_F);
1192 cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
1193 FW_VI_CMD_FREE_F);
1194 cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid));
1195 return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
1196 }
1197
1198 /**
1199 * t4vf_enable_vi - enable/disable a virtual interface
1200 * @adapter: the adapter
1201 * @viid: the Virtual Interface ID
1202 * @rx_en: 1=enable Rx, 0=disable Rx
1203 * @tx_en: 1=enable Tx, 0=disable Tx
1204 *
1205 * Enables/disables a virtual interface.
1206 */
1207 int t4vf_enable_vi(struct adapter *adapter, unsigned int viid,
1208 bool rx_en, bool tx_en)
1209 {
1210 struct fw_vi_enable_cmd cmd;
1211
1212 memset(&cmd, 0, sizeof(cmd));
1213 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
1214 FW_CMD_REQUEST_F |
1215 FW_CMD_EXEC_F |
1216 FW_VI_ENABLE_CMD_VIID_V(viid));
1217 cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
1218 FW_VI_ENABLE_CMD_EEN_V(tx_en) |
1219 FW_LEN16(cmd));
1220 return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
1221 }
1222
1223 /**
1224 * t4vf_identify_port - identify a VI's port by blinking its LED
1225 * @adapter: the adapter
1226 * @viid: the Virtual Interface ID
1227 * @nblinks: how many times to blink LED at 2.5 Hz
1228 *
1229 * Identifies a VI's port by blinking its LED.
1230 */
1231 int t4vf_identify_port(struct adapter *adapter, unsigned int viid,
1232 unsigned int nblinks)
1233 {
1234 struct fw_vi_enable_cmd cmd;
1235
1236 memset(&cmd, 0, sizeof(cmd));
1237 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
1238 FW_CMD_REQUEST_F |
1239 FW_CMD_EXEC_F |
1240 FW_VI_ENABLE_CMD_VIID_V(viid));
1241 cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F |
1242 FW_LEN16(cmd));
1243 cmd.blinkdur = cpu_to_be16(nblinks);
1244 return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
1245 }
1246
1247 /**
1248 * t4vf_set_rxmode - set Rx properties of a virtual interface
1249 * @adapter: the adapter
1250 * @viid: the VI id
1251 * @mtu: the new MTU or -1 for no change
1252 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
1253 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
1254 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
1255 * @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it,
1256 * -1 no change
1257 *
1258 * Sets Rx properties of a virtual interface.
1259 */
1260 int t4vf_set_rxmode(struct adapter *adapter, unsigned int viid,
1261 int mtu, int promisc, int all_multi, int bcast, int vlanex,
1262 bool sleep_ok)
1263 {
1264 struct fw_vi_rxmode_cmd cmd;
1265
1266 /* convert to FW values */
1267 if (mtu < 0)
1268 mtu = FW_VI_RXMODE_CMD_MTU_M;
1269 if (promisc < 0)
1270 promisc = FW_VI_RXMODE_CMD_PROMISCEN_M;
1271 if (all_multi < 0)
1272 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M;
1273 if (bcast < 0)
1274 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M;
1275 if (vlanex < 0)
1276 vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;
1277
1278 memset(&cmd, 0, sizeof(cmd));
1279 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
1280 FW_CMD_REQUEST_F |
1281 FW_CMD_WRITE_F |
1282 FW_VI_RXMODE_CMD_VIID_V(viid));
1283 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
1284 cmd.mtu_to_vlanexen =
1285 cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) |
1286 FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
1287 FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
1288 FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
1289 FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
1290 return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok);
1291 }
1292
1293 /**
1294 * t4vf_alloc_mac_filt - allocates exact-match filters for MAC addresses
1295 * @adapter: the adapter
1296 * @viid: the Virtual Interface Identifier
1297 * @free: if true any existing filters for this VI id are first removed
1298 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
1299 * @addr: the MAC address(es)
1300 * @idx: where to store the index of each allocated filter
1301 * @hash: pointer to hash address filter bitmap
1302 * @sleep_ok: call is allowed to sleep
1303 *
1304 * Allocates an exact-match filter for each of the supplied addresses and
1305 * sets it to the corresponding address. If @idx is not %NULL it should
1306 * have at least @naddr entries, each of which will be set to the index of
1307 * the filter allocated for the corresponding MAC address. If a filter
1308 * could not be allocated for an address its index is set to 0xffff.
1309 * If @hash is not %NULL addresses that fail to allocate an exact filter
1310 * are hashed and update the hash filter bitmap pointed at by @hash.
1311 *
1312 * Returns a negative error number or the number of filters allocated.
1313 */
1314 int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
1315 unsigned int naddr, const u8 **addr, u16 *idx,
1316 u64 *hash, bool sleep_ok)
1317 {
1318 int offset, ret = 0;
1319 unsigned nfilters = 0;
1320 unsigned int rem = naddr;
1321 struct fw_vi_mac_cmd cmd, rpl;
1322 unsigned int max_naddr = adapter->params.arch.mps_tcam_size;
1323
1324 if (naddr > max_naddr)
1325 return -EINVAL;
1326
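	/* Illustrative note: each mailbox command carries at most
	 * ARRAY_SIZE(cmd.u.exact) addresses, so (hypothetically, with a
	 * 7-entry exact array) a request for 10 addresses is split into
	 * chunks of 7 and 3.
	 */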
1327 for (offset = 0; offset < naddr; /**/) {
1328 unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact)
1329 ? rem
1330 : ARRAY_SIZE(cmd.u.exact));
1331 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
1332 u.exact[fw_naddr]), 16);
1333 struct fw_vi_mac_exact *p;
1334 int i;
1335
1336 memset(&cmd, 0, sizeof(cmd));
1337 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
1338 FW_CMD_REQUEST_F |
1339 FW_CMD_WRITE_F |
1340 (free ? FW_CMD_EXEC_F : 0) |
1341 FW_VI_MAC_CMD_VIID_V(viid));
1342 cmd.freemacs_to_len16 =
1343 cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
1344 FW_CMD_LEN16_V(len16));
1345
1346 for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) {
1347 p->valid_to_idx = cpu_to_be16(
1348 FW_VI_MAC_CMD_VALID_F |
1349 FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC));
1350 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
1351 }
1352
1353
1354 ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl,
1355 sleep_ok);
1356 if (ret && ret != -ENOMEM)
1357 break;
1358
1359 for (i = 0, p = rpl.u.exact; i < fw_naddr; i++, p++) {
1360 u16 index = FW_VI_MAC_CMD_IDX_G(
1361 be16_to_cpu(p->valid_to_idx));
1362
1363 if (idx)
1364 idx[offset+i] =
1365 (index >= max_naddr
1366 ? 0xffff
1367 : index);
1368 if (index < max_naddr)
1369 nfilters++;
1370 else if (hash)
1371 *hash |= (1ULL << hash_mac_addr(addr[offset+i]));
1372 }
1373
1374 free = false;
1375 offset += fw_naddr;
1376 rem -= fw_naddr;
1377 }
1378
1379 /*
1380 * If there were no errors or we merely ran out of room in our MAC
1381 * address arena, return the number of filters actually written.
1382 */
1383 if (ret == 0 || ret == -ENOMEM)
1384 ret = nfilters;
1385 return ret;
1386 }
1387
1388 /**
1389 * t4vf_free_mac_filt - frees exact-match filters of given MAC addresses
1390 * @adapter: the adapter
1391 * @viid: the VI id
1392 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
1393 * @addr: the MAC address(es)
1394 * @sleep_ok: call is allowed to sleep
1395 *
1396 * Frees the exact-match filter for each of the supplied addresses
1397 *
1398 * Returns a negative error number or the number of filters freed.
1399 */
1400 int t4vf_free_mac_filt(struct adapter *adapter, unsigned int viid,
1401 unsigned int naddr, const u8 **addr, bool sleep_ok)
1402 {
1403 int offset, ret = 0;
1404 struct fw_vi_mac_cmd cmd;
1405 unsigned int nfilters = 0;
1406 unsigned int max_naddr = adapter->params.arch.mps_tcam_size;
1407 unsigned int rem = naddr;
1408
1409 if (naddr > max_naddr)
1410 return -EINVAL;
1411
1412 for (offset = 0; offset < (int)naddr ; /**/) {
1413 unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact) ?
1414 rem : ARRAY_SIZE(cmd.u.exact));
1415 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
1416 u.exact[fw_naddr]), 16);
1417 struct fw_vi_mac_exact *p;
1418 int i;
1419
1420 memset(&cmd, 0, sizeof(cmd));
1421 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
1422 FW_CMD_REQUEST_F |
1423 FW_CMD_WRITE_F |
1424 FW_CMD_EXEC_V(0) |
1425 FW_VI_MAC_CMD_VIID_V(viid));
1426 cmd.freemacs_to_len16 =
1427 cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
1428 FW_CMD_LEN16_V(len16));
1429
1430 for (i = 0, p = cmd.u.exact; i < (int)fw_naddr; i++, p++) {
1431 p->valid_to_idx = cpu_to_be16(
1432 FW_VI_MAC_CMD_VALID_F |
1433 FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_MAC_BASED_FREE));
1434 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
1435 }
1436
1437 ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &cmd,
1438 sleep_ok);
1439 if (ret)
1440 break;
1441
1442 for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) {
1443 u16 index = FW_VI_MAC_CMD_IDX_G(
1444 be16_to_cpu(p->valid_to_idx));
1445
1446 if (index < max_naddr)
1447 nfilters++;
1448 }
1449
1450 offset += fw_naddr;
1451 rem -= fw_naddr;
1452 }
1453
1454 if (ret == 0)
1455 ret = nfilters;
1456 return ret;
1457 }
1458
1459 /**
1460 * t4vf_change_mac - modifies the exact-match filter for a MAC address
1461 * @adapter: the adapter
1462 * @viid: the Virtual Interface ID
1463 * @idx: index of existing filter for old value of MAC address, or -1
1464 * @addr: the new MAC address value
1465 * @persist: if idx < 0, the new MAC allocation should be persistent
1466 *
1467 * Modifies an exact-match filter and sets it to the new MAC address.
1468 * Note that in general it is not possible to modify the value of a given
1469 * filter so the generic way to modify an address filter is to free the
1470 * one being used by the old address value and allocate a new filter for
1471 * the new address value. @idx can be -1 if the address is a new
1472 * addition.
1473 *
1474 * Returns a negative error number or the index of the filter with the new
1475 * MAC value.
1476 */
1477 int t4vf_change_mac(struct adapter *adapter, unsigned int viid,
1478 int idx, const u8 *addr, bool persist)
1479 {
1480 int ret;
1481 struct fw_vi_mac_cmd cmd, rpl;
1482 struct fw_vi_mac_exact *p = &cmd.u.exact[0];
1483 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
1484 u.exact[1]), 16);
1485 unsigned int max_mac_addr = adapter->params.arch.mps_tcam_size;
1486
1487 /*
1488 * If this is a new allocation, determine whether it should be
1489 * persistent (across a "freemacs" operation) or not.
1490 */
1491 if (idx < 0)
1492 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
1493
1494 memset(&cmd, 0, sizeof(cmd));
1495 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
1496 FW_CMD_REQUEST_F |
1497 FW_CMD_WRITE_F |
1498 FW_VI_MAC_CMD_VIID_V(viid));
1499 cmd.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
1500 p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
1501 FW_VI_MAC_CMD_IDX_V(idx));
1502 memcpy(p->macaddr, addr, sizeof(p->macaddr));
1503
1504 ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
1505 if (ret == 0) {
1506 p = &rpl.u.exact[0];
1507 ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
1508 if (ret >= max_mac_addr)
1509 ret = -ENOMEM;
1510 }
1511 return ret;
1512 }
1513
1514 /**
1515 * t4vf_set_addr_hash - program the MAC inexact-match hash filter
1516 * @adapter: the adapter
1517 * @viid: the Virtual Interface Identifier
1518 * @ucast: whether the hash filter should also match unicast addresses
1519 * @vec: the value to be written to the hash filter
1520 * @sleep_ok: call is allowed to sleep
1521 *
1522 * Sets the 64-bit inexact-match hash filter for a virtual interface.
1523 */
1524 int t4vf_set_addr_hash(struct adapter *adapter, unsigned int viid,
1525 bool ucast, u64 vec, bool sleep_ok)
1526 {
1527 struct fw_vi_mac_cmd cmd;
1528 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
1529 u.exact[0]), 16);
1530
1531 memset(&cmd, 0, sizeof(cmd));
1532 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
1533 FW_CMD_REQUEST_F |
1534 FW_CMD_WRITE_F |
1535 FW_VI_ENABLE_CMD_VIID_V(viid));
1536 cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F |
1537 FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
1538 FW_CMD_LEN16_V(len16));
1539 cmd.u.hash.hashvec = cpu_to_be64(vec);
1540 return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok);
1541 }
1542
1543 /**
1544 * t4vf_get_port_stats - collect "port" statistics
1545 * @adapter: the adapter
1546 * @pidx: the port index
1547 * @s: the stats structure to fill
1548 *
1549 * Collect statistics for the "port"'s Virtual Interface.
1550 */
1551 int t4vf_get_port_stats(struct adapter *adapter, int pidx,
1552 struct t4vf_port_stats *s)
1553 {
1554 struct port_info *pi = adap2pinfo(adapter, pidx);
1555 struct fw_vi_stats_vf fwstats;
1556 unsigned int rem = VI_VF_NUM_STATS;
1557 __be64 *fwsp = (__be64 *)&fwstats;
1558
1559 /*
1560 * Grab the Virtual Interface statistics a chunk at a time via mailbox
1561 * commands. We could use a Work Request and get all of them at once
1562 * but that's an asynchronous interface which is awkward to use.
1563 */
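	/* Illustrative example: assuming VI_VF_NUM_STATS is 16, the loop
	 * below issues three mailbox commands, fetching statistics
	 * (ix=0, nstats=6), (ix=6, nstats=6) and (ix=12, nstats=4).
	 */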
1564 while (rem) {
1565 unsigned int ix = VI_VF_NUM_STATS - rem;
1566 unsigned int nstats = min(6U, rem);
1567 struct fw_vi_stats_cmd cmd, rpl;
1568 size_t len = (offsetof(struct fw_vi_stats_cmd, u) +
1569 sizeof(struct fw_vi_stats_ctl));
1570 size_t len16 = DIV_ROUND_UP(len, 16);
1571 int ret;
1572
1573 memset(&cmd, 0, sizeof(cmd));
1574 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_STATS_CMD) |
1575 FW_VI_STATS_CMD_VIID_V(pi->viid) |
1576 FW_CMD_REQUEST_F |
1577 FW_CMD_READ_F);
1578 cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
1579 cmd.u.ctl.nstats_ix =
1580 cpu_to_be16(FW_VI_STATS_CMD_IX_V(ix) |
1581 FW_VI_STATS_CMD_NSTATS_V(nstats));
1582 ret = t4vf_wr_mbox_ns(adapter, &cmd, len, &rpl);
1583 if (ret)
1584 return ret;
1585
1586 memcpy(fwsp, &rpl.u.ctl.stat0, sizeof(__be64) * nstats);
1587
1588 rem -= nstats;
1589 fwsp += nstats;
1590 }
1591
1592 /*
1593 * Translate firmware statistics into host native statistics.
1594 */
1595 s->tx_bcast_bytes = be64_to_cpu(fwstats.tx_bcast_bytes);
1596 s->tx_bcast_frames = be64_to_cpu(fwstats.tx_bcast_frames);
1597 s->tx_mcast_bytes = be64_to_cpu(fwstats.tx_mcast_bytes);
1598 s->tx_mcast_frames = be64_to_cpu(fwstats.tx_mcast_frames);
1599 s->tx_ucast_bytes = be64_to_cpu(fwstats.tx_ucast_bytes);
1600 s->tx_ucast_frames = be64_to_cpu(fwstats.tx_ucast_frames);
1601 s->tx_drop_frames = be64_to_cpu(fwstats.tx_drop_frames);
1602 s->tx_offload_bytes = be64_to_cpu(fwstats.tx_offload_bytes);
1603 s->tx_offload_frames = be64_to_cpu(fwstats.tx_offload_frames);
1604
1605 s->rx_bcast_bytes = be64_to_cpu(fwstats.rx_bcast_bytes);
1606 s->rx_bcast_frames = be64_to_cpu(fwstats.rx_bcast_frames);
1607 s->rx_mcast_bytes = be64_to_cpu(fwstats.rx_mcast_bytes);
1608 s->rx_mcast_frames = be64_to_cpu(fwstats.rx_mcast_frames);
1609 s->rx_ucast_bytes = be64_to_cpu(fwstats.rx_ucast_bytes);
1610 s->rx_ucast_frames = be64_to_cpu(fwstats.rx_ucast_frames);
1611
1612 s->rx_err_frames = be64_to_cpu(fwstats.rx_err_frames);
1613
1614 return 0;
1615 }
1616
1617 /**
1618 * t4vf_iq_free - free an ingress queue and its free lists
1619 * @adapter: the adapter
1620 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
1621 * @iqid: ingress queue ID
1622 * @fl0id: FL0 queue ID or 0xffff if no attached FL0
1623 * @fl1id: FL1 queue ID or 0xffff if no attached FL1
1624 *
1625 * Frees an ingress queue and its associated free lists, if any.
1626 */
1627 int t4vf_iq_free(struct adapter *adapter, unsigned int iqtype,
1628 unsigned int iqid, unsigned int fl0id, unsigned int fl1id)
1629 {
1630 struct fw_iq_cmd cmd;
1631
1632 memset(&cmd, 0, sizeof(cmd));
1633 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) |
1634 FW_CMD_REQUEST_F |
1635 FW_CMD_EXEC_F);
1636 cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F |
1637 FW_LEN16(cmd));
1638 cmd.type_to_iqandstindex =
1639 cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
1640
1641 cmd.iqid = cpu_to_be16(iqid);
1642 cmd.fl0id = cpu_to_be16(fl0id);
1643 cmd.fl1id = cpu_to_be16(fl1id);
1644 return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
1645 }
1646
1647 /**
1648 * t4vf_eth_eq_free - free an Ethernet egress queue
1649 * @adapter: the adapter
1650 * @eqid: egress queue ID
1651 *
1652 * Frees an Ethernet egress queue.
1653 */
1654 int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid)
1655 {
1656 struct fw_eq_eth_cmd cmd;
1657
1658 memset(&cmd, 0, sizeof(cmd));
1659 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
1660 FW_CMD_REQUEST_F |
1661 FW_CMD_EXEC_F);
1662 cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F |
1663 FW_LEN16(cmd));
1664 cmd.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid));
1665 return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
1666 }
1667
1668 /**
1669 * t4vf_handle_fw_rpl - process a firmware reply message
1670 * @adapter: the adapter
1671 * @rpl: start of the firmware message
1672 *
1673 * Processes a firmware message, such as link state change messages.
1674 */
1675 int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
1676 {
1677 const struct fw_cmd_hdr *cmd_hdr = (const struct fw_cmd_hdr *)rpl;
1678 u8 opcode = FW_CMD_OP_G(be32_to_cpu(cmd_hdr->hi));
1679
1680 switch (opcode) {
1681 case FW_PORT_CMD: {
1682 /*
1683 * Link/module state change message.
1684 */
1685 const struct fw_port_cmd *port_cmd =
1686 (const struct fw_port_cmd *)rpl;
1687 u32 stat, mod;
1688 int action, port_id, link_ok, speed, fc, pidx;
1689
1690 /*
1691 * Extract various fields from port status change message.
1692 */
1693 action = FW_PORT_CMD_ACTION_G(
1694 be32_to_cpu(port_cmd->action_to_len16));
1695 if (action != FW_PORT_ACTION_GET_PORT_INFO) {
1696 dev_err(adapter->pdev_dev,
1697 "Unknown firmware PORT reply action %x\n",
1698 action);
1699 break;
1700 }
1701
1702 port_id = FW_PORT_CMD_PORTID_G(
1703 be32_to_cpu(port_cmd->op_to_portid));
1704
1705 stat = be32_to_cpu(port_cmd->u.info.lstatus_to_modtype);
1706 link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
1707 speed = 0;
1708 fc = 0;
1709 if (stat & FW_PORT_CMD_RXPAUSE_F)
1710 fc |= PAUSE_RX;
1711 if (stat & FW_PORT_CMD_TXPAUSE_F)
1712 fc |= PAUSE_TX;
1713 if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
1714 speed = 100;
1715 else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
1716 speed = 1000;
1717 else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
1718 speed = 10000;
1719 else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
1720 speed = 40000;
1721
1722 /*
1723 * Scan all of our "ports" (Virtual Interfaces) looking for
1724 * those bound to the physical port which has changed. If
1725 * our recorded state doesn't match the current state,
1726 * signal that change to the OS code.
1727 */
1728 for_each_port(adapter, pidx) {
1729 struct port_info *pi = adap2pinfo(adapter, pidx);
1730 struct link_config *lc;
1731
1732 if (pi->port_id != port_id)
1733 continue;
1734
1735 lc = &pi->link_cfg;
1736
1737 mod = FW_PORT_CMD_MODTYPE_G(stat);
1738 if (mod != pi->mod_type) {
1739 pi->mod_type = mod;
1740 t4vf_os_portmod_changed(adapter, pidx);
1741 }
1742
1743 if (link_ok != lc->link_ok || speed != lc->speed ||
1744 fc != lc->fc) {
1745 /* something changed */
1746 lc->link_ok = link_ok;
1747 lc->speed = speed;
1748 lc->fc = fc;
1749 lc->supported =
1750 be16_to_cpu(port_cmd->u.info.pcap);
1751 lc->lp_advertising =
1752 be16_to_cpu(port_cmd->u.info.lpacap);
1753 t4vf_os_link_changed(adapter, pidx, link_ok);
1754 }
1755 }
1756 break;
1757 }
1758
1759 default:
1760 dev_err(adapter->pdev_dev, "Unknown firmware reply %X\n",
1761 opcode);
1762 }
1763 return 0;
1764 }
1765
/**
 * t4vf_prep_adapter - prepare the adapter for operation
 * @adapter: the adapter
 *
 * Wait for the device to become ready, install default values for some
 * adapter parameters (number of ports, port mask, core clock) and work out
 * which chip (T4/T5/T6) we're running on so that the chip-specific
 * architecture constants can be set up.
 */
1768 int t4vf_prep_adapter(struct adapter *adapter)
1769 {
1770 int err;
1771 unsigned int chipid;
1772
1773 /* Wait for the device to become ready before proceeding ...
1774 */
1775 err = t4vf_wait_dev_ready(adapter);
1776 if (err)
1777 return err;
1778
1779 /* Default port and clock for debugging in case we can't reach
1780 * firmware.
1781 */
1782 adapter->params.nports = 1;
1783 adapter->params.vfres.pmask = 1;
1784 adapter->params.vpd.cclk = 50000;
1785
1786 adapter->params.chip = 0;
1787 switch (CHELSIO_PCI_ID_VER(adapter->pdev->device)) {
1788 case CHELSIO_T4:
1789 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, 0);
1790 adapter->params.arch.sge_fl_db = DBPRIO_F;
1791 adapter->params.arch.mps_tcam_size =
1792 NUM_MPS_CLS_SRAM_L_INSTANCES;
1793 break;
1794
1795 case CHELSIO_T5:
1796 chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A));
1797 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid);
1798 adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
1799 adapter->params.arch.mps_tcam_size =
1800 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
1801 break;
1802
1803 case CHELSIO_T6:
1804 chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A));
1805 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, chipid);
1806 adapter->params.arch.sge_fl_db = 0;
1807 adapter->params.arch.mps_tcam_size =
1808 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
1809 break;
1810 }
1811
1812 return 0;
1813 }
1814
1815 /**
1816 * t4vf_get_vf_mac_acl - Get the MAC address to be set to
1817 * the VI of this VF.
1818 * @adapter: The adapter
1819 * @pf: The pf associated with vf
1820 * @naddr: the number of ACL MAC addresses returned in addr
1821 * @addr: Placeholder for MAC addresses
1822 *
1823 * Find the MAC address to be set on the VF's VI. The address is the one
1824 * requested by the host OS via a callback into the PF driver.
1825 */
1826 int t4vf_get_vf_mac_acl(struct adapter *adapter, unsigned int pf,
1827 unsigned int *naddr, u8 *addr)
1828 {
1829 struct fw_acl_mac_cmd cmd;
1830 int ret;
1831
1832 memset(&cmd, 0, sizeof(cmd));
1833 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_ACL_MAC_CMD) |
1834 FW_CMD_REQUEST_F |
1835 FW_CMD_READ_F);
1836 cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
1837 ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &cmd);
1838 if (ret)
1839 return ret;
1840
1841 if (cmd.nmac < *naddr)
1842 *naddr = cmd.nmac;
1843
1844 switch (pf) {
1845 case 3:
1846 memcpy(addr, cmd.macaddr3, sizeof(cmd.macaddr3));
1847 break;
1848 case 2:
1849 memcpy(addr, cmd.macaddr2, sizeof(cmd.macaddr2));
1850 break;
1851 case 1:
1852 memcpy(addr, cmd.macaddr1, sizeof(cmd.macaddr1));
1853 break;
1854 case 0:
1855 memcpy(addr, cmd.macaddr0, sizeof(cmd.macaddr0));
1856 break;
1857 }
1858
1859 return ret;
1860 }