Commit | Line | Data |
---|---|---|
16f8bd4b CL |
1 | /* |
2 | * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet | |
3 | * driver for Linux. | |
4 | * | |
5 | * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved. | |
6 | * | |
7 | * This software is available to you under a choice of one of two | |
8 | * licenses. You may choose to be licensed under the terms of the GNU | |
9 | * General Public License (GPL) Version 2, available from the file | |
10 | * COPYING in the main directory of this source tree, or the | |
11 | * OpenIB.org BSD license below: | |
12 | * | |
13 | * Redistribution and use in source and binary forms, with or | |
14 | * without modification, are permitted provided that the following | |
15 | * conditions are met: | |
16 | * | |
17 | * - Redistributions of source code must retain the above | |
18 | * copyright notice, this list of conditions and the following | |
19 | * disclaimer. | |
20 | * | |
21 | * - Redistributions in binary form must reproduce the above | |
22 | * copyright notice, this list of conditions and the following | |
23 | * disclaimer in the documentation and/or other materials | |
24 | * provided with the distribution. | |
25 | * | |
26 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
27 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
28 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
29 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
30 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
31 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
32 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
33 | * SOFTWARE. | |
34 | */ | |
35 | ||
16f8bd4b CL |
36 | #include <linux/pci.h> |
37 | ||
38 | #include "t4vf_common.h" | |
39 | #include "t4vf_defs.h" | |
40 | ||
41 | #include "../cxgb4/t4_regs.h" | |
f612b815 | 42 | #include "../cxgb4/t4_values.h" |
16f8bd4b CL |
43 | #include "../cxgb4/t4fw_api.h" |
44 | ||
45 | /* | |
46 | * Wait for the device to become ready (signified by our "who am I" register | |
47 | * returning a value other than all 1's). Return an error if it doesn't | |
48 | * become ready ... | |
49 | */ | |
d289f864 | 50 | int t4vf_wait_dev_ready(struct adapter *adapter) |
16f8bd4b CL |
51 | { |
52 | const u32 whoami = T4VF_PL_BASE_ADDR + PL_VF_WHOAMI; | |
53 | const u32 notready1 = 0xffffffff; | |
54 | const u32 notready2 = 0xeeeeeeee; | |
55 | u32 val; | |
56 | ||
57 | val = t4_read_reg(adapter, whoami); | |
58 | if (val != notready1 && val != notready2) | |
59 | return 0; | |
60 | msleep(500); | |
61 | val = t4_read_reg(adapter, whoami); | |
62 | if (val != notready1 && val != notready2) | |
63 | return 0; | |
64 | else | |
65 | return -EIO; | |
66 | } | |
67 | ||
68 | /* | |
69 | * Get the reply to a mailbox command and store it in @rpl in big-endian order | |
70 | * (since the firmware data structures are specified in a big-endian layout). | |
71 | */ | |
72 | static void get_mbox_rpl(struct adapter *adapter, __be64 *rpl, int size, | |
73 | u32 mbox_data) | |
74 | { | |
75 | for ( ; size; size -= 8, mbox_data += 8) | |
76 | *rpl++ = cpu_to_be64(t4_read_reg64(adapter, mbox_data)); | |
77 | } | |
78 | ||
ae7b7576 HS |
79 | /** |
80 | * t4vf_record_mbox - record a Firmware Mailbox Command/Reply in the log | |
81 | * @adapter: the adapter | |
82 | * @cmd: the Firmware Mailbox Command or Reply | |
83 | * @size: command length in bytes | |
84 | * @access: the time (ms) needed to access the Firmware Mailbox | |
85 | * @execute: the time (ms) the command spent being executed | |
16f8bd4b | 86 | */ |
ae7b7576 HS |
87 | static void t4vf_record_mbox(struct adapter *adapter, const __be64 *cmd, |
88 | int size, int access, int execute) | |
16f8bd4b | 89 | { |
ae7b7576 HS |
90 | struct mbox_cmd_log *log = adapter->mbox_log; |
91 | struct mbox_cmd *entry; | |
92 | int i; | |
93 | ||
94 | entry = mbox_cmd_log_entry(log, log->cursor++); | |
95 | if (log->cursor == log->size) | |
96 | log->cursor = 0; | |
97 | ||
98 | for (i = 0; i < size / 8; i++) | |
99 | entry->cmd[i] = be64_to_cpu(cmd[i]); | |
100 | while (i < MBOX_LEN / 8) | |
101 | entry->cmd[i++] = 0; | |
102 | entry->timestamp = jiffies; | |
103 | entry->seqno = log->seqno++; | |
104 | entry->access = access; | |
105 | entry->execute = execute; | |
16f8bd4b CL |
106 | } |
107 | ||
108 | /** | |
109 | * t4vf_wr_mbox_core - send a command to FW through the mailbox | |
110 | * @adapter: the adapter | |
111 | * @cmd: the command to write | |
112 | * @size: command length in bytes | |
113 | * @rpl: where to optionally store the reply | |
114 | * @sleep_ok: if true we may sleep while awaiting command completion | |
115 | * | |
116 | * Sends the given command to FW through the mailbox and waits for the | |
117 | * FW to execute the command. If @rpl is not %NULL it is used to store | |
118 | * the FW's reply to the command. The command and its optional reply | |
119 | * are of the same length. FW can take up to 500 ms to respond. | |
120 | * @sleep_ok determines whether we may sleep while awaiting the response. | |
121 | * If sleeping is allowed we use progressive backoff otherwise we spin. | |
122 | * | |
123 | * The return value is 0 on success or a negative errno on failure. A | |
124 | * failure can happen either because we are not able to execute the | |
125 | * command or FW executes it but signals an error. In the latter case | |
126 | * the return value is the error code indicated by FW (negated). | |
127 | */ | |
int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
		      void *rpl, bool sleep_ok)
{
	/* Progressive backoff schedule (milliseconds); the last element is
	 * reused once we run off the end of the array.
	 */
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100
	};

	/* access/execute are timing values recorded into the mailbox log;
	 * note that access is never updated after initialization here.
	 */
	u16 access = 0, execute = 0;
	u32 v, mbox_data;
	int i, ms, delay_idx, ret;
	const __be64 *p;
	u32 mbox_ctl = T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL;
	/* The command opcode is used only to keep (high-frequency)
	 * FW_VI_STATS_CMD traffic out of the mailbox log below.
	 */
	u32 cmd_op = FW_CMD_OP_G(be32_to_cpu(((struct fw_cmd_hdr *)cmd)->hi));
	__be64 cmd_rpl[MBOX_LEN / 8];
	struct mbox_list entry;

	/* In T6, mailbox size is changed to 128 bytes to avoid
	 * invalidating the entire prefetch buffer.
	 */
	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
		mbox_data = T4VF_MBDATA_BASE_ADDR;
	else
		mbox_data = T6VF_MBDATA_BASE_ADDR;

	/*
	 * Commands must be multiples of 16 bytes in length and may not be
	 * larger than the size of the Mailbox Data register array.
	 */
	if ((size % 16) != 0 ||
	    size > NUM_CIM_VF_MAILBOX_DATA_INSTANCES * 4)
		return -EINVAL;

	/* Queue ourselves onto the mailbox access list.  When our entry is at
	 * the front of the list, we have rights to access the mailbox.  So we
	 * wait [for a while] till we're at the front [or bail out with an
	 * EBUSY] ...
	 */
	spin_lock(&adapter->mbox_lock);
	list_add_tail(&entry.list, &adapter->mlist.list);
	spin_unlock(&adapter->mbox_lock);

	delay_idx = 0;
	ms = delay[0];

	for (i = 0; ; i += ms) {
		/* If we've waited too long, return a busy indication.  This
		 * really ought to be based on our initial position in the
		 * mailbox access list but this is a start.  We very rearely
		 * contend on access to the mailbox ...
		 */
		if (i > FW_CMD_MAX_TIMEOUT) {
			spin_lock(&adapter->mbox_lock);
			list_del(&entry.list);
			spin_unlock(&adapter->mbox_lock);
			ret = -EBUSY;
			t4vf_record_mbox(adapter, cmd, size, access, ret);
			return ret;
		}

		/* If we're at the head, break out and start the mailbox
		 * protocol.
		 */
		if (list_first_entry(&adapter->mlist.list, struct mbox_list,
				     list) == &entry)
			break;

		/* Delay for a bit before checking again ... */
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else {
			mdelay(ms);
		}
	}

	/*
	 * Loop trying to get ownership of the mailbox.  Return an error
	 * if we can't gain ownership.
	 */
	v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
		v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
	if (v != MBOX_OWNER_DRV) {
		/* We never became the mailbox owner; dequeue ourselves and
		 * log the failed command before bailing out.
		 */
		spin_lock(&adapter->mbox_lock);
		list_del(&entry.list);
		spin_unlock(&adapter->mbox_lock);
		ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
		t4vf_record_mbox(adapter, cmd, size, access, ret);
		return ret;
	}

	/*
	 * Write the command array into the Mailbox Data register array and
	 * transfer ownership of the mailbox to the firmware.
	 *
	 * For the VFs, the Mailbox Data "registers" are actually backed by
	 * T4's "MA" interface rather than PL Registers (as is the case for
	 * the PFs).  Because these are in different coherency domains, the
	 * write to the VF's PL-register-backed Mailbox Control can race in
	 * front of the writes to the MA-backed VF Mailbox Data "registers".
	 * So we need to do a read-back on at least one byte of the VF Mailbox
	 * Data registers before doing the write to the VF Mailbox Control
	 * register.
	 */
	if (cmd_op != FW_VI_STATS_CMD)
		t4vf_record_mbox(adapter, cmd, size, access, 0);
	for (i = 0, p = cmd; i < size; i += 8)
		t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));
	t4_read_reg(adapter, mbox_data);         /* flush write */

	t4_write_reg(adapter, mbox_ctl,
		     MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
	t4_read_reg(adapter, mbox_ctl);          /* flush write */

	/*
	 * Spin waiting for firmware to acknowledge processing our command.
	 */
	delay_idx = 0;
	ms = delay[0];

	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		/*
		 * If we're the owner, see if this is the reply we wanted.
		 */
		v = t4_read_reg(adapter, mbox_ctl);
		if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
			/*
			 * If the Message Valid bit isn't on, revoke ownership
			 * of the mailbox and continue waiting for our reply.
			 */
			if ((v & MBMSGVALID_F) == 0) {
				t4_write_reg(adapter, mbox_ctl,
					     MBOWNER_V(MBOX_OWNER_NONE));
				continue;
			}

			/*
			 * We now have our reply.  Extract the command return
			 * value, copy the reply back to our caller's buffer
			 * (if specified) and revoke ownership of the mailbox.
			 * We return the (negated) firmware command return
			 * code (this depends on FW_SUCCESS == 0).
			 */
			get_mbox_rpl(adapter, cmd_rpl, size, mbox_data);

			/* return value in low-order little-endian word */
			v = be64_to_cpu(cmd_rpl[0]);

			if (rpl) {
				/* request bit in high-order BE word */
				WARN_ON((be32_to_cpu(*(const __be32 *)cmd)
					 & FW_CMD_REQUEST_F) == 0);
				memcpy(rpl, cmd_rpl, size);
				WARN_ON((be32_to_cpu(*(__be32 *)rpl)
					 & FW_CMD_REQUEST_F) != 0);
			}
			t4_write_reg(adapter, mbox_ctl,
				     MBOWNER_V(MBOX_OWNER_NONE));
			/* Log the reply along with how long (ms) the command
			 * spent in flight.
			 */
			execute = i + ms;
			if (cmd_op != FW_VI_STATS_CMD)
				t4vf_record_mbox(adapter, cmd_rpl, size, access,
						 execute);
			spin_lock(&adapter->mbox_lock);
			list_del(&entry.list);
			spin_unlock(&adapter->mbox_lock);
			return -FW_CMD_RETVAL_G(v);
		}
	}

	/* We timed out.  Return the error ... */
	ret = -ETIMEDOUT;
	t4vf_record_mbox(adapter, cmd, size, access, ret);
	spin_lock(&adapter->mbox_lock);
	list_del(&entry.list);
	spin_unlock(&adapter->mbox_lock);
	return ret;
}
315 | ||
5ad24def | 316 | #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\ |
9b86a8d1 HS |
317 | FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_25G | \ |
318 | FW_PORT_CAP_SPEED_40G | FW_PORT_CAP_SPEED_100G | \ | |
319 | FW_PORT_CAP_ANEG) | |
5ad24def | 320 | |
16f8bd4b CL |
321 | /** |
322 | * init_link_config - initialize a link's SW state | |
323 | * @lc: structure holding the link state | |
324 | * @caps: link capabilities | |
325 | * | |
326 | * Initializes the SW state maintained for each link, including the link's | |
327 | * capabilities and default speed/flow-control/autonegotiation settings. | |
328 | */ | |
1dd06ae8 | 329 | static void init_link_config(struct link_config *lc, unsigned int caps) |
16f8bd4b CL |
330 | { |
331 | lc->supported = caps; | |
eb97ad99 | 332 | lc->lp_advertising = 0; |
16f8bd4b CL |
333 | lc->requested_speed = 0; |
334 | lc->speed = 0; | |
335 | lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX; | |
5ad24def HS |
336 | if (lc->supported & FW_PORT_CAP_ANEG) { |
337 | lc->advertising = lc->supported & ADVERT_MASK; | |
16f8bd4b CL |
338 | lc->autoneg = AUTONEG_ENABLE; |
339 | lc->requested_fc |= PAUSE_AUTONEG; | |
340 | } else { | |
341 | lc->advertising = 0; | |
342 | lc->autoneg = AUTONEG_DISABLE; | |
343 | } | |
344 | } | |
345 | ||
346 | /** | |
347 | * t4vf_port_init - initialize port hardware/software state | |
348 | * @adapter: the adapter | |
349 | * @pidx: the adapter port index | |
350 | */ | |
int t4vf_port_init(struct adapter *adapter, int pidx)
{
	struct port_info *pi = adap2pinfo(adapter, pidx);
	struct fw_vi_cmd vi_cmd, vi_rpl;
	struct fw_port_cmd port_cmd, port_rpl;
	int v;

	/*
	 * Execute a VI Read command to get our Virtual Interface information
	 * like MAC address, etc.
	 */
	memset(&vi_cmd, 0, sizeof(vi_cmd));
	vi_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
				       FW_CMD_REQUEST_F |
				       FW_CMD_READ_F);
	vi_cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(vi_cmd));
	vi_cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(pi->viid));
	v = t4vf_wr_mbox(adapter, &vi_cmd, sizeof(vi_cmd), &vi_rpl);
	if (v)
		return v;

	/* Sanity check: the firmware's idea of which port this Virtual
	 * Interface is attached to must match our cached port_id.
	 */
	BUG_ON(pi->port_id != FW_VI_CMD_PORTID_G(vi_rpl.portid_pkd));
	pi->rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(vi_rpl.rsssize_pkd));
	t4_os_set_hw_addr(adapter, pidx, vi_rpl.mac);

	/*
	 * If we don't have read access to our port information, we're done
	 * now.  Otherwise, execute a PORT Read command to get it ...
	 */
	if (!(adapter->params.vfres.r_caps & FW_CMD_CAP_PORT))
		return 0;

	memset(&port_cmd, 0, sizeof(port_cmd));
	port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
					    FW_CMD_REQUEST_F |
					    FW_CMD_READ_F |
					    FW_PORT_CMD_PORTID_V(pi->port_id));
	port_cmd.action_to_len16 =
		cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
			    FW_LEN16(port_cmd));
	v = t4vf_wr_mbox(adapter, &port_cmd, sizeof(port_cmd), &port_rpl);
	if (v)
		return v;

	/* Extract MDIO address (if the MDIO capability bit is set), port
	 * type and port capabilities from the reply, then initialize our
	 * software link state from the reported capabilities.
	 */
	v = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype);
	pi->mdio_addr = (v & FW_PORT_CMD_MDIOCAP_F) ?
			FW_PORT_CMD_MDIOADDR_G(v) : -1;
	pi->port_type = FW_PORT_CMD_PTYPE_G(v);
	pi->mod_type = FW_PORT_MOD_TYPE_NA;

	init_link_config(&pi->link_cfg, be16_to_cpu(port_rpl.u.info.pcap));

	return 0;
}
405 | ||
e68e6133 CL |
406 | /** |
407 | * t4vf_fw_reset - issue a reset to FW | |
408 | * @adapter: the adapter | |
409 | * | |
410 | * Issues a reset command to FW. For a Physical Function this would | |
dbedd44e | 411 | * result in the Firmware resetting all of its state. For a Virtual |
e68e6133 CL |
412 | * Function this just resets the state associated with the VF. |
413 | */ | |
414 | int t4vf_fw_reset(struct adapter *adapter) | |
415 | { | |
416 | struct fw_reset_cmd cmd; | |
417 | ||
418 | memset(&cmd, 0, sizeof(cmd)); | |
e2ac9628 HS |
419 | cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RESET_CMD) | |
420 | FW_CMD_WRITE_F); | |
e68e6133 CL |
421 | cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); |
422 | return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); | |
423 | } | |
424 | ||
16f8bd4b CL |
425 | /** |
426 | * t4vf_query_params - query FW or device parameters | |
427 | * @adapter: the adapter | |
428 | * @nparams: the number of parameters | |
429 | * @params: the parameter names | |
430 | * @vals: the parameter values | |
431 | * | |
432 | * Reads the values of firmware or device parameters. Up to 7 parameters | |
433 | * can be queried at once. | |
434 | */ | |
de5b8677 | 435 | static int t4vf_query_params(struct adapter *adapter, unsigned int nparams, |
436 | const u32 *params, u32 *vals) | |
16f8bd4b CL |
437 | { |
438 | int i, ret; | |
439 | struct fw_params_cmd cmd, rpl; | |
440 | struct fw_params_param *p; | |
441 | size_t len16; | |
442 | ||
443 | if (nparams > 7) | |
444 | return -EINVAL; | |
445 | ||
446 | memset(&cmd, 0, sizeof(cmd)); | |
e2ac9628 HS |
447 | cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) | |
448 | FW_CMD_REQUEST_F | | |
449 | FW_CMD_READ_F); | |
16f8bd4b CL |
450 | len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd, |
451 | param[nparams].mnem), 16); | |
e2ac9628 | 452 | cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16)); |
16f8bd4b CL |
453 | for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) |
454 | p->mnem = htonl(*params++); | |
455 | ||
456 | ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); | |
457 | if (ret == 0) | |
458 | for (i = 0, p = &rpl.param[0]; i < nparams; i++, p++) | |
459 | *vals++ = be32_to_cpu(p->val); | |
460 | return ret; | |
461 | } | |
462 | ||
463 | /** | |
464 | * t4vf_set_params - sets FW or device parameters | |
465 | * @adapter: the adapter | |
466 | * @nparams: the number of parameters | |
467 | * @params: the parameter names | |
468 | * @vals: the parameter values | |
469 | * | |
470 | * Sets the values of firmware or device parameters. Up to 7 parameters | |
471 | * can be specified at once. | |
472 | */ | |
473 | int t4vf_set_params(struct adapter *adapter, unsigned int nparams, | |
474 | const u32 *params, const u32 *vals) | |
475 | { | |
476 | int i; | |
477 | struct fw_params_cmd cmd; | |
478 | struct fw_params_param *p; | |
479 | size_t len16; | |
480 | ||
481 | if (nparams > 7) | |
482 | return -EINVAL; | |
483 | ||
484 | memset(&cmd, 0, sizeof(cmd)); | |
e2ac9628 HS |
485 | cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) | |
486 | FW_CMD_REQUEST_F | | |
487 | FW_CMD_WRITE_F); | |
16f8bd4b CL |
488 | len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd, |
489 | param[nparams]), 16); | |
e2ac9628 | 490 | cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16)); |
16f8bd4b CL |
491 | for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) { |
492 | p->mnem = cpu_to_be32(*params++); | |
493 | p->val = cpu_to_be32(*vals++); | |
494 | } | |
495 | ||
496 | return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); | |
497 | } | |
498 | ||
cb440364 HS |
499 | /** |
500 | * t4vf_fl_pkt_align - return the fl packet alignment | |
501 | * @adapter: the adapter | |
502 | * | |
503 | * T4 has a single field to specify the packing and padding boundary. | |
504 | * T5 onwards has separate fields for this and hence the alignment for | |
505 | * next packet offset is maximum of these two. And T6 changes the | |
506 | * Ingress Padding Boundary Shift, so it's all a mess and it's best | |
507 | * if we put this in low-level Common Code ... | |
508 | * | |
509 | */ | |
510 | int t4vf_fl_pkt_align(struct adapter *adapter) | |
511 | { | |
512 | u32 sge_control, sge_control2; | |
513 | unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift; | |
514 | ||
515 | sge_control = adapter->params.sge.sge_control; | |
516 | ||
517 | /* T4 uses a single control field to specify both the PCIe Padding and | |
518 | * Packing Boundary. T5 introduced the ability to specify these | |
519 | * separately. The actual Ingress Packet Data alignment boundary | |
520 | * within Packed Buffer Mode is the maximum of these two | |
521 | * specifications. (Note that it makes no real practical sense to | |
522 | * have the Pading Boudary be larger than the Packing Boundary but you | |
523 | * could set the chip up that way and, in fact, legacy T4 code would | |
524 | * end doing this because it would initialize the Padding Boundary and | |
525 | * leave the Packing Boundary initialized to 0 (16 bytes).) | |
526 | * Padding Boundary values in T6 starts from 8B, | |
527 | * where as it is 32B for T4 and T5. | |
528 | */ | |
529 | if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) | |
530 | ingpad_shift = INGPADBOUNDARY_SHIFT_X; | |
531 | else | |
532 | ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X; | |
533 | ||
534 | ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) + ingpad_shift); | |
535 | ||
536 | fl_align = ingpadboundary; | |
537 | if (!is_t4(adapter->params.chip)) { | |
538 | /* T5 has a different interpretation of one of the PCIe Packing | |
539 | * Boundary values. | |
540 | */ | |
541 | sge_control2 = adapter->params.sge.sge_control2; | |
542 | ingpackboundary = INGPACKBOUNDARY_G(sge_control2); | |
543 | if (ingpackboundary == INGPACKBOUNDARY_16B_X) | |
544 | ingpackboundary = 16; | |
545 | else | |
546 | ingpackboundary = 1 << (ingpackboundary + | |
547 | INGPACKBOUNDARY_SHIFT_X); | |
548 | ||
549 | fl_align = max(ingpadboundary, ingpackboundary); | |
550 | } | |
551 | return fl_align; | |
552 | } | |
553 | ||
e85c9a7a | 554 | /** |
b2612722 | 555 | * t4vf_bar2_sge_qregs - return BAR2 SGE Queue register information |
e85c9a7a HS |
556 | * @adapter: the adapter |
557 | * @qid: the Queue ID | |
558 | * @qtype: the Ingress or Egress type for @qid | |
559 | * @pbar2_qoffset: BAR2 Queue Offset | |
560 | * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues | |
561 | * | |
562 | * Returns the BAR2 SGE Queue Registers information associated with the | |
563 | * indicated Absolute Queue ID. These are passed back in return value | |
564 | * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue | |
565 | * and T4_BAR2_QTYPE_INGRESS for Ingress Queues. | |
566 | * | |
567 | * This may return an error which indicates that BAR2 SGE Queue | |
568 | * registers aren't available. If an error is not returned, then the | |
569 | * following values are returned: | |
570 | * | |
571 | * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers | |
572 | * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid | |
573 | * | |
574 | * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which | |
575 | * require the "Inferred Queue ID" ability may be used. E.g. the | |
576 | * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0, | |
577 | * then these "Inferred Queue ID" register may not be used. | |
578 | */ | |
b2612722 HS |
579 | int t4vf_bar2_sge_qregs(struct adapter *adapter, |
580 | unsigned int qid, | |
581 | enum t4_bar2_qtype qtype, | |
582 | u64 *pbar2_qoffset, | |
583 | unsigned int *pbar2_qid) | |
e85c9a7a HS |
584 | { |
585 | unsigned int page_shift, page_size, qpp_shift, qpp_mask; | |
586 | u64 bar2_page_offset, bar2_qoffset; | |
587 | unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred; | |
588 | ||
589 | /* T4 doesn't support BAR2 SGE Queue registers. | |
590 | */ | |
591 | if (is_t4(adapter->params.chip)) | |
592 | return -EINVAL; | |
593 | ||
594 | /* Get our SGE Page Size parameters. | |
595 | */ | |
596 | page_shift = adapter->params.sge.sge_vf_hps + 10; | |
597 | page_size = 1 << page_shift; | |
598 | ||
599 | /* Get the right Queues per Page parameters for our Queue. | |
600 | */ | |
601 | qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS | |
602 | ? adapter->params.sge.sge_vf_eq_qpp | |
603 | : adapter->params.sge.sge_vf_iq_qpp); | |
604 | qpp_mask = (1 << qpp_shift) - 1; | |
605 | ||
606 | /* Calculate the basics of the BAR2 SGE Queue register area: | |
607 | * o The BAR2 page the Queue registers will be in. | |
608 | * o The BAR2 Queue ID. | |
609 | * o The BAR2 Queue ID Offset into the BAR2 page. | |
610 | */ | |
2ff2acf1 | 611 | bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift); |
e85c9a7a HS |
612 | bar2_qid = qid & qpp_mask; |
613 | bar2_qid_offset = bar2_qid * SGE_UDB_SIZE; | |
614 | ||
615 | /* If the BAR2 Queue ID Offset is less than the Page Size, then the | |
616 | * hardware will infer the Absolute Queue ID simply from the writes to | |
617 | * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a | |
618 | * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply | |
619 | * write to the first BAR2 SGE Queue Area within the BAR2 Page with | |
620 | * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID | |
621 | * from the BAR2 Page and BAR2 Queue ID. | |
622 | * | |
623 | * One important censequence of this is that some BAR2 SGE registers | |
624 | * have a "Queue ID" field and we can write the BAR2 SGE Queue ID | |
625 | * there. But other registers synthesize the SGE Queue ID purely | |
626 | * from the writes to the registers -- the Write Combined Doorbell | |
627 | * Buffer is a good example. These BAR2 SGE Registers are only | |
628 | * available for those BAR2 SGE Register areas where the SGE Absolute | |
629 | * Queue ID can be inferred from simple writes. | |
630 | */ | |
631 | bar2_qoffset = bar2_page_offset; | |
632 | bar2_qinferred = (bar2_qid_offset < page_size); | |
633 | if (bar2_qinferred) { | |
634 | bar2_qoffset += bar2_qid_offset; | |
635 | bar2_qid = 0; | |
636 | } | |
637 | ||
638 | *pbar2_qoffset = bar2_qoffset; | |
639 | *pbar2_qid = bar2_qid; | |
640 | return 0; | |
641 | } | |
642 | ||
16f8bd4b CL |
643 | /** |
644 | * t4vf_get_sge_params - retrieve adapter Scatter gather Engine parameters | |
645 | * @adapter: the adapter | |
646 | * | |
647 | * Retrieves various core SGE parameters in the form of hardware SGE | |
648 | * register values. The caller is responsible for decoding these as | |
649 | * needed. The SGE parameters are stored in @adapter->params.sge. | |
650 | */ | |
int t4vf_get_sge_params(struct adapter *adapter)
{
	struct sge_params *sge_params = &adapter->params.sge;
	u32 params[7], vals[7];
	int v;

	/* First batch: read the core SGE register values (control, host
	 * page size, two Free List buffer sizes and the three timer-value
	 * register pairs) through the firmware's register-read parameter
	 * interface.
	 */
	params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL_A));
	params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_HOST_PAGE_SIZE_A));
	params[2] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE0_A));
	params[3] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE1_A));
	params[4] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_0_AND_1_A));
	params[5] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_2_AND_3_A));
	params[6] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_4_AND_5_A));
	v = t4vf_query_params(adapter, 7, params, vals);
	if (v)
		return v;
	sge_params->sge_control = vals[0];
	sge_params->sge_host_page_size = vals[1];
	sge_params->sge_fl_buffer_size[0] = vals[2];
	sge_params->sge_fl_buffer_size[1] = vals[3];
	sge_params->sge_timer_value_0_and_1 = vals[4];
	sge_params->sge_timer_value_2_and_3 = vals[5];
	sge_params->sge_timer_value_4_and_5 = vals[6];

	/* T4 uses a single control field to specify both the PCIe Padding and
	 * Packing Boundary.  T5 introduced the ability to specify these
	 * separately with the Padding Boundary in SGE_CONTROL and and Packing
	 * Boundary in SGE_CONTROL2.  So for T5 and later we need to grab
	 * SGE_CONTROL in order to determine how ingress packet data will be
	 * laid out in Packed Buffer Mode.  Unfortunately, older versions of
	 * the firmware won't let us retrieve SGE_CONTROL2 so if we get a
	 * failure grabbing it we throw an error since we can't figure out the
	 * right value.
	 */
	if (!is_t4(adapter->params.chip)) {
		params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
			     FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL2_A));
		v = t4vf_query_params(adapter, 1, params, vals);
		if (v != FW_SUCCESS) {
			dev_err(adapter->pdev_dev,
				"Unable to get SGE Control2; "
				"probably old firmware.\n");
			return v;
		}
		sge_params->sge_control2 = vals[0];
	}

	/* Second batch: the ingress RX threshold and congestion control
	 * register values.
	 */
	params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_INGRESS_RX_THRESHOLD_A));
	params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_CONM_CTRL_A));
	v = t4vf_query_params(adapter, 2, params, vals);
	if (v)
		return v;
	sge_params->sge_ingress_rx_threshold = vals[0];
	sge_params->sge_congestion_control = vals[1];

	/* For T5 and later we want to use the new BAR2 Doorbells.
	 * Unfortunately, older firmware didn't allow the this register to be
	 * read.
	 */
	if (!is_t4(adapter->params.chip)) {
		u32 whoami;
		unsigned int pf, s_hps, s_qpp;

		params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
			     FW_PARAMS_PARAM_XYZ_V(
				     SGE_EGRESS_QUEUES_PER_PAGE_VF_A));
		params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
			     FW_PARAMS_PARAM_XYZ_V(
				     SGE_INGRESS_QUEUES_PER_PAGE_VF_A));
		v = t4vf_query_params(adapter, 2, params, vals);
		if (v != FW_SUCCESS) {
			dev_warn(adapter->pdev_dev,
				 "Unable to get VF SGE Queues/Page; "
				 "probably old firmware.\n");
			return v;
		}
		sge_params->sge_egress_queues_per_page = vals[0];
		sge_params->sge_ingress_queues_per_page = vals[1];

		/* We need the Queues/Page for our VF.  This is based on the
		 * PF from which we're instantiated and is indexed in the
		 * register we just read.  Do it once here so other code in
		 * the driver can just use it.
		 */
		whoami = t4_read_reg(adapter,
				     T4VF_PL_BASE_ADDR + PL_VF_WHOAMI_A);
		/* T6 moved the SOURCEPF field, hence the chip-version
		 * dependent extraction macro.
		 */
		pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
			SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);

		/* Extract our PF's per-PF Host Page Size field. */
		s_hps = (HOSTPAGESIZEPF0_S +
			 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * pf);
		sge_params->sge_vf_hps =
			((sge_params->sge_host_page_size >> s_hps)
			 & HOSTPAGESIZEPF0_M);

		/* Extract our PF's per-PF Queues-per-Page fields for both
		 * egress and ingress queues.
		 */
		s_qpp = (QUEUESPERPAGEPF0_S +
			 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * pf);
		sge_params->sge_vf_eq_qpp =
			((sge_params->sge_egress_queues_per_page >> s_qpp)
			 & QUEUESPERPAGEPF0_M);
		sge_params->sge_vf_iq_qpp =
			((sge_params->sge_ingress_queues_per_page >> s_qpp)
			 & QUEUESPERPAGEPF0_M);
	}

	return 0;
}
767 | ||
768 | /** | |
769 | * t4vf_get_vpd_params - retrieve device VPD parameters | |
770 | * @adapter: the adapter | |
771 | * | |
772 | * Retrieves various device Vital Product Data parameters. The parameters | |
773 | * are stored in @adapter->params.vpd. | |
774 | */ | |
775 | int t4vf_get_vpd_params(struct adapter *adapter) | |
776 | { | |
777 | struct vpd_params *vpd_params = &adapter->params.vpd; | |
778 | u32 params[7], vals[7]; | |
779 | int v; | |
780 | ||
5167865a HS |
781 | params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | |
782 | FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK)); | |
16f8bd4b CL |
783 | v = t4vf_query_params(adapter, 1, params, vals); |
784 | if (v) | |
785 | return v; | |
786 | vpd_params->cclk = vals[0]; | |
787 | ||
788 | return 0; | |
789 | } | |
790 | ||
791 | /** | |
792 | * t4vf_get_dev_params - retrieve device parameters | |
793 | * @adapter: the adapter | |
794 | * | |
795 | * Retrieves various device parameters. The parameters are stored in | |
796 | * @adapter->params.dev. | |
797 | */ | |
798 | int t4vf_get_dev_params(struct adapter *adapter) | |
799 | { | |
800 | struct dev_params *dev_params = &adapter->params.dev; | |
801 | u32 params[7], vals[7]; | |
802 | int v; | |
803 | ||
5167865a HS |
804 | params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | |
805 | FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWREV)); | |
806 | params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | | |
807 | FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_TPREV)); | |
16f8bd4b CL |
808 | v = t4vf_query_params(adapter, 2, params, vals); |
809 | if (v) | |
810 | return v; | |
811 | dev_params->fwrev = vals[0]; | |
812 | dev_params->tprev = vals[1]; | |
813 | ||
814 | return 0; | |
815 | } | |
816 | ||
817 | /** | |
818 | * t4vf_get_rss_glb_config - retrieve adapter RSS Global Configuration | |
819 | * @adapter: the adapter | |
820 | * | |
821 | * Retrieves global RSS mode and parameters with which we have to live | |
822 | * and stores them in the @adapter's RSS parameters. | |
823 | */ | |
824 | int t4vf_get_rss_glb_config(struct adapter *adapter) | |
825 | { | |
826 | struct rss_params *rss = &adapter->params.rss; | |
827 | struct fw_rss_glb_config_cmd cmd, rpl; | |
828 | int v; | |
829 | ||
830 | /* | |
831 | * Execute an RSS Global Configuration read command to retrieve | |
832 | * our RSS configuration. | |
833 | */ | |
834 | memset(&cmd, 0, sizeof(cmd)); | |
e2ac9628 HS |
835 | cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) | |
836 | FW_CMD_REQUEST_F | | |
837 | FW_CMD_READ_F); | |
16f8bd4b CL |
838 | cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); |
839 | v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); | |
840 | if (v) | |
841 | return v; | |
842 | ||
843 | /* | |
844 | * Translate the big-endian RSS Global Configuration into our | |
845 | * cpu-endian format based on the RSS mode. We also do first level | |
846 | * filtering at this point to weed out modes which don't support | |
847 | * VF Drivers ... | |
848 | */ | |
b2e1a3f0 | 849 | rss->mode = FW_RSS_GLB_CONFIG_CMD_MODE_G( |
16f8bd4b CL |
850 | be32_to_cpu(rpl.u.manual.mode_pkd)); |
851 | switch (rss->mode) { | |
852 | case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: { | |
853 | u32 word = be32_to_cpu( | |
854 | rpl.u.basicvirtual.synmapen_to_hashtoeplitz); | |
855 | ||
856 | rss->u.basicvirtual.synmapen = | |
b2e1a3f0 | 857 | ((word & FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_F) != 0); |
16f8bd4b | 858 | rss->u.basicvirtual.syn4tupenipv6 = |
b2e1a3f0 | 859 | ((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_F) != 0); |
16f8bd4b | 860 | rss->u.basicvirtual.syn2tupenipv6 = |
b2e1a3f0 | 861 | ((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_F) != 0); |
16f8bd4b | 862 | rss->u.basicvirtual.syn4tupenipv4 = |
b2e1a3f0 | 863 | ((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_F) != 0); |
16f8bd4b | 864 | rss->u.basicvirtual.syn2tupenipv4 = |
b2e1a3f0 | 865 | ((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_F) != 0); |
16f8bd4b CL |
866 | |
867 | rss->u.basicvirtual.ofdmapen = | |
b2e1a3f0 | 868 | ((word & FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_F) != 0); |
16f8bd4b CL |
869 | |
870 | rss->u.basicvirtual.tnlmapen = | |
b2e1a3f0 | 871 | ((word & FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F) != 0); |
16f8bd4b | 872 | rss->u.basicvirtual.tnlalllookup = |
b2e1a3f0 | 873 | ((word & FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F) != 0); |
16f8bd4b CL |
874 | |
875 | rss->u.basicvirtual.hashtoeplitz = | |
b2e1a3f0 | 876 | ((word & FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_F) != 0); |
16f8bd4b CL |
877 | |
878 | /* we need at least Tunnel Map Enable to be set */ | |
879 | if (!rss->u.basicvirtual.tnlmapen) | |
880 | return -EINVAL; | |
881 | break; | |
882 | } | |
883 | ||
884 | default: | |
885 | /* all unknown/unsupported RSS modes result in an error */ | |
886 | return -EINVAL; | |
887 | } | |
888 | ||
889 | return 0; | |
890 | } | |
891 | ||
892 | /** | |
893 | * t4vf_get_vfres - retrieve VF resource limits | |
894 | * @adapter: the adapter | |
895 | * | |
896 | * Retrieves configured resource limits and capabilities for a virtual | |
897 | * function. The results are stored in @adapter->vfres. | |
898 | */ | |
899 | int t4vf_get_vfres(struct adapter *adapter) | |
900 | { | |
901 | struct vf_resources *vfres = &adapter->params.vfres; | |
902 | struct fw_pfvf_cmd cmd, rpl; | |
903 | int v; | |
904 | u32 word; | |
905 | ||
906 | /* | |
907 | * Execute PFVF Read command to get VF resource limits; bail out early | |
908 | * with error on command failure. | |
909 | */ | |
910 | memset(&cmd, 0, sizeof(cmd)); | |
e2ac9628 HS |
911 | cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) | |
912 | FW_CMD_REQUEST_F | | |
913 | FW_CMD_READ_F); | |
16f8bd4b CL |
914 | cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); |
915 | v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); | |
916 | if (v) | |
917 | return v; | |
918 | ||
919 | /* | |
920 | * Extract VF resource limits and return success. | |
921 | */ | |
922 | word = be32_to_cpu(rpl.niqflint_niq); | |
5167865a HS |
923 | vfres->niqflint = FW_PFVF_CMD_NIQFLINT_G(word); |
924 | vfres->niq = FW_PFVF_CMD_NIQ_G(word); | |
16f8bd4b CL |
925 | |
926 | word = be32_to_cpu(rpl.type_to_neq); | |
5167865a HS |
927 | vfres->neq = FW_PFVF_CMD_NEQ_G(word); |
928 | vfres->pmask = FW_PFVF_CMD_PMASK_G(word); | |
16f8bd4b CL |
929 | |
930 | word = be32_to_cpu(rpl.tc_to_nexactf); | |
5167865a HS |
931 | vfres->tc = FW_PFVF_CMD_TC_G(word); |
932 | vfres->nvi = FW_PFVF_CMD_NVI_G(word); | |
933 | vfres->nexactf = FW_PFVF_CMD_NEXACTF_G(word); | |
16f8bd4b CL |
934 | |
935 | word = be32_to_cpu(rpl.r_caps_to_nethctrl); | |
5167865a HS |
936 | vfres->r_caps = FW_PFVF_CMD_R_CAPS_G(word); |
937 | vfres->wx_caps = FW_PFVF_CMD_WX_CAPS_G(word); | |
938 | vfres->nethctrl = FW_PFVF_CMD_NETHCTRL_G(word); | |
16f8bd4b CL |
939 | |
940 | return 0; | |
941 | } | |
942 | ||
943 | /** | |
944 | * t4vf_read_rss_vi_config - read a VI's RSS configuration | |
945 | * @adapter: the adapter | |
946 | * @viid: Virtual Interface ID | |
947 | * @config: pointer to host-native VI RSS Configuration buffer | |
948 | * | |
949 | * Reads the Virtual Interface's RSS configuration information and | |
950 | * translates it into CPU-native format. | |
951 | */ | |
952 | int t4vf_read_rss_vi_config(struct adapter *adapter, unsigned int viid, | |
953 | union rss_vi_config *config) | |
954 | { | |
955 | struct fw_rss_vi_config_cmd cmd, rpl; | |
956 | int v; | |
957 | ||
958 | memset(&cmd, 0, sizeof(cmd)); | |
e2ac9628 HS |
959 | cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) | |
960 | FW_CMD_REQUEST_F | | |
961 | FW_CMD_READ_F | | |
16f8bd4b CL |
962 | FW_RSS_VI_CONFIG_CMD_VIID(viid)); |
963 | cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); | |
964 | v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); | |
965 | if (v) | |
966 | return v; | |
967 | ||
968 | switch (adapter->params.rss.mode) { | |
969 | case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: { | |
970 | u32 word = be32_to_cpu(rpl.u.basicvirtual.defaultq_to_udpen); | |
971 | ||
972 | config->basicvirtual.ip6fourtupen = | |
b2e1a3f0 | 973 | ((word & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) != 0); |
16f8bd4b | 974 | config->basicvirtual.ip6twotupen = |
b2e1a3f0 | 975 | ((word & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) != 0); |
16f8bd4b | 976 | config->basicvirtual.ip4fourtupen = |
b2e1a3f0 | 977 | ((word & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) != 0); |
16f8bd4b | 978 | config->basicvirtual.ip4twotupen = |
b2e1a3f0 | 979 | ((word & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) != 0); |
16f8bd4b | 980 | config->basicvirtual.udpen = |
b2e1a3f0 | 981 | ((word & FW_RSS_VI_CONFIG_CMD_UDPEN_F) != 0); |
16f8bd4b | 982 | config->basicvirtual.defaultq = |
b2e1a3f0 | 983 | FW_RSS_VI_CONFIG_CMD_DEFAULTQ_G(word); |
16f8bd4b CL |
984 | break; |
985 | } | |
986 | ||
987 | default: | |
988 | return -EINVAL; | |
989 | } | |
990 | ||
991 | return 0; | |
992 | } | |
993 | ||
994 | /** | |
995 | * t4vf_write_rss_vi_config - write a VI's RSS configuration | |
996 | * @adapter: the adapter | |
997 | * @viid: Virtual Interface ID | |
998 | * @config: pointer to host-native VI RSS Configuration buffer | |
999 | * | |
1000 | * Write the Virtual Interface's RSS configuration information | |
1001 | * (translating it into firmware-native format before writing). | |
1002 | */ | |
1003 | int t4vf_write_rss_vi_config(struct adapter *adapter, unsigned int viid, | |
1004 | union rss_vi_config *config) | |
1005 | { | |
1006 | struct fw_rss_vi_config_cmd cmd, rpl; | |
1007 | ||
1008 | memset(&cmd, 0, sizeof(cmd)); | |
e2ac9628 HS |
1009 | cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) | |
1010 | FW_CMD_REQUEST_F | | |
1011 | FW_CMD_WRITE_F | | |
16f8bd4b CL |
1012 | FW_RSS_VI_CONFIG_CMD_VIID(viid)); |
1013 | cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); | |
1014 | switch (adapter->params.rss.mode) { | |
1015 | case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: { | |
1016 | u32 word = 0; | |
1017 | ||
1018 | if (config->basicvirtual.ip6fourtupen) | |
b2e1a3f0 | 1019 | word |= FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F; |
16f8bd4b | 1020 | if (config->basicvirtual.ip6twotupen) |
b2e1a3f0 | 1021 | word |= FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F; |
16f8bd4b | 1022 | if (config->basicvirtual.ip4fourtupen) |
b2e1a3f0 | 1023 | word |= FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F; |
16f8bd4b | 1024 | if (config->basicvirtual.ip4twotupen) |
b2e1a3f0 | 1025 | word |= FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F; |
16f8bd4b | 1026 | if (config->basicvirtual.udpen) |
b2e1a3f0 HS |
1027 | word |= FW_RSS_VI_CONFIG_CMD_UDPEN_F; |
1028 | word |= FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V( | |
16f8bd4b CL |
1029 | config->basicvirtual.defaultq); |
1030 | cmd.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(word); | |
1031 | break; | |
1032 | } | |
1033 | ||
1034 | default: | |
1035 | return -EINVAL; | |
1036 | } | |
1037 | ||
1038 | return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); | |
1039 | } | |
1040 | ||
1041 | /** | |
1042 | * t4vf_config_rss_range - configure a portion of the RSS mapping table | |
1043 | * @adapter: the adapter | |
1044 | * @viid: Virtual Interface of RSS Table Slice | |
1045 | * @start: starting entry in the table to write | |
1046 | * @n: how many table entries to write | |
1047 | * @rspq: values for the "Response Queue" (Ingress Queue) lookup table | |
1048 | * @nrspq: number of values in @rspq | |
1049 | * | |
1050 | * Programs the selected part of the VI's RSS mapping table with the | |
1051 | * provided values. If @nrspq < @n the supplied values are used repeatedly | |
1052 | * until the full table range is populated. | |
1053 | * | |
1054 | * The caller must ensure the values in @rspq are in the range 0..1023. | |
1055 | */ | |
1056 | int t4vf_config_rss_range(struct adapter *adapter, unsigned int viid, | |
1057 | int start, int n, const u16 *rspq, int nrspq) | |
1058 | { | |
1059 | const u16 *rsp = rspq; | |
1060 | const u16 *rsp_end = rspq+nrspq; | |
1061 | struct fw_rss_ind_tbl_cmd cmd; | |
1062 | ||
1063 | /* | |
1064 | * Initialize firmware command template to write the RSS table. | |
1065 | */ | |
1066 | memset(&cmd, 0, sizeof(cmd)); | |
e2ac9628 HS |
1067 | cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) | |
1068 | FW_CMD_REQUEST_F | | |
1069 | FW_CMD_WRITE_F | | |
b2e1a3f0 | 1070 | FW_RSS_IND_TBL_CMD_VIID_V(viid)); |
16f8bd4b CL |
1071 | cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); |
1072 | ||
1073 | /* | |
1074 | * Each firmware RSS command can accommodate up to 32 RSS Ingress | |
1075 | * Queue Identifiers. These Ingress Queue IDs are packed three to | |
1076 | * a 32-bit word as 10-bit values with the upper remaining 2 bits | |
1077 | * reserved. | |
1078 | */ | |
1079 | while (n > 0) { | |
1080 | __be32 *qp = &cmd.iq0_to_iq2; | |
1081 | int nq = min(n, 32); | |
1082 | int ret; | |
1083 | ||
1084 | /* | |
1085 | * Set up the firmware RSS command header to send the next | |
1086 | * "nq" Ingress Queue IDs to the firmware. | |
1087 | */ | |
1088 | cmd.niqid = cpu_to_be16(nq); | |
1089 | cmd.startidx = cpu_to_be16(start); | |
1090 | ||
1091 | /* | |
1092 | * "nq" more done for the start of the next loop. | |
1093 | */ | |
1094 | start += nq; | |
1095 | n -= nq; | |
1096 | ||
1097 | /* | |
1098 | * While there are still Ingress Queue IDs to stuff into the | |
1099 | * current firmware RSS command, retrieve them from the | |
1100 | * Ingress Queue ID array and insert them into the command. | |
1101 | */ | |
1102 | while (nq > 0) { | |
1103 | /* | |
1104 | * Grab up to the next 3 Ingress Queue IDs (wrapping | |
1105 | * around the Ingress Queue ID array if necessary) and | |
1106 | * insert them into the firmware RSS command at the | |
1107 | * current 3-tuple position within the command. | |
1108 | */ | |
1109 | u16 qbuf[3]; | |
1110 | u16 *qbp = qbuf; | |
1111 | int nqbuf = min(3, nq); | |
1112 | ||
1113 | nq -= nqbuf; | |
1114 | qbuf[0] = qbuf[1] = qbuf[2] = 0; | |
1115 | while (nqbuf) { | |
1116 | nqbuf--; | |
1117 | *qbp++ = *rsp++; | |
1118 | if (rsp >= rsp_end) | |
1119 | rsp = rspq; | |
1120 | } | |
b2e1a3f0 HS |
1121 | *qp++ = cpu_to_be32(FW_RSS_IND_TBL_CMD_IQ0_V(qbuf[0]) | |
1122 | FW_RSS_IND_TBL_CMD_IQ1_V(qbuf[1]) | | |
1123 | FW_RSS_IND_TBL_CMD_IQ2_V(qbuf[2])); | |
16f8bd4b CL |
1124 | } |
1125 | ||
1126 | /* | |
1127 | * Send this portion of the RSS table update to the firmware; | |
1128 | * bail out on any errors. | |
1129 | */ | |
1130 | ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); | |
1131 | if (ret) | |
1132 | return ret; | |
1133 | } | |
1134 | return 0; | |
1135 | } | |
1136 | ||
1137 | /** | |
1138 | * t4vf_alloc_vi - allocate a virtual interface on a port | |
1139 | * @adapter: the adapter | |
1140 | * @port_id: physical port associated with the VI | |
1141 | * | |
1142 | * Allocate a new Virtual Interface and bind it to the indicated | |
1143 | * physical port. Return the new Virtual Interface Identifier on | |
1144 | * success, or a [negative] error number on failure. | |
1145 | */ | |
1146 | int t4vf_alloc_vi(struct adapter *adapter, int port_id) | |
1147 | { | |
1148 | struct fw_vi_cmd cmd, rpl; | |
1149 | int v; | |
1150 | ||
1151 | /* | |
1152 | * Execute a VI command to allocate Virtual Interface and return its | |
1153 | * VIID. | |
1154 | */ | |
1155 | memset(&cmd, 0, sizeof(cmd)); | |
e2ac9628 HS |
1156 | cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | |
1157 | FW_CMD_REQUEST_F | | |
1158 | FW_CMD_WRITE_F | | |
1159 | FW_CMD_EXEC_F); | |
16f8bd4b | 1160 | cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) | |
2b5fb1f2 HS |
1161 | FW_VI_CMD_ALLOC_F); |
1162 | cmd.portid_pkd = FW_VI_CMD_PORTID_V(port_id); | |
16f8bd4b CL |
1163 | v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); |
1164 | if (v) | |
1165 | return v; | |
1166 | ||
2b5fb1f2 | 1167 | return FW_VI_CMD_VIID_G(be16_to_cpu(rpl.type_viid)); |
16f8bd4b CL |
1168 | } |
1169 | ||
1170 | /** | |
1171 | * t4vf_free_vi -- free a virtual interface | |
1172 | * @adapter: the adapter | |
1173 | * @viid: the virtual interface identifier | |
1174 | * | |
1175 | * Free a previously allocated Virtual Interface. Return an error on | |
1176 | * failure. | |
1177 | */ | |
1178 | int t4vf_free_vi(struct adapter *adapter, int viid) | |
1179 | { | |
1180 | struct fw_vi_cmd cmd; | |
1181 | ||
1182 | /* | |
1183 | * Execute a VI command to free the Virtual Interface. | |
1184 | */ | |
1185 | memset(&cmd, 0, sizeof(cmd)); | |
e2ac9628 HS |
1186 | cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | |
1187 | FW_CMD_REQUEST_F | | |
1188 | FW_CMD_EXEC_F); | |
16f8bd4b | 1189 | cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) | |
2b5fb1f2 HS |
1190 | FW_VI_CMD_FREE_F); |
1191 | cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid)); | |
16f8bd4b CL |
1192 | return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); |
1193 | } | |
1194 | ||
1195 | /** | |
1196 | * t4vf_enable_vi - enable/disable a virtual interface | |
1197 | * @adapter: the adapter | |
1198 | * @viid: the Virtual Interface ID | |
1199 | * @rx_en: 1=enable Rx, 0=disable Rx | |
1200 | * @tx_en: 1=enable Tx, 0=disable Tx | |
1201 | * | |
1202 | * Enables/disables a virtual interface. | |
1203 | */ | |
1204 | int t4vf_enable_vi(struct adapter *adapter, unsigned int viid, | |
1205 | bool rx_en, bool tx_en) | |
1206 | { | |
1207 | struct fw_vi_enable_cmd cmd; | |
1208 | ||
1209 | memset(&cmd, 0, sizeof(cmd)); | |
e2ac9628 HS |
1210 | cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | |
1211 | FW_CMD_REQUEST_F | | |
1212 | FW_CMD_EXEC_F | | |
2b5fb1f2 HS |
1213 | FW_VI_ENABLE_CMD_VIID_V(viid)); |
1214 | cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) | | |
1215 | FW_VI_ENABLE_CMD_EEN_V(tx_en) | | |
16f8bd4b CL |
1216 | FW_LEN16(cmd)); |
1217 | return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); | |
1218 | } | |
1219 | ||
1220 | /** | |
1221 | * t4vf_identify_port - identify a VI's port by blinking its LED | |
1222 | * @adapter: the adapter | |
1223 | * @viid: the Virtual Interface ID | |
1224 | * @nblinks: how many times to blink LED at 2.5 Hz | |
1225 | * | |
1226 | * Identifies a VI's port by blinking its LED. | |
1227 | */ | |
1228 | int t4vf_identify_port(struct adapter *adapter, unsigned int viid, | |
1229 | unsigned int nblinks) | |
1230 | { | |
1231 | struct fw_vi_enable_cmd cmd; | |
1232 | ||
1233 | memset(&cmd, 0, sizeof(cmd)); | |
e2ac9628 HS |
1234 | cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | |
1235 | FW_CMD_REQUEST_F | | |
1236 | FW_CMD_EXEC_F | | |
2b5fb1f2 HS |
1237 | FW_VI_ENABLE_CMD_VIID_V(viid)); |
1238 | cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F | | |
16f8bd4b CL |
1239 | FW_LEN16(cmd)); |
1240 | cmd.blinkdur = cpu_to_be16(nblinks); | |
1241 | return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); | |
1242 | } | |
1243 | ||
1244 | /** | |
1245 | * t4vf_set_rxmode - set Rx properties of a virtual interface | |
1246 | * @adapter: the adapter | |
1247 | * @viid: the VI id | |
1248 | * @mtu: the new MTU or -1 for no change | |
1249 | * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change | |
1250 | * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change | |
1251 | * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change | |
1252 | * @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it, | |
1253 | * -1 no change | |
1254 | * | |
1255 | * Sets Rx properties of a virtual interface. | |
1256 | */ | |
1257 | int t4vf_set_rxmode(struct adapter *adapter, unsigned int viid, | |
1258 | int mtu, int promisc, int all_multi, int bcast, int vlanex, | |
1259 | bool sleep_ok) | |
1260 | { | |
1261 | struct fw_vi_rxmode_cmd cmd; | |
1262 | ||
1263 | /* convert to FW values */ | |
1264 | if (mtu < 0) | |
2b5fb1f2 | 1265 | mtu = FW_VI_RXMODE_CMD_MTU_M; |
16f8bd4b | 1266 | if (promisc < 0) |
2b5fb1f2 | 1267 | promisc = FW_VI_RXMODE_CMD_PROMISCEN_M; |
16f8bd4b | 1268 | if (all_multi < 0) |
2b5fb1f2 | 1269 | all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M; |
16f8bd4b | 1270 | if (bcast < 0) |
2b5fb1f2 | 1271 | bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M; |
16f8bd4b | 1272 | if (vlanex < 0) |
2b5fb1f2 | 1273 | vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M; |
16f8bd4b CL |
1274 | |
1275 | memset(&cmd, 0, sizeof(cmd)); | |
e2ac9628 HS |
1276 | cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) | |
1277 | FW_CMD_REQUEST_F | | |
1278 | FW_CMD_WRITE_F | | |
2b5fb1f2 | 1279 | FW_VI_RXMODE_CMD_VIID_V(viid)); |
16f8bd4b CL |
1280 | cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); |
1281 | cmd.mtu_to_vlanexen = | |
2b5fb1f2 HS |
1282 | cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) | |
1283 | FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) | | |
1284 | FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) | | |
1285 | FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) | | |
1286 | FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex)); | |
16f8bd4b CL |
1287 | return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok); |
1288 | } | |
1289 | ||
1290 | /** | |
1291 | * t4vf_alloc_mac_filt - allocates exact-match filters for MAC addresses | |
1292 | * @adapter: the adapter | |
1293 | * @viid: the Virtual Interface Identifier | |
1294 | * @free: if true any existing filters for this VI id are first removed | |
1295 | * @naddr: the number of MAC addresses to allocate filters for (up to 7) | |
1296 | * @addr: the MAC address(es) | |
1297 | * @idx: where to store the index of each allocated filter | |
1298 | * @hash: pointer to hash address filter bitmap | |
1299 | * @sleep_ok: call is allowed to sleep | |
1300 | * | |
1301 | * Allocates an exact-match filter for each of the supplied addresses and | |
1302 | * sets it to the corresponding address. If @idx is not %NULL it should | |
1303 | * have at least @naddr entries, each of which will be set to the index of | |
1304 | * the filter allocated for the corresponding MAC address. If a filter | |
1305 | * could not be allocated for an address its index is set to 0xffff. | |
1306 | * If @hash is not %NULL addresses that fail to allocate an exact filter | |
1307 | * are hashed and update the hash filter bitmap pointed at by @hash. | |
1308 | * | |
1309 | * Returns a negative error number or the number of filters allocated. | |
1310 | */ | |
1311 | int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free, | |
1312 | unsigned int naddr, const u8 **addr, u16 *idx, | |
1313 | u64 *hash, bool sleep_ok) | |
1314 | { | |
42eb59d3 CL |
1315 | int offset, ret = 0; |
1316 | unsigned nfilters = 0; | |
1317 | unsigned int rem = naddr; | |
16f8bd4b | 1318 | struct fw_vi_mac_cmd cmd, rpl; |
41fc2e41 | 1319 | unsigned int max_naddr = adapter->params.arch.mps_tcam_size; |
16f8bd4b | 1320 | |
622c62b5 | 1321 | if (naddr > max_naddr) |
16f8bd4b | 1322 | return -EINVAL; |
16f8bd4b | 1323 | |
42eb59d3 CL |
1324 | for (offset = 0; offset < naddr; /**/) { |
1325 | unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact) | |
1326 | ? rem | |
1327 | : ARRAY_SIZE(cmd.u.exact)); | |
1328 | size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, | |
1329 | u.exact[fw_naddr]), 16); | |
1330 | struct fw_vi_mac_exact *p; | |
1331 | int i; | |
1332 | ||
1333 | memset(&cmd, 0, sizeof(cmd)); | |
e2ac9628 HS |
1334 | cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | |
1335 | FW_CMD_REQUEST_F | | |
1336 | FW_CMD_WRITE_F | | |
1337 | (free ? FW_CMD_EXEC_F : 0) | | |
2b5fb1f2 | 1338 | FW_VI_MAC_CMD_VIID_V(viid)); |
42eb59d3 | 1339 | cmd.freemacs_to_len16 = |
2b5fb1f2 | 1340 | cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) | |
e2ac9628 | 1341 | FW_CMD_LEN16_V(len16)); |
42eb59d3 CL |
1342 | |
1343 | for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) { | |
1344 | p->valid_to_idx = cpu_to_be16( | |
2b5fb1f2 HS |
1345 | FW_VI_MAC_CMD_VALID_F | |
1346 | FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC)); | |
42eb59d3 CL |
1347 | memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr)); |
1348 | } | |
16f8bd4b | 1349 | |
42eb59d3 CL |
1350 | |
1351 | ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl, | |
1352 | sleep_ok); | |
1353 | if (ret && ret != -ENOMEM) | |
1354 | break; | |
1355 | ||
1356 | for (i = 0, p = rpl.u.exact; i < fw_naddr; i++, p++) { | |
2b5fb1f2 | 1357 | u16 index = FW_VI_MAC_CMD_IDX_G( |
42eb59d3 CL |
1358 | be16_to_cpu(p->valid_to_idx)); |
1359 | ||
1360 | if (idx) | |
1361 | idx[offset+i] = | |
622c62b5 | 1362 | (index >= max_naddr |
42eb59d3 CL |
1363 | ? 0xffff |
1364 | : index); | |
622c62b5 | 1365 | if (index < max_naddr) |
42eb59d3 CL |
1366 | nfilters++; |
1367 | else if (hash) | |
1368 | *hash |= (1ULL << hash_mac_addr(addr[offset+i])); | |
1369 | } | |
1370 | ||
1371 | free = false; | |
1372 | offset += fw_naddr; | |
1373 | rem -= fw_naddr; | |
16f8bd4b | 1374 | } |
42eb59d3 CL |
1375 | |
1376 | /* | |
1377 | * If there were no errors or we merely ran out of room in our MAC | |
1378 | * address arena, return the number of filters actually written. | |
1379 | */ | |
1380 | if (ret == 0 || ret == -ENOMEM) | |
1381 | ret = nfilters; | |
16f8bd4b CL |
1382 | return ret; |
1383 | } | |
1384 | ||
fe5d2709 HS |
1385 | /** |
1386 | * t4vf_free_mac_filt - frees exact-match filters of given MAC addresses | |
1387 | * @adapter: the adapter | |
1388 | * @viid: the VI id | |
1389 | * @naddr: the number of MAC addresses to free filters for (up to 7) | |
1390 | * @addr: the MAC address(es) | |
1391 | * @sleep_ok: call is allowed to sleep | |
1392 | * | |
1393 | * Frees the exact-match filter for each of the supplied addresses | |
1394 | * | |
1395 | * Returns a negative error number or the number of filters freed. | |
1396 | */ | |
1397 | int t4vf_free_mac_filt(struct adapter *adapter, unsigned int viid, | |
1398 | unsigned int naddr, const u8 **addr, bool sleep_ok) | |
1399 | { | |
1400 | int offset, ret = 0; | |
1401 | struct fw_vi_mac_cmd cmd; | |
1402 | unsigned int nfilters = 0; | |
1403 | unsigned int max_naddr = adapter->params.arch.mps_tcam_size; | |
1404 | unsigned int rem = naddr; | |
1405 | ||
1406 | if (naddr > max_naddr) | |
1407 | return -EINVAL; | |
1408 | ||
1409 | for (offset = 0; offset < (int)naddr ; /**/) { | |
1410 | unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact) ? | |
1411 | rem : ARRAY_SIZE(cmd.u.exact)); | |
1412 | size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, | |
1413 | u.exact[fw_naddr]), 16); | |
1414 | struct fw_vi_mac_exact *p; | |
1415 | int i; | |
1416 | ||
1417 | memset(&cmd, 0, sizeof(cmd)); | |
1418 | cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | | |
1419 | FW_CMD_REQUEST_F | | |
1420 | FW_CMD_WRITE_F | | |
1421 | FW_CMD_EXEC_V(0) | | |
1422 | FW_VI_MAC_CMD_VIID_V(viid)); | |
1423 | cmd.freemacs_to_len16 = | |
1424 | cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) | | |
1425 | FW_CMD_LEN16_V(len16)); | |
1426 | ||
1427 | for (i = 0, p = cmd.u.exact; i < (int)fw_naddr; i++, p++) { | |
1428 | p->valid_to_idx = cpu_to_be16( | |
1429 | FW_VI_MAC_CMD_VALID_F | | |
1430 | FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_MAC_BASED_FREE)); | |
1431 | memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr)); | |
1432 | } | |
1433 | ||
1434 | ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &cmd, | |
1435 | sleep_ok); | |
1436 | if (ret) | |
1437 | break; | |
1438 | ||
1439 | for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) { | |
1440 | u16 index = FW_VI_MAC_CMD_IDX_G( | |
1441 | be16_to_cpu(p->valid_to_idx)); | |
1442 | ||
1443 | if (index < max_naddr) | |
1444 | nfilters++; | |
1445 | } | |
1446 | ||
1447 | offset += fw_naddr; | |
1448 | rem -= fw_naddr; | |
1449 | } | |
1450 | ||
1451 | if (ret == 0) | |
1452 | ret = nfilters; | |
1453 | return ret; | |
1454 | } | |
1455 | ||
16f8bd4b CL |
1456 | /** |
1457 | * t4vf_change_mac - modifies the exact-match filter for a MAC address | |
1458 | * @adapter: the adapter | |
1459 | * @viid: the Virtual Interface ID | |
1460 | * @idx: index of existing filter for old value of MAC address, or -1 | |
1461 | * @addr: the new MAC address value | |
1462 | * @persist: if idx < 0, the new MAC allocation should be persistent | |
1463 | * | |
1464 | * Modifies an exact-match filter and sets it to the new MAC address. | |
1465 | * Note that in general it is not possible to modify the value of a given | |
1466 | * filter so the generic way to modify an address filter is to free the | |
1467 | * one being used by the old address value and allocate a new filter for | |
1468 | * the new address value. @idx can be -1 if the address is a new | |
1469 | * addition. | |
1470 | * | |
1471 | * Returns a negative error number or the index of the filter with the new | |
1472 | * MAC value. | |
1473 | */ | |
1474 | int t4vf_change_mac(struct adapter *adapter, unsigned int viid, | |
1475 | int idx, const u8 *addr, bool persist) | |
1476 | { | |
1477 | int ret; | |
1478 | struct fw_vi_mac_cmd cmd, rpl; | |
1479 | struct fw_vi_mac_exact *p = &cmd.u.exact[0]; | |
1480 | size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, | |
1481 | u.exact[1]), 16); | |
41fc2e41 | 1482 | unsigned int max_mac_addr = adapter->params.arch.mps_tcam_size; |
16f8bd4b CL |
1483 | |
1484 | /* | |
1485 | * If this is a new allocation, determine whether it should be | |
1486 | * persistent (across a "freemacs" operation) or not. | |
1487 | */ | |
1488 | if (idx < 0) | |
1489 | idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC; | |
1490 | ||
1491 | memset(&cmd, 0, sizeof(cmd)); | |
e2ac9628 HS |
1492 | cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | |
1493 | FW_CMD_REQUEST_F | | |
1494 | FW_CMD_WRITE_F | | |
2b5fb1f2 | 1495 | FW_VI_MAC_CMD_VIID_V(viid)); |
e2ac9628 | 1496 | cmd.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16)); |
2b5fb1f2 HS |
1497 | p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F | |
1498 | FW_VI_MAC_CMD_IDX_V(idx)); | |
16f8bd4b CL |
1499 | memcpy(p->macaddr, addr, sizeof(p->macaddr)); |
1500 | ||
1501 | ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); | |
1502 | if (ret == 0) { | |
1503 | p = &rpl.u.exact[0]; | |
2b5fb1f2 | 1504 | ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx)); |
41fc2e41 | 1505 | if (ret >= max_mac_addr) |
16f8bd4b CL |
1506 | ret = -ENOMEM; |
1507 | } | |
1508 | return ret; | |
1509 | } | |
1510 | ||
1511 | /** | |
1512 | * t4vf_set_addr_hash - program the MAC inexact-match hash filter | |
1513 | * @adapter: the adapter | |
1514 | * @viid: the Virtual Interface Identifier | |
1515 | * @ucast: whether the hash filter should also match unicast addresses | |
1516 | * @vec: the value to be written to the hash filter | |
1517 | * @sleep_ok: call is allowed to sleep | |
1518 | * | |
1519 | * Sets the 64-bit inexact-match hash filter for a virtual interface. | |
1520 | */ | |
1521 | int t4vf_set_addr_hash(struct adapter *adapter, unsigned int viid, | |
1522 | bool ucast, u64 vec, bool sleep_ok) | |
1523 | { | |
1524 | struct fw_vi_mac_cmd cmd; | |
1525 | size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, | |
1526 | u.exact[0]), 16); | |
1527 | ||
1528 | memset(&cmd, 0, sizeof(cmd)); | |
e2ac9628 HS |
1529 | cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | |
1530 | FW_CMD_REQUEST_F | | |
1531 | FW_CMD_WRITE_F | | |
2b5fb1f2 HS |
1532 | FW_VI_ENABLE_CMD_VIID_V(viid)); |
1533 | cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F | | |
1534 | FW_VI_MAC_CMD_HASHUNIEN_V(ucast) | | |
e2ac9628 | 1535 | FW_CMD_LEN16_V(len16)); |
16f8bd4b CL |
1536 | cmd.u.hash.hashvec = cpu_to_be64(vec); |
1537 | return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok); | |
1538 | } | |
1539 | ||
1540 | /** | |
1541 | * t4vf_get_port_stats - collect "port" statistics | |
1542 | * @adapter: the adapter | |
1543 | * @pidx: the port index | |
1544 | * @s: the stats structure to fill | |
1545 | * | |
1546 | * Collect statistics for the "port"'s Virtual Interface. | |
1547 | */ | |
1548 | int t4vf_get_port_stats(struct adapter *adapter, int pidx, | |
1549 | struct t4vf_port_stats *s) | |
1550 | { | |
1551 | struct port_info *pi = adap2pinfo(adapter, pidx); | |
1552 | struct fw_vi_stats_vf fwstats; | |
1553 | unsigned int rem = VI_VF_NUM_STATS; | |
1554 | __be64 *fwsp = (__be64 *)&fwstats; | |
1555 | ||
1556 | /* | |
1557 | * Grab the Virtual Interface statistics a chunk at a time via mailbox | |
1558 | * commands. We could use a Work Request and get all of them at once | |
1559 | * but that's an asynchronous interface which is awkward to use. | |
1560 | */ | |
1561 | while (rem) { | |
1562 | unsigned int ix = VI_VF_NUM_STATS - rem; | |
1563 | unsigned int nstats = min(6U, rem); | |
1564 | struct fw_vi_stats_cmd cmd, rpl; | |
1565 | size_t len = (offsetof(struct fw_vi_stats_cmd, u) + | |
1566 | sizeof(struct fw_vi_stats_ctl)); | |
1567 | size_t len16 = DIV_ROUND_UP(len, 16); | |
1568 | int ret; | |
1569 | ||
1570 | memset(&cmd, 0, sizeof(cmd)); | |
e2ac9628 | 1571 | cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_STATS_CMD) | |
2b5fb1f2 | 1572 | FW_VI_STATS_CMD_VIID_V(pi->viid) | |
e2ac9628 HS |
1573 | FW_CMD_REQUEST_F | |
1574 | FW_CMD_READ_F); | |
1575 | cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16)); | |
16f8bd4b | 1576 | cmd.u.ctl.nstats_ix = |
2b5fb1f2 HS |
1577 | cpu_to_be16(FW_VI_STATS_CMD_IX_V(ix) | |
1578 | FW_VI_STATS_CMD_NSTATS_V(nstats)); | |
16f8bd4b CL |
1579 | ret = t4vf_wr_mbox_ns(adapter, &cmd, len, &rpl); |
1580 | if (ret) | |
1581 | return ret; | |
1582 | ||
1583 | memcpy(fwsp, &rpl.u.ctl.stat0, sizeof(__be64) * nstats); | |
1584 | ||
1585 | rem -= nstats; | |
1586 | fwsp += nstats; | |
1587 | } | |
1588 | ||
1589 | /* | |
1590 | * Translate firmware statistics into host native statistics. | |
1591 | */ | |
1592 | s->tx_bcast_bytes = be64_to_cpu(fwstats.tx_bcast_bytes); | |
1593 | s->tx_bcast_frames = be64_to_cpu(fwstats.tx_bcast_frames); | |
1594 | s->tx_mcast_bytes = be64_to_cpu(fwstats.tx_mcast_bytes); | |
1595 | s->tx_mcast_frames = be64_to_cpu(fwstats.tx_mcast_frames); | |
1596 | s->tx_ucast_bytes = be64_to_cpu(fwstats.tx_ucast_bytes); | |
1597 | s->tx_ucast_frames = be64_to_cpu(fwstats.tx_ucast_frames); | |
1598 | s->tx_drop_frames = be64_to_cpu(fwstats.tx_drop_frames); | |
1599 | s->tx_offload_bytes = be64_to_cpu(fwstats.tx_offload_bytes); | |
1600 | s->tx_offload_frames = be64_to_cpu(fwstats.tx_offload_frames); | |
1601 | ||
1602 | s->rx_bcast_bytes = be64_to_cpu(fwstats.rx_bcast_bytes); | |
1603 | s->rx_bcast_frames = be64_to_cpu(fwstats.rx_bcast_frames); | |
1604 | s->rx_mcast_bytes = be64_to_cpu(fwstats.rx_mcast_bytes); | |
1605 | s->rx_mcast_frames = be64_to_cpu(fwstats.rx_mcast_frames); | |
1606 | s->rx_ucast_bytes = be64_to_cpu(fwstats.rx_ucast_bytes); | |
1607 | s->rx_ucast_frames = be64_to_cpu(fwstats.rx_ucast_frames); | |
1608 | ||
1609 | s->rx_err_frames = be64_to_cpu(fwstats.rx_err_frames); | |
1610 | ||
1611 | return 0; | |
1612 | } | |
1613 | ||
1614 | /** | |
1615 | * t4vf_iq_free - free an ingress queue and its free lists | |
1616 | * @adapter: the adapter | |
1617 | * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.) | |
1618 | * @iqid: ingress queue ID | |
1619 | * @fl0id: FL0 queue ID or 0xffff if no attached FL0 | |
1620 | * @fl1id: FL1 queue ID or 0xffff if no attached FL1 | |
1621 | * | |
1622 | * Frees an ingress queue and its associated free lists, if any. | |
1623 | */ | |
1624 | int t4vf_iq_free(struct adapter *adapter, unsigned int iqtype, | |
1625 | unsigned int iqid, unsigned int fl0id, unsigned int fl1id) | |
1626 | { | |
1627 | struct fw_iq_cmd cmd; | |
1628 | ||
1629 | memset(&cmd, 0, sizeof(cmd)); | |
e2ac9628 HS |
1630 | cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | |
1631 | FW_CMD_REQUEST_F | | |
1632 | FW_CMD_EXEC_F); | |
6e4b51a6 | 1633 | cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F | |
16f8bd4b CL |
1634 | FW_LEN16(cmd)); |
1635 | cmd.type_to_iqandstindex = | |
6e4b51a6 | 1636 | cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype)); |
16f8bd4b CL |
1637 | |
1638 | cmd.iqid = cpu_to_be16(iqid); | |
1639 | cmd.fl0id = cpu_to_be16(fl0id); | |
1640 | cmd.fl1id = cpu_to_be16(fl1id); | |
1641 | return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); | |
1642 | } | |
1643 | ||
1644 | /** | |
1645 | * t4vf_eth_eq_free - free an Ethernet egress queue | |
1646 | * @adapter: the adapter | |
1647 | * @eqid: egress queue ID | |
1648 | * | |
1649 | * Frees an Ethernet egress queue. | |
1650 | */ | |
1651 | int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid) | |
1652 | { | |
1653 | struct fw_eq_eth_cmd cmd; | |
1654 | ||
1655 | memset(&cmd, 0, sizeof(cmd)); | |
e2ac9628 HS |
1656 | cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) | |
1657 | FW_CMD_REQUEST_F | | |
1658 | FW_CMD_EXEC_F); | |
6e4b51a6 | 1659 | cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F | |
16f8bd4b | 1660 | FW_LEN16(cmd)); |
6e4b51a6 | 1661 | cmd.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid)); |
16f8bd4b CL |
1662 | return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); |
1663 | } | |
1664 | ||
/**
 *	t4vf_handle_fw_rpl - process a firmware reply message
 *	@adapter: the adapter
 *	@rpl: start of the firmware message
 *
 *	Processes a firmware message, such as link state change messages.
 */
int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
{
	const struct fw_cmd_hdr *cmd_hdr = (const struct fw_cmd_hdr *)rpl;
	u8 opcode = FW_CMD_OP_G(be32_to_cpu(cmd_hdr->hi));

	switch (opcode) {
	case FW_PORT_CMD: {
		/*
		 * Link/module state change message.
		 */
		const struct fw_port_cmd *port_cmd =
			(const struct fw_port_cmd *)rpl;
		u32 stat, mod;
		int action, port_id, link_ok, speed, fc, pidx;

		/*
		 * Extract various fields from port status change message.
		 * Only GET_PORT_INFO replies are understood here; anything
		 * else is logged and dropped.
		 */
		action = FW_PORT_CMD_ACTION_G(
			be32_to_cpu(port_cmd->action_to_len16));
		if (action != FW_PORT_ACTION_GET_PORT_INFO) {
			dev_err(adapter->pdev_dev,
				"Unknown firmware PORT reply action %x\n",
				action);
			break;
		}

		port_id = FW_PORT_CMD_PORTID_G(
			be32_to_cpu(port_cmd->op_to_portid));

		/* Decode link status, pause settings and link speed from
		 * the packed lstatus_to_modtype word.
		 */
		stat = be32_to_cpu(port_cmd->u.info.lstatus_to_modtype);
		link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
		speed = 0;
		fc = 0;
		if (stat & FW_PORT_CMD_RXPAUSE_F)
			fc |= PAUSE_RX;
		if (stat & FW_PORT_CMD_TXPAUSE_F)
			fc |= PAUSE_TX;
		/* First matching speed capability bit wins; speed stays 0
		 * (unknown) if none of the known bits are set.
		 */
		if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
			speed = 100;
		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
			speed = 1000;
		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
			speed = 10000;
		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
			speed = 25000;
		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
			speed = 40000;
		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
			speed = 100000;

		/*
		 * Scan all of our "ports" (Virtual Interfaces) looking for
		 * those bound to the physical port which has changed.  If
		 * our recorded state doesn't match the current state,
		 * signal that change to the OS code.
		 */
		for_each_port(adapter, pidx) {
			struct port_info *pi = adap2pinfo(adapter, pidx);
			struct link_config *lc;

			if (pi->port_id != port_id)
				continue;

			lc = &pi->link_cfg;

			/* Report transceiver module changes separately
			 * from link state changes.
			 */
			mod = FW_PORT_CMD_MODTYPE_G(stat);
			if (mod != pi->mod_type) {
				pi->mod_type = mod;
				t4vf_os_portmod_changed(adapter, pidx);
			}

			if (link_ok != lc->link_ok || speed != lc->speed ||
			    fc != lc->fc) {
				/* something changed */
				lc->link_ok = link_ok;
				lc->speed = speed;
				lc->fc = fc;
				lc->supported =
					be16_to_cpu(port_cmd->u.info.pcap);
				lc->lp_advertising =
					be16_to_cpu(port_cmd->u.info.lpacap);
				t4vf_os_link_changed(adapter, pidx, link_ok);
			}
		}
		break;
	}

	default:
		dev_err(adapter->pdev_dev, "Unknown firmware reply %X\n",
			opcode);
	}
	return 0;
}
e0a8b34a HS |
1766 | |
1767 | /** | |
1768 | */ | |
1769 | int t4vf_prep_adapter(struct adapter *adapter) | |
1770 | { | |
1771 | int err; | |
1772 | unsigned int chipid; | |
1773 | ||
1774 | /* Wait for the device to become ready before proceeding ... | |
1775 | */ | |
1776 | err = t4vf_wait_dev_ready(adapter); | |
1777 | if (err) | |
1778 | return err; | |
1779 | ||
1780 | /* Default port and clock for debugging in case we can't reach | |
1781 | * firmware. | |
1782 | */ | |
1783 | adapter->params.nports = 1; | |
1784 | adapter->params.vfres.pmask = 1; | |
1785 | adapter->params.vpd.cclk = 50000; | |
1786 | ||
1787 | adapter->params.chip = 0; | |
1788 | switch (CHELSIO_PCI_ID_VER(adapter->pdev->device)) { | |
1789 | case CHELSIO_T4: | |
1790 | adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, 0); | |
41fc2e41 HS |
1791 | adapter->params.arch.sge_fl_db = DBPRIO_F; |
1792 | adapter->params.arch.mps_tcam_size = | |
1793 | NUM_MPS_CLS_SRAM_L_INSTANCES; | |
e0a8b34a HS |
1794 | break; |
1795 | ||
1796 | case CHELSIO_T5: | |
0d804338 | 1797 | chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A)); |
e0a8b34a | 1798 | adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid); |
41fc2e41 HS |
1799 | adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F; |
1800 | adapter->params.arch.mps_tcam_size = | |
1801 | NUM_MPS_T5_CLS_SRAM_L_INSTANCES; | |
1802 | break; | |
1803 | ||
1804 | case CHELSIO_T6: | |
1805 | chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A)); | |
1806 | adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, chipid); | |
1807 | adapter->params.arch.sge_fl_db = 0; | |
1808 | adapter->params.arch.mps_tcam_size = | |
1809 | NUM_MPS_T5_CLS_SRAM_L_INSTANCES; | |
e0a8b34a HS |
1810 | break; |
1811 | } | |
1812 | ||
1813 | return 0; | |
1814 | } |