/*
 * Copyright (c) 2013-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include <linux/debugfs.h>

#include "mlx5_core.h"

enum {
	CMD_IF_REV = 5,
};

enum {
	CMD_MODE_POLLING,
	CMD_MODE_EVENTS
};

enum {
	NUM_LONG_LISTS = 2,
	NUM_MED_LISTS = 64,
	LONG_LIST_SIZE = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 +
				MLX5_CMD_DATA_BLOCK_SIZE,
	MED_LIST_SIZE = 16 + MLX5_CMD_DATA_BLOCK_SIZE,
};

enum {
	MLX5_CMD_DELIVERY_STAT_OK = 0x0,
	MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR = 0x1,
	MLX5_CMD_DELIVERY_STAT_TOK_ERR = 0x2,
	MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR = 0x3,
	MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR = 0x4,
	MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR = 0x5,
	MLX5_CMD_DELIVERY_STAT_FW_ERR = 0x6,
	MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR = 0x7,
	MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR = 0x8,
	MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR = 0x9,
	MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10,
};

static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
					   struct mlx5_cmd_msg *in,
					   struct mlx5_cmd_msg *out,
					   void *uout, int uout_size,
					   mlx5_cmd_cbk_t cbk,
					   void *context, int page_queue)
{
	gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
	struct mlx5_cmd_work_ent *ent;

	ent = kzalloc(sizeof(*ent), alloc_flags);
	if (!ent)
		return ERR_PTR(-ENOMEM);

	ent->in = in;
	ent->out = out;
	ent->uout = uout;
	ent->uout_size = uout_size;
	ent->callback = cbk;
	ent->context = context;
	ent->cmd = cmd;
	ent->page_queue = page_queue;

	return ent;
}

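/* Tokens are small per-command nonces echoed back by the firmware; zero is
 * never handed out, so the counter wraps from 255 back to 1.
 */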
static u8 alloc_token(struct mlx5_cmd *cmd)
{
	u8 token;

	spin_lock(&cmd->token_lock);
	cmd->token++;
	if (cmd->token == 0)
		cmd->token++;
	token = cmd->token;
	spin_unlock(&cmd->token_lock);

	return token;
}

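/* Command queue slots are tracked in a single bitmask word: a set bit means
 * the corresponding hardware command entry is free.
 */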
static int alloc_ent(struct mlx5_cmd *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
	if (ret < cmd->max_reg_cmds)
		clear_bit(ret, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);

	return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
}

static void free_ent(struct mlx5_cmd *cmd, int idx)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	set_bit(idx, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}

static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
{
	return cmd->cmd_buf + (idx << cmd->log_stride);
}

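/* XOR of all bytes in [offset, offset + len) - the building block of the
 * 8-bit checksums used on command descriptors and mailbox blocks.
 */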
static u8 xor8_buf(void *buf, size_t offset, int len)
{
	u8 *ptr = buf;
	u8 sum = 0;
	int i;
	int end = len + offset;

	for (i = offset; i < end; i++)
		sum ^= ptr[i];

	return sum;
}

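/* Each mailbox block carries two checksums: ctrl_sig covers the control
 * fields starting at rsvd0, and sig covers the whole block.  Both are stored
 * complemented, so XOR-ing a signed region together with its signature byte
 * must yield 0xff on verification.
 */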
static int verify_block_sig(struct mlx5_cmd_prot_block *block)
{
	size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
	int xor_len = sizeof(*block) - sizeof(block->data) - 1;

	if (xor8_buf(block, rsvd0_off, xor_len) != 0xff)
		return -EINVAL;

	if (xor8_buf(block, 0, sizeof(*block)) != 0xff)
		return -EINVAL;

	return 0;
}

static void calc_block_sig(struct mlx5_cmd_prot_block *block)
{
	int ctrl_xor_len = sizeof(*block) - sizeof(block->data) - 2;
	size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);

	block->ctrl_sig = ~xor8_buf(block, rsvd0_off, ctrl_xor_len);
	block->sig = ~xor8_buf(block, 0, sizeof(*block) - 1);
}

static void calc_chain_sig(struct mlx5_cmd_msg *msg)
{
	struct mlx5_cmd_mailbox *next = msg->next;
	int size = msg->len;
	int blen = size - min_t(int, sizeof(msg->first.data), size);
	int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
		/ MLX5_CMD_DATA_BLOCK_SIZE;
	int i = 0;

	for (i = 0; i < n && next; i++) {
		calc_block_sig(next->buf);
		next = next->next;
	}
}

static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
{
	ent->lay->sig = ~xor8_buf(ent->lay, 0, sizeof(*ent->lay));
	if (csum) {
		calc_chain_sig(ent->in);
		calc_chain_sig(ent->out);
	}
}

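/* Busy-wait (sleeping between samples) for firmware to clear the HW
 * ownership bit on the descriptor; the firmware gets an extra second beyond
 * the nominal command timeout.
 */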
static void poll_timeout(struct mlx5_cmd_work_ent *ent)
{
	unsigned long poll_end = jiffies + msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);
	u8 own;

	do {
		own = ent->lay->status_own;
		if (!(own & CMD_OWNER_HW)) {
			ent->ret = 0;
			return;
		}
		usleep_range(5000, 10000);
	} while (time_before(jiffies, poll_end));

	ent->ret = -ETIMEDOUT;
}

static void free_cmd(struct mlx5_cmd_work_ent *ent)
{
	kfree(ent);
}

static int verify_signature(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_cmd_mailbox *next = ent->out->next;
	int err;
	u8 sig;
	int size = ent->out->len;
	int blen = size - min_t(int, sizeof(ent->out->first.data), size);
	int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
		/ MLX5_CMD_DATA_BLOCK_SIZE;
	int i = 0;

	sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
	if (sig != 0xff)
		return -EINVAL;

	for (i = 0; i < n && next; i++) {
		err = verify_block_sig(next->buf);
		if (err)
			return err;

		next = next->next;
	}

	return 0;
}

static void dump_buf(void *buf, int size, int data_only, int offset)
{
	__be32 *p = buf;
	int i;

	for (i = 0; i < size; i += 16) {
		pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
			 be32_to_cpu(p[1]), be32_to_cpu(p[2]),
			 be32_to_cpu(p[3]));
		p += 4;
		offset += 16;
	}
	if (!data_only)
		pr_debug("\n");
}

enum {
	MLX5_DRIVER_STATUS_ABORTED = 0xfe,
	MLX5_DRIVER_SYND = 0xbadd00de,
};

static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
				       u32 *synd, u8 *status)
{
	*synd = 0;
	*status = 0;

	switch (op) {
	case MLX5_CMD_OP_TEARDOWN_HCA:
	case MLX5_CMD_OP_DISABLE_HCA:
	case MLX5_CMD_OP_MANAGE_PAGES:
	case MLX5_CMD_OP_DESTROY_MKEY:
	case MLX5_CMD_OP_DESTROY_EQ:
	case MLX5_CMD_OP_DESTROY_CQ:
	case MLX5_CMD_OP_DESTROY_QP:
	case MLX5_CMD_OP_DESTROY_PSV:
	case MLX5_CMD_OP_DESTROY_SRQ:
	case MLX5_CMD_OP_DESTROY_XRC_SRQ:
	case MLX5_CMD_OP_DESTROY_DCT:
	case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
	case MLX5_CMD_OP_DEALLOC_PD:
	case MLX5_CMD_OP_DEALLOC_UAR:
	case MLX5_CMD_OP_DETTACH_FROM_MCG:
	case MLX5_CMD_OP_DEALLOC_XRCD:
	case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_DESTROY_TIR:
	case MLX5_CMD_OP_DESTROY_SQ:
	case MLX5_CMD_OP_DESTROY_RQ:
	case MLX5_CMD_OP_DESTROY_RMP:
	case MLX5_CMD_OP_DESTROY_TIS:
	case MLX5_CMD_OP_DESTROY_RQT:
	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
	case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
	case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_2ERR_QP:
	case MLX5_CMD_OP_2RST_QP:
	case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
		return MLX5_CMD_STAT_OK;

	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_ADAPTER:
	case MLX5_CMD_OP_INIT_HCA:
	case MLX5_CMD_OP_ENABLE_HCA:
	case MLX5_CMD_OP_QUERY_PAGES:
	case MLX5_CMD_OP_SET_HCA_CAP:
	case MLX5_CMD_OP_QUERY_ISSI:
	case MLX5_CMD_OP_SET_ISSI:
	case MLX5_CMD_OP_CREATE_MKEY:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
	case MLX5_CMD_OP_PAGE_FAULT_RESUME:
	case MLX5_CMD_OP_CREATE_EQ:
	case MLX5_CMD_OP_QUERY_EQ:
	case MLX5_CMD_OP_GEN_EQE:
	case MLX5_CMD_OP_CREATE_CQ:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_MODIFY_CQ:
	case MLX5_CMD_OP_CREATE_QP:
	case MLX5_CMD_OP_RST2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
	case MLX5_CMD_OP_SQERR2RTS_QP:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_SQD_RTS_QP:
	case MLX5_CMD_OP_INIT2INIT_QP:
	case MLX5_CMD_OP_CREATE_PSV:
	case MLX5_CMD_OP_CREATE_SRQ:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_ARM_RQ:
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_ARM_XRC_SRQ:
	case MLX5_CMD_OP_CREATE_DCT:
	case MLX5_CMD_OP_DRAIN_DCT:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
	case MLX5_CMD_OP_QUERY_VPORT_STATE:
	case MLX5_CMD_OP_MODIFY_VPORT_STATE:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
	case MLX5_CMD_OP_SET_ROCE_ADDRESS:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_ALLOC_PD:
	case MLX5_CMD_OP_ALLOC_UAR:
	case MLX5_CMD_OP_CONFIG_INT_MODERATION:
	case MLX5_CMD_OP_ACCESS_REG:
	case MLX5_CMD_OP_ATTACH_TO_MCG:
	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
	case MLX5_CMD_OP_MAD_IFC:
	case MLX5_CMD_OP_QUERY_MAD_DEMUX:
	case MLX5_CMD_OP_SET_MAD_DEMUX:
	case MLX5_CMD_OP_NOP:
	case MLX5_CMD_OP_ALLOC_XRCD:
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_QUERY_CONG_STATUS:
	case MLX5_CMD_OP_MODIFY_CONG_STATUS:
	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
	case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_CREATE_TIR:
	case MLX5_CMD_OP_MODIFY_TIR:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_CREATE_SQ:
	case MLX5_CMD_OP_MODIFY_SQ:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_CREATE_RQ:
	case MLX5_CMD_OP_MODIFY_RQ:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_CREATE_RMP:
	case MLX5_CMD_OP_MODIFY_RMP:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_CREATE_TIS:
	case MLX5_CMD_OP_MODIFY_TIS:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_CREATE_RQT:
	case MLX5_CMD_OP_MODIFY_RQT:
	case MLX5_CMD_OP_QUERY_RQT:

	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:

	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
		*status = MLX5_DRIVER_STATUS_ABORTED;
		*synd = MLX5_DRIVER_SYND;
		return -EIO;
	default:
		mlx5_core_err(dev, "Unknown FW command (%d)\n", op);
		return -EINVAL;
	}
}

const char *mlx5_command_str(int command)
{
#define MLX5_COMMAND_STR_CASE(__cmd) case MLX5_CMD_OP_ ## __cmd: return #__cmd

	switch (command) {
	MLX5_COMMAND_STR_CASE(QUERY_HCA_CAP);
	MLX5_COMMAND_STR_CASE(QUERY_ADAPTER);
	MLX5_COMMAND_STR_CASE(INIT_HCA);
	MLX5_COMMAND_STR_CASE(TEARDOWN_HCA);
	MLX5_COMMAND_STR_CASE(ENABLE_HCA);
	MLX5_COMMAND_STR_CASE(DISABLE_HCA);
	MLX5_COMMAND_STR_CASE(QUERY_PAGES);
	MLX5_COMMAND_STR_CASE(MANAGE_PAGES);
	MLX5_COMMAND_STR_CASE(SET_HCA_CAP);
	MLX5_COMMAND_STR_CASE(QUERY_ISSI);
	MLX5_COMMAND_STR_CASE(SET_ISSI);
	MLX5_COMMAND_STR_CASE(CREATE_MKEY);
	MLX5_COMMAND_STR_CASE(QUERY_MKEY);
	MLX5_COMMAND_STR_CASE(DESTROY_MKEY);
	MLX5_COMMAND_STR_CASE(QUERY_SPECIAL_CONTEXTS);
	MLX5_COMMAND_STR_CASE(PAGE_FAULT_RESUME);
	MLX5_COMMAND_STR_CASE(CREATE_EQ);
	MLX5_COMMAND_STR_CASE(DESTROY_EQ);
	MLX5_COMMAND_STR_CASE(QUERY_EQ);
	MLX5_COMMAND_STR_CASE(GEN_EQE);
	MLX5_COMMAND_STR_CASE(CREATE_CQ);
	MLX5_COMMAND_STR_CASE(DESTROY_CQ);
	MLX5_COMMAND_STR_CASE(QUERY_CQ);
	MLX5_COMMAND_STR_CASE(MODIFY_CQ);
	MLX5_COMMAND_STR_CASE(CREATE_QP);
	MLX5_COMMAND_STR_CASE(DESTROY_QP);
	MLX5_COMMAND_STR_CASE(RST2INIT_QP);
	MLX5_COMMAND_STR_CASE(INIT2RTR_QP);
	MLX5_COMMAND_STR_CASE(RTR2RTS_QP);
	MLX5_COMMAND_STR_CASE(RTS2RTS_QP);
	MLX5_COMMAND_STR_CASE(SQERR2RTS_QP);
	MLX5_COMMAND_STR_CASE(2ERR_QP);
	MLX5_COMMAND_STR_CASE(2RST_QP);
	MLX5_COMMAND_STR_CASE(QUERY_QP);
	MLX5_COMMAND_STR_CASE(SQD_RTS_QP);
	MLX5_COMMAND_STR_CASE(INIT2INIT_QP);
	MLX5_COMMAND_STR_CASE(CREATE_PSV);
	MLX5_COMMAND_STR_CASE(DESTROY_PSV);
	MLX5_COMMAND_STR_CASE(CREATE_SRQ);
	MLX5_COMMAND_STR_CASE(DESTROY_SRQ);
	MLX5_COMMAND_STR_CASE(QUERY_SRQ);
	MLX5_COMMAND_STR_CASE(ARM_RQ);
	MLX5_COMMAND_STR_CASE(CREATE_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(DESTROY_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(QUERY_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(ARM_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(CREATE_DCT);
	MLX5_COMMAND_STR_CASE(DESTROY_DCT);
	MLX5_COMMAND_STR_CASE(DRAIN_DCT);
	MLX5_COMMAND_STR_CASE(QUERY_DCT);
	MLX5_COMMAND_STR_CASE(ARM_DCT_FOR_KEY_VIOLATION);
	MLX5_COMMAND_STR_CASE(QUERY_VPORT_STATE);
	MLX5_COMMAND_STR_CASE(MODIFY_VPORT_STATE);
	MLX5_COMMAND_STR_CASE(QUERY_ESW_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_ESW_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_NIC_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_NIC_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_ROCE_ADDRESS);
	MLX5_COMMAND_STR_CASE(SET_ROCE_ADDRESS);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY);
	MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER);
	MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(ALLOC_PD);
	MLX5_COMMAND_STR_CASE(DEALLOC_PD);
	MLX5_COMMAND_STR_CASE(ALLOC_UAR);
	MLX5_COMMAND_STR_CASE(DEALLOC_UAR);
	MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION);
	MLX5_COMMAND_STR_CASE(ACCESS_REG);
	MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG);
	MLX5_COMMAND_STR_CASE(DETTACH_FROM_MCG);
	MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG);
	MLX5_COMMAND_STR_CASE(MAD_IFC);
	MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX);
	MLX5_COMMAND_STR_CASE(SET_MAD_DEMUX);
	MLX5_COMMAND_STR_CASE(NOP);
	MLX5_COMMAND_STR_CASE(ALLOC_XRCD);
	MLX5_COMMAND_STR_CASE(DEALLOC_XRCD);
	MLX5_COMMAND_STR_CASE(ALLOC_TRANSPORT_DOMAIN);
	MLX5_COMMAND_STR_CASE(DEALLOC_TRANSPORT_DOMAIN);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATUS);
	MLX5_COMMAND_STR_CASE(MODIFY_CONG_STATUS);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_PARAMS);
	MLX5_COMMAND_STR_CASE(MODIFY_CONG_PARAMS);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATISTICS);
	MLX5_COMMAND_STR_CASE(ADD_VXLAN_UDP_DPORT);
	MLX5_COMMAND_STR_CASE(DELETE_VXLAN_UDP_DPORT);
	MLX5_COMMAND_STR_CASE(SET_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(QUERY_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(SET_WOL_ROL);
	MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL);
	MLX5_COMMAND_STR_CASE(CREATE_TIR);
	MLX5_COMMAND_STR_CASE(MODIFY_TIR);
	MLX5_COMMAND_STR_CASE(DESTROY_TIR);
	MLX5_COMMAND_STR_CASE(QUERY_TIR);
	MLX5_COMMAND_STR_CASE(CREATE_SQ);
	MLX5_COMMAND_STR_CASE(MODIFY_SQ);
	MLX5_COMMAND_STR_CASE(DESTROY_SQ);
	MLX5_COMMAND_STR_CASE(QUERY_SQ);
	MLX5_COMMAND_STR_CASE(CREATE_RQ);
	MLX5_COMMAND_STR_CASE(MODIFY_RQ);
	MLX5_COMMAND_STR_CASE(DESTROY_RQ);
	MLX5_COMMAND_STR_CASE(QUERY_RQ);
	MLX5_COMMAND_STR_CASE(CREATE_RMP);
	MLX5_COMMAND_STR_CASE(MODIFY_RMP);
	MLX5_COMMAND_STR_CASE(DESTROY_RMP);
	MLX5_COMMAND_STR_CASE(QUERY_RMP);
	MLX5_COMMAND_STR_CASE(CREATE_TIS);
	MLX5_COMMAND_STR_CASE(MODIFY_TIS);
	MLX5_COMMAND_STR_CASE(DESTROY_TIS);
	MLX5_COMMAND_STR_CASE(QUERY_TIS);
	MLX5_COMMAND_STR_CASE(CREATE_RQT);
	MLX5_COMMAND_STR_CASE(MODIFY_RQT);
	MLX5_COMMAND_STR_CASE(DESTROY_RQT);
	MLX5_COMMAND_STR_CASE(QUERY_RQT);
	MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ROOT);
	MLX5_COMMAND_STR_CASE(CREATE_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(CREATE_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(DELETE_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER);
	MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
	MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
	default: return "unknown command opcode";
	}
}

static void dump_command(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_work_ent *ent, int input)
{
	u16 op = be16_to_cpu(((struct mlx5_inbox_hdr *)(ent->lay->in))->opcode);
	struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
	struct mlx5_cmd_mailbox *next = msg->next;
	int data_only;
	u32 offset = 0;
	int dump_len;

	data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));

	if (data_only)
		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
				   "dump command data %s(0x%x) %s\n",
				   mlx5_command_str(op), op,
				   input ? "INPUT" : "OUTPUT");
	else
		mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n",
			      mlx5_command_str(op), op,
			      input ? "INPUT" : "OUTPUT");

	if (data_only) {
		if (input) {
			dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset);
			offset += sizeof(ent->lay->in);
		} else {
			dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset);
			offset += sizeof(ent->lay->out);
		}
	} else {
		dump_buf(ent->lay, sizeof(*ent->lay), 0, offset);
		offset += sizeof(*ent->lay);
	}

	while (next && offset < msg->len) {
		if (data_only) {
			dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset);
			dump_buf(next->buf, dump_len, 1, offset);
			offset += MLX5_CMD_DATA_BLOCK_SIZE;
		} else {
			mlx5_core_dbg(dev, "command block:\n");
			dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset);
			offset += sizeof(struct mlx5_cmd_prot_block);
		}
		next = next->next;
	}

	if (data_only)
		pr_debug("\n");
}

static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
{
	struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);

	return be16_to_cpu(hdr->opcode);
}

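/* Runs when an asynchronous (callback) command has not completed within the
 * command timeout; forces a timed-out completion for its entry.
 */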
static void cb_timeout_handler(struct work_struct *work)
{
	struct delayed_work *dwork = container_of(work, struct delayed_work,
						  work);
	struct mlx5_cmd_work_ent *ent = container_of(dwork,
						     struct mlx5_cmd_work_ent,
						     cb_timeout_work);
	struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev,
						 cmd);

	ent->ret = -ETIMEDOUT;
	mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
		       mlx5_command_str(msg_to_opcode(ent->in)),
		       msg_to_opcode(ent->in));
	mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
}

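/* Fills in a hardware command descriptor for the entry, hands ownership to
 * the firmware and rings the doorbell.  Page-queue commands bypass slot
 * allocation and always use the dedicated last entry.  In polling mode the
 * completion is reaped synchronously right here.
 */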
static void cmd_work_handler(struct work_struct *work)
{
	struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
	struct mlx5_cmd *cmd = ent->cmd;
	struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
	unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
	struct mlx5_cmd_layout *lay;
	struct semaphore *sem;
	unsigned long flags;

	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
	down(sem);
	if (!ent->page_queue) {
		ent->idx = alloc_ent(cmd);
		if (ent->idx < 0) {
			mlx5_core_err(dev, "failed to allocate command entry\n");
			up(sem);
			return;
		}
	} else {
		ent->idx = cmd->max_reg_cmds;
		spin_lock_irqsave(&cmd->alloc_lock, flags);
		clear_bit(ent->idx, &cmd->bitmask);
		spin_unlock_irqrestore(&cmd->alloc_lock, flags);
	}

	cmd->ent_arr[ent->idx] = ent;
	lay = get_inst(cmd, ent->idx);
	ent->lay = lay;
	memset(lay, 0, sizeof(*lay));
	memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
	ent->op = be32_to_cpu(lay->in[0]) >> 16;
	if (ent->in->next)
		lay->in_ptr = cpu_to_be64(ent->in->next->dma);
	lay->inlen = cpu_to_be32(ent->in->len);
	if (ent->out->next)
		lay->out_ptr = cpu_to_be64(ent->out->next->dma);
	lay->outlen = cpu_to_be32(ent->out->len);
	lay->type = MLX5_PCI_CMD_XPORT;
	lay->token = ent->token;
	lay->status_own = CMD_OWNER_HW;
	set_signature(ent, !cmd->checksum_disabled);
	dump_command(dev, ent, 1);
	ent->ts1 = ktime_get_ns();

	if (ent->callback)
		schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);

	/* ring doorbell after the descriptor is valid */
	mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
	wmb();
	iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
	mmiowb();
	/* if not in polling don't use ent after this point */
	if (cmd->mode == CMD_MODE_POLLING) {
		poll_timeout(ent);
		/* make sure we read the descriptor after ownership is SW */
		rmb();
		mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
	}
}

static const char *deliv_status_to_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_DELIVERY_STAT_OK:
		return "no errors";
	case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
		return "signature error";
	case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
		return "token error";
	case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
		return "bad block number";
	case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
		return "output pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
		return "input pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_FW_ERR:
		return "firmware internal error";
	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
		return "command input length error";
	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
		return "command output length error";
	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
		return "reserved fields not cleared";
	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
		return "bad command descriptor type";
	default:
		return "unknown status code";
	}
}

static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
	unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
	struct mlx5_cmd *cmd = &dev->cmd;
	int err;

	if (cmd->mode == CMD_MODE_POLLING) {
		wait_for_completion(&ent->done);
	} else if (!wait_for_completion_timeout(&ent->done, timeout)) {
		ent->ret = -ETIMEDOUT;
		mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
	}

	err = ent->ret;

	if (err == -ETIMEDOUT) {
		mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
			       mlx5_command_str(msg_to_opcode(ent->in)),
			       msg_to_opcode(ent->in));
	}
	mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
		      err, deliv_status_to_str(ent->status), ent->status);

	return err;
}

static __be32 *get_synd_ptr(struct mlx5_outbox_hdr *out)
{
	return &out->syndrome;
}

static u8 *get_status_ptr(struct mlx5_outbox_hdr *out)
{
	return &out->status;
}

/* Notes:
 * 1. Callback functions may not sleep
 * 2. page queue commands do not support asynchronous completion
 */
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
			   struct mlx5_cmd_msg *out, void *uout, int uout_size,
			   mlx5_cmd_cbk_t callback,
			   void *context, int page_queue, u8 *status,
			   u8 token)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	struct mlx5_cmd_stats *stats;
	int err = 0;
	s64 ds;
	u16 op;

	if (callback && page_queue)
		return -EINVAL;

	ent = alloc_cmd(cmd, in, out, uout, uout_size, callback, context,
			page_queue);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	ent->token = token;

	if (!callback)
		init_completion(&ent->done);

	INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler);
	INIT_WORK(&ent->work, cmd_work_handler);
	if (page_queue) {
		cmd_work_handler(&ent->work);
	} else if (!queue_work(cmd->wq, &ent->work)) {
		mlx5_core_warn(dev, "failed to queue work\n");
		err = -ENOMEM;
		goto out_free;
	}

	if (callback)
		goto out;

	err = wait_func(dev, ent);
	if (err == -ETIMEDOUT)
		goto out_free;

	ds = ent->ts2 - ent->ts1;
	op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
	if (op < ARRAY_SIZE(cmd->stats)) {
		stats = &cmd->stats[op];
		spin_lock_irq(&stats->lock);
		stats->sum += ds;
		++stats->n;
		spin_unlock_irq(&stats->lock);
	}
	mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
			   "fw exec time for %s is %lld nsec\n",
			   mlx5_command_str(op), ds);
	*status = ent->status;

out_free:
	free_cmd(ent);
out:
	return err;
}

static ssize_t dbg_write(struct file *filp, const char __user *buf,
			 size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char lbuf[3];
	int err;

	if (!dbg->in_msg || !dbg->out_msg)
		return -ENOMEM;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	lbuf[sizeof(lbuf) - 1] = 0;

	if (strcmp(lbuf, "go"))
		return -EINVAL;

	err = mlx5_cmd_exec(dev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen);

	return err ? err : count;
}

static const struct file_operations fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= dbg_write,
};

static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size,
			    u8 token)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_mailbox *next;
	int copy;

	if (!to || !from)
		return -ENOMEM;

	copy = min_t(int, size, sizeof(to->first.data));
	memcpy(to->first.data, from, copy);
	size -= copy;
	from += copy;

	next = to->next;
	while (size) {
		if (!next) {
			/* this is a BUG */
			return -ENOMEM;
		}

		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
		block = next->buf;
		memcpy(block->data, from, copy);
		from += copy;
		size -= copy;
		block->token = token;
		next = next->next;
	}

	return 0;
}

static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_mailbox *next;
	int copy;

	if (!to || !from)
		return -ENOMEM;

	copy = min_t(int, size, sizeof(from->first.data));
	memcpy(to, from->first.data, copy);
	size -= copy;
	to += copy;

	next = from->next;
	while (size) {
		if (!next) {
			/* this is a BUG */
			return -ENOMEM;
		}

		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
		block = next->buf;

		memcpy(to, block->data, copy);
		to += copy;
		size -= copy;
		next = next->next;
	}

	return 0;
}

static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev,
					      gfp_t flags)
{
	struct mlx5_cmd_mailbox *mailbox;

	mailbox = kmalloc(sizeof(*mailbox), flags);
	if (!mailbox)
		return ERR_PTR(-ENOMEM);

	mailbox->buf = pci_pool_alloc(dev->cmd.pool, flags,
				      &mailbox->dma);
	if (!mailbox->buf) {
		mlx5_core_dbg(dev, "failed allocation\n");
		kfree(mailbox);
		return ERR_PTR(-ENOMEM);
	}
	memset(mailbox->buf, 0, sizeof(struct mlx5_cmd_prot_block));
	mailbox->next = NULL;

	return mailbox;
}

static void free_cmd_box(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_mailbox *mailbox)
{
	pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
	kfree(mailbox);
}

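/* Builds a command message: payload beyond the inline first.data bytes is
 * split into MLX5_CMD_DATA_BLOCK_SIZE mailbox blocks chained by DMA address,
 * with block numbers counting up from the head toward the tail.
 */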
static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
					       gfp_t flags, int size,
					       u8 token)
{
	struct mlx5_cmd_mailbox *tmp, *head = NULL;
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_msg *msg;
	int blen;
	int err;
	int n;
	int i;

	msg = kzalloc(sizeof(*msg), flags);
	if (!msg)
		return ERR_PTR(-ENOMEM);

	blen = size - min_t(int, sizeof(msg->first.data), size);
	n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) / MLX5_CMD_DATA_BLOCK_SIZE;

	for (i = 0; i < n; i++) {
		tmp = alloc_cmd_box(dev, flags);
		if (IS_ERR(tmp)) {
			mlx5_core_warn(dev, "failed allocating block\n");
			err = PTR_ERR(tmp);
			goto err_alloc;
		}

		block = tmp->buf;
		tmp->next = head;
		block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
		block->block_num = cpu_to_be32(n - i - 1);
		block->token = token;
		head = tmp;
	}
	msg->next = head;
	msg->len = size;
	return msg;

err_alloc:
	while (head) {
		tmp = head->next;
		free_cmd_box(dev, head);
		head = tmp;
	}
	kfree(msg);

	return ERR_PTR(err);
}

static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
			      struct mlx5_cmd_msg *msg)
{
	struct mlx5_cmd_mailbox *head = msg->next;
	struct mlx5_cmd_mailbox *next;

	while (head) {
		next = head->next;
		free_cmd_box(dev, head);
		head = next;
	}
	kfree(msg);
}

static ssize_t data_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	void *ptr;
	int err;

	if (*pos != 0)
		return -EINVAL;

	kfree(dbg->in_msg);
	dbg->in_msg = NULL;
	dbg->inlen = 0;

	ptr = kzalloc(count, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	if (copy_from_user(ptr, buf, count)) {
		err = -EFAULT;
		goto out;
	}
	dbg->in_msg = ptr;
	dbg->inlen = count;

	*pos = count;

	return count;

out:
	kfree(ptr);
	return err;
}

static ssize_t data_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	int copy;

	if (*pos)
		return 0;

	if (!dbg->out_msg)
		return -ENOMEM;

	copy = min_t(int, count, dbg->outlen);
	if (copy_to_user(buf, dbg->out_msg, copy))
		return -EFAULT;

	*pos += copy;

	return copy;
}

static const struct file_operations dfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= data_write,
	.read	= data_read,
};

static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count,
			   loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char outlen[8];
	int err;

	if (*pos)
		return 0;

	err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen);
	if (err < 0)
		return err;

	if (copy_to_user(buf, &outlen, err))
		return -EFAULT;

	*pos += err;

	return err;
}

static ssize_t outlen_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char outlen_str[8];
	int outlen;
	void *ptr;
	int err;

	if (*pos != 0 || count > 6)
		return -EINVAL;

	kfree(dbg->out_msg);
	dbg->out_msg = NULL;
	dbg->outlen = 0;

	if (copy_from_user(outlen_str, buf, count))
		return -EFAULT;

	outlen_str[7] = 0;

	err = sscanf(outlen_str, "%d", &outlen);
	if (err < 0)
		return err;

	ptr = kzalloc(outlen, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	dbg->out_msg = ptr;
	dbg->outlen = outlen;

	*pos = count;

	return count;
}

static const struct file_operations olfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= outlen_write,
	.read	= outlen_read,
};

static void set_wqname(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
		 dev_name(&dev->pdev->dev));
}

static void clean_debug_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;

	if (!mlx5_debugfs_root)
		return;

	mlx5_cmdif_debugfs_cleanup(dev);
	debugfs_remove_recursive(dbg->dbg_root);
}

static int create_debugfs_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	int err = -ENOMEM;

	if (!mlx5_debugfs_root)
		return 0;

	dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root);
	if (!dbg->dbg_root)
		return err;

	dbg->dbg_in = debugfs_create_file("in", 0400, dbg->dbg_root,
					  dev, &dfops);
	if (!dbg->dbg_in)
		goto err_dbg;

	dbg->dbg_out = debugfs_create_file("out", 0200, dbg->dbg_root,
					   dev, &dfops);
	if (!dbg->dbg_out)
		goto err_dbg;

	dbg->dbg_outlen = debugfs_create_file("out_len", 0600, dbg->dbg_root,
					      dev, &olfops);
	if (!dbg->dbg_outlen)
		goto err_dbg;

	dbg->dbg_status = debugfs_create_u8("status", 0600, dbg->dbg_root,
					    &dbg->status);
	if (!dbg->dbg_status)
		goto err_dbg;

	dbg->dbg_run = debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);
	if (!dbg->dbg_run)
		goto err_dbg;

	mlx5_cmdif_debugfs_init(dev);

	return 0;

err_dbg:
	clean_debug_files(dev);
	return err;
}

static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->max_reg_cmds; i++)
		down(&cmd->sem);
	down(&cmd->pages_sem);

	cmd->mode = mode;

	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}

void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
	mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
}

void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
{
	mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
}

static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
{
	unsigned long flags;

	if (msg->cache) {
		spin_lock_irqsave(&msg->cache->lock, flags);
		list_add_tail(&msg->list, &msg->cache->head);
		spin_unlock_irqrestore(&msg->cache->lock, flags);
	} else {
		mlx5_free_cmd_msg(dev, msg);
	}
}

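/* Shared completion path for event, polling and triggered (forced)
 * completions.  The vector is a bitmask of finished command entries; each
 * entry has its output copied back, its signature checked when checksums are
 * enabled, and its waiter woken or its callback invoked.
 */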
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	mlx5_cmd_cbk_t callback;
	void *context;
	int err;
	int i;
	s64 ds;
	struct mlx5_cmd_stats *stats;
	unsigned long flags;
	unsigned long vector;

	/* at most 32 commands can be outstanding, so the vector fits in 32 bits */
	vector = vec & 0xffffffff;
	for (i = 0; i < (1 << cmd->log_sz); i++) {
		if (test_bit(i, &vector)) {
			struct semaphore *sem;

			ent = cmd->ent_arr[i];
			if (ent->callback)
				cancel_delayed_work(&ent->cb_timeout_work);
			if (ent->page_queue)
				sem = &cmd->pages_sem;
			else
				sem = &cmd->sem;
			ent->ts2 = ktime_get_ns();
			memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
			dump_command(dev, ent, 0);
			if (!ent->ret) {
				if (!cmd->checksum_disabled)
					ent->ret = verify_signature(ent);
				else
					ent->ret = 0;
				if (vec & MLX5_TRIGGERED_CMD_COMP)
					ent->status = MLX5_DRIVER_STATUS_ABORTED;
				else
					ent->status = ent->lay->status_own >> 1;

				mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
					      ent->ret, deliv_status_to_str(ent->status), ent->status);
			}
			free_ent(cmd, ent->idx);

			if (ent->callback) {
				ds = ent->ts2 - ent->ts1;
				if (ent->op < ARRAY_SIZE(cmd->stats)) {
					stats = &cmd->stats[ent->op];
					spin_lock_irqsave(&stats->lock, flags);
					stats->sum += ds;
					++stats->n;
					spin_unlock_irqrestore(&stats->lock, flags);
				}

				callback = ent->callback;
				context = ent->context;
				err = ent->ret;
				if (!err)
					err = mlx5_copy_from_msg(ent->uout,
								 ent->out,
								 ent->uout_size);

				mlx5_free_cmd_msg(dev, ent->out);
				free_msg(dev, ent->in);

				err = err ? err : ent->status;
				free_cmd(ent);
				callback(err, context);
			} else {
				complete(&ent->done);
			}
			up(sem);
		}
	}
}
EXPORT_SYMBOL(mlx5_cmd_comp_handler);

static int status_to_err(u8 status)
{
	return status ? -1 : 0; /* TBD more meaningful codes */
}

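/* Grab a pre-allocated message from the medium or large cache when the size
 * fits; fall back to allocating a fresh message otherwise.
 */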
static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
				      gfp_t gfp)
{
	struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
	struct mlx5_cmd *cmd = &dev->cmd;
	struct cache_ent *ent = NULL;

	if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE)
		ent = &cmd->cache.large;
	else if (in_size > 16 && in_size <= MED_LIST_SIZE)
		ent = &cmd->cache.med;

	if (ent) {
		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			msg = list_entry(ent->head.next, typeof(*msg), list);
			/* For cached lists, we must explicitly state what is
			 * the real size
			 */
			msg->len = in_size;
			list_del(&msg->list);
		}
		spin_unlock_irq(&ent->lock);
	}

	if (IS_ERR(msg))
		msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);

	return msg;
}

static u16 opcode_from_in(struct mlx5_inbox_hdr *in)
{
	return be16_to_cpu(in->opcode);
}

static int is_manage_pages(struct mlx5_inbox_hdr *in)
{
	return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES;
}

static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		    int out_size, mlx5_cmd_cbk_t callback, void *context)
{
	struct mlx5_cmd_msg *inb;
	struct mlx5_cmd_msg *outb;
	int pages_queue;
	gfp_t gfp;
	int err;
	u8 status = 0;
	u32 drv_synd;
	u8 token;

	if (pci_channel_offline(dev->pdev) ||
	    dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		err = mlx5_internal_err_ret_value(dev, opcode_from_in(in), &drv_synd, &status);
		*get_synd_ptr(out) = cpu_to_be32(drv_synd);
		*get_status_ptr(out) = status;
		return err;
	}

	pages_queue = is_manage_pages(in);
	gfp = callback ? GFP_ATOMIC : GFP_KERNEL;

	inb = alloc_msg(dev, in_size, gfp);
	if (IS_ERR(inb)) {
		err = PTR_ERR(inb);
		return err;
	}

	token = alloc_token(&dev->cmd);

	err = mlx5_copy_to_msg(inb, in, in_size, token);
	if (err) {
		mlx5_core_warn(dev, "err %d\n", err);
		goto out_in;
	}

	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size, token);
	if (IS_ERR(outb)) {
		err = PTR_ERR(outb);
		goto out_in;
	}

	err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
			      pages_queue, &status, token);
	if (err)
		goto out_out;

	mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
	if (status) {
		err = status_to_err(status);
		goto out_out;
	}

	if (!callback)
		err = mlx5_copy_from_msg(out, outb, out_size);

out_out:
	if (!callback)
		mlx5_free_cmd_msg(dev, outb);

out_in:
	if (!callback)
		free_msg(dev, inb);
	return err;
}

int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size)
{
	return cmd_exec(dev, in, in_size, out, out_size, NULL, NULL);
}
EXPORT_SYMBOL(mlx5_cmd_exec);

int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
		     void *out, int out_size, mlx5_cmd_cbk_t callback,
		     void *context)
{
	return cmd_exec(dev, in, in_size, out, out_size, callback, context);
}
EXPORT_SYMBOL(mlx5_cmd_exec_cb);

static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;
	struct mlx5_cmd_msg *n;

	list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) {
		list_del(&msg->list);
		mlx5_free_cmd_msg(dev, msg);
	}

	list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) {
		list_del(&msg->list);
		mlx5_free_cmd_msg(dev, msg);
	}
}

static int create_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;
	int err;
	int i;

	spin_lock_init(&cmd->cache.large.lock);
	INIT_LIST_HEAD(&cmd->cache.large.head);
	spin_lock_init(&cmd->cache.med.lock);
	INIT_LIST_HEAD(&cmd->cache.med.head);

	for (i = 0; i < NUM_LONG_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE, 0);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		msg->cache = &cmd->cache.large;
		list_add_tail(&msg->list, &cmd->cache.large.head);
	}

	for (i = 0; i < NUM_MED_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE, 0);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		msg->cache = &cmd->cache.med;
		list_add_tail(&msg->list, &cmd->cache.med.head);
	}

	return 0;

ex_err:
	destroy_msg_cache(dev);
	return err;
}

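/* The command queue must be 4K aligned.  Try a page-sized coherent
 * allocation first; if it comes back misaligned, reallocate with enough
 * slack to align the buffer (and its DMA address) by hand.
 */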
static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	struct device *ddev = &dev->pdev->dev;

	cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
						 &cmd->alloc_dma, GFP_KERNEL);
	if (!cmd->cmd_alloc_buf)
		return -ENOMEM;

	/* if the allocation came back 4K aligned already, use it as is */
	if (!((uintptr_t)cmd->cmd_alloc_buf & (MLX5_ADAPTER_PAGE_SIZE - 1))) {
		cmd->cmd_buf = cmd->cmd_alloc_buf;
		cmd->dma = cmd->alloc_dma;
		cmd->alloc_size = MLX5_ADAPTER_PAGE_SIZE;
		return 0;
	}

	dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
	cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev,
						 2 * MLX5_ADAPTER_PAGE_SIZE - 1,
						 &cmd->alloc_dma, GFP_KERNEL);
	if (!cmd->cmd_alloc_buf)
		return -ENOMEM;

	cmd->cmd_buf = PTR_ALIGN(cmd->cmd_alloc_buf, MLX5_ADAPTER_PAGE_SIZE);
	cmd->dma = ALIGN(cmd->alloc_dma, MLX5_ADAPTER_PAGE_SIZE);
	cmd->alloc_size = 2 * MLX5_ADAPTER_PAGE_SIZE - 1;
	return 0;
}

static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	struct device *ddev = &dev->pdev->dev;

	dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
}

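/* Bring up the command interface: verify the command interface revision,
 * allocate the DMA'able command queue and mailbox pool, size the queue from
 * the initialization segment, point firmware at the queue address and start
 * out in polling mode.
 */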
int mlx5_cmd_init(struct mlx5_core_dev *dev)
{
	int size = sizeof(struct mlx5_cmd_prot_block);
	int align = roundup_pow_of_two(size);
	struct mlx5_cmd *cmd = &dev->cmd;
	u32 cmd_h, cmd_l;
	u16 cmd_if_rev;
	int err;
	int i;

	memset(cmd, 0, sizeof(*cmd));
	cmd_if_rev = cmdif_rev(dev);
	if (cmd_if_rev != CMD_IF_REV) {
		dev_err(&dev->pdev->dev,
			"Driver cmdif rev(%d) differs from firmware's(%d)\n",
			CMD_IF_REV, cmd_if_rev);
		return -EINVAL;
	}

	cmd->pool = pci_pool_create("mlx5_cmd", dev->pdev, size, align, 0);
	if (!cmd->pool)
		return -ENOMEM;

	err = alloc_cmd_page(dev, cmd);
	if (err)
		goto err_free_pool;

	cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
	cmd->log_sz = cmd_l >> 4 & 0xf;
	cmd->log_stride = cmd_l & 0xf;
	if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
		dev_err(&dev->pdev->dev, "firmware reports too many outstanding commands %d\n",
			1 << cmd->log_sz);
		err = -EINVAL;
		goto err_free_page;
	}

	if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
		dev_err(&dev->pdev->dev, "command queue size overflow\n");
		err = -EINVAL;
		goto err_free_page;
	}

	cmd->checksum_disabled = 1;
	cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
	cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;

	cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
	if (cmd->cmdif_rev > CMD_IF_REV) {
		dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
			CMD_IF_REV, cmd->cmdif_rev);
		err = -ENOTSUPP;
		goto err_free_page;
	}

	spin_lock_init(&cmd->alloc_lock);
	spin_lock_init(&cmd->token_lock);
	for (i = 0; i < ARRAY_SIZE(cmd->stats); i++)
		spin_lock_init(&cmd->stats[i].lock);

	sema_init(&cmd->sem, cmd->max_reg_cmds);
	sema_init(&cmd->pages_sem, 1);

	cmd_h = (u32)((u64)(cmd->dma) >> 32);
	cmd_l = (u32)(cmd->dma);
	if (cmd_l & 0xfff) {
		dev_err(&dev->pdev->dev, "invalid command queue address\n");
		err = -ENOMEM;
		goto err_free_page;
	}

	iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
	iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);

	/* Make sure firmware sees the complete address before we proceed */
	wmb();

	mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));

	cmd->mode = CMD_MODE_POLLING;

	err = create_msg_cache(dev);
	if (err) {
		dev_err(&dev->pdev->dev, "failed to create command cache\n");
		goto err_free_page;
	}

	set_wqname(dev);
	cmd->wq = create_singlethread_workqueue(cmd->wq_name);
	if (!cmd->wq) {
		dev_err(&dev->pdev->dev, "failed to create command workqueue\n");
		err = -ENOMEM;
		goto err_cache;
	}

	err = create_debugfs_files(dev);
	if (err) {
		err = -ENOMEM;
		goto err_wq;
	}

	return 0;

err_wq:
	destroy_workqueue(cmd->wq);

err_cache:
	destroy_msg_cache(dev);

err_free_page:
	free_cmd_page(dev, cmd);

err_free_pool:
	pci_pool_destroy(cmd->pool);

	return err;
}
EXPORT_SYMBOL(mlx5_cmd_init);

void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	clean_debug_files(dev);
	destroy_workqueue(cmd->wq);
	destroy_msg_cache(dev);
	free_cmd_page(dev, cmd);
	pci_pool_destroy(cmd->pool);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup);

static const char *cmd_status_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:
		return "OK";
	case MLX5_CMD_STAT_INT_ERR:
		return "internal error";
	case MLX5_CMD_STAT_BAD_OP_ERR:
		return "bad operation";
	case MLX5_CMD_STAT_BAD_PARAM_ERR:
		return "bad parameter";
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
		return "bad system state";
	case MLX5_CMD_STAT_BAD_RES_ERR:
		return "bad resource";
	case MLX5_CMD_STAT_RES_BUSY:
		return "resource busy";
	case MLX5_CMD_STAT_LIM_ERR:
		return "limits exceeded";
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
		return "bad resource state";
	case MLX5_CMD_STAT_IX_ERR:
		return "bad index";
	case MLX5_CMD_STAT_NO_RES_ERR:
		return "no resources";
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
		return "bad input length";
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
		return "bad output length";
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
		return "bad QP state";
	case MLX5_CMD_STAT_BAD_PKT_ERR:
		return "bad packet (discarded)";
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
		return "bad size too many outstanding CQEs";
	default:
		return "unknown status";
	}
}

static int cmd_status_to_err(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:				return 0;
	case MLX5_CMD_STAT_INT_ERR:			return -EIO;
	case MLX5_CMD_STAT_BAD_OP_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_PARAM_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_RES_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_RES_BUSY:			return -EBUSY;
	case MLX5_CMD_STAT_LIM_ERR:			return -ENOMEM;
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_IX_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_NO_RES_ERR:			return -EAGAIN;
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_PKT_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:	return -EINVAL;
	default:					return -EIO;
	}
}

/* this will be available till all the commands use set/get macros */
int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
{
	if (!hdr->status)
		return 0;

	pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
		cmd_status_str(hdr->status), hdr->status,
		be32_to_cpu(hdr->syndrome));

	return cmd_status_to_err(hdr->status);
}

int mlx5_cmd_status_to_err_v2(void *ptr)
{
	u32 syndrome;
	u8  status;

	status = be32_to_cpu(*(__be32 *)ptr) >> 24;
	if (!status)
		return 0;

	syndrome = be32_to_cpu(*(__be32 *)(ptr + 4));

	pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
		cmd_status_str(status), status, syndrome);

	return cmd_status_to_err(status);
}