/*
 * Copyright (c) 2013-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include <linux/debugfs.h>

#include "mlx5_core.h"
enum {
	LONG_LIST_SIZE	= (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 +
			  MLX5_CMD_DATA_BLOCK_SIZE,
	MED_LIST_SIZE	= 16 + MLX5_CMD_DATA_BLOCK_SIZE,
};

enum {
	MLX5_CMD_DELIVERY_STAT_OK			= 0x0,
	MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR		= 0x1,
	MLX5_CMD_DELIVERY_STAT_TOK_ERR			= 0x2,
	MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR		= 0x3,
	MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR	= 0x4,
	MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR		= 0x5,
	MLX5_CMD_DELIVERY_STAT_FW_ERR			= 0x6,
	MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR		= 0x7,
	MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR		= 0x8,
	MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR	= 0x9,
	MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR		= 0x10,
};
static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
					   struct mlx5_cmd_msg *in,
					   struct mlx5_cmd_msg *out,
					   void *uout, int uout_size,
					   mlx5_cmd_cbk_t cbk,
					   void *context, int page_queue)
{
	gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
	struct mlx5_cmd_work_ent *ent;

	ent = kzalloc(sizeof(*ent), alloc_flags);
	if (!ent)
		return ERR_PTR(-ENOMEM);

	ent->in		= in;
	ent->out	= out;
	ent->uout	= uout;
	ent->uout_size	= uout_size;
	ent->callback	= cbk;
	ent->context	= context;
	ent->cmd	= cmd;
	ent->page_queue	= page_queue;

	return ent;
}
static u8 alloc_token(struct mlx5_cmd *cmd)
{
	u8 token;

	spin_lock(&cmd->token_lock);
	cmd->token++;
	if (!cmd->token)
		cmd->token++;
	token = cmd->token;
	spin_unlock(&cmd->token_lock);

	return token;
}
static int alloc_ent(struct mlx5_cmd *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
	if (ret < cmd->max_reg_cmds)
		clear_bit(ret, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);

	return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
}

static void free_ent(struct mlx5_cmd *cmd, int idx)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	set_bit(idx, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}
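/*
 * Command-slot accounting: a set bit in cmd->bitmask appears to mean "slot
 * free", so alloc_ent() claims the lowest free slot with clear_bit() and
 * free_ent() releases it with set_bit(), both under alloc_lock.  The slot at
 * index max_reg_cmds is not covered by the bitmask; cmd_work_handler() below
 * reserves it for page-queue (MANAGE_PAGES) commands, which are serialized
 * separately through pages_sem.
 */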
static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
{
	return cmd->cmd_buf + (idx << cmd->log_stride);
}

static u8 xor8_buf(void *buf, int len)
{
	u8 *ptr = buf;
	u8 sum = 0;
	int i;

	for (i = 0; i < len; i++)
		sum ^= ptr[i];

	return sum;
}

static int verify_block_sig(struct mlx5_cmd_prot_block *block)
{
	if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
		return -EINVAL;

	if (xor8_buf(block, sizeof(*block)) != 0xff)
		return -EINVAL;

	return 0;
}

static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
			   int csum)
{
	block->token = token;
	if (csum) {
		block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
					    sizeof(block->data) - 2);
		block->sig = ~xor8_buf(block, sizeof(*block) - 1);
	}
}

static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
{
	struct mlx5_cmd_mailbox *next = msg->next;

	while (next) {
		calc_block_sig(next->buf, token, csum);
		next = next->next;
	}
}

static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
{
	ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
	calc_chain_sig(ent->in, ent->token, csum);
	calc_chain_sig(ent->out, ent->token, csum);
}
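/*
 * The signature scheme used above: sig/ctrl_sig are written as the bitwise
 * complement of an 8-bit XOR over the covered bytes, so XOR-ing the covered
 * region together with its signature byte must yield 0xff on verification
 * (see verify_block_sig() and verify_signature() below).  For example, a
 * block whose covered bytes XOR to 0x5a gets sig = ~0x5a = 0xa5, and
 * 0x5a ^ 0xa5 == 0xff.  Checksumming is skipped when checksum_disabled is
 * set (mlx5_cmd_init() currently sets it to 1).
 */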
static void poll_timeout(struct mlx5_cmd_work_ent *ent)
{
	unsigned long poll_end = jiffies + msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);
	u8 own;

	do {
		own = ent->lay->status_own;
		if (!(own & CMD_OWNER_HW)) {
			ent->ret = 0;
			return;
		}
		usleep_range(5000, 10000);
	} while (time_before(jiffies, poll_end));

	ent->ret = -ETIMEDOUT;
}

static void free_cmd(struct mlx5_cmd_work_ent *ent)
{
	kfree(ent);
}
static int verify_signature(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_cmd_mailbox *next = ent->out->next;
	int err;
	u8 sig;

	sig = xor8_buf(ent->lay, sizeof(*ent->lay));
	if (sig != 0xff)
		return -EINVAL;

	while (next) {
		err = verify_block_sig(next->buf);
		if (err)
			return err;

		next = next->next;
	}

	return 0;
}
static void dump_buf(void *buf, int size, int data_only, int offset)
{
	__be32 *p = buf;
	int i;

	for (i = 0; i < size; i += 16) {
		pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
			 be32_to_cpu(p[1]), be32_to_cpu(p[2]),
			 be32_to_cpu(p[3]));
		p += 4;
		offset += 16;
	}
	if (!data_only)
		pr_debug("\n");
}
enum {
	MLX5_DRIVER_STATUS_ABORTED = 0xfe,
	MLX5_DRIVER_SYND = 0xbadd00de,
};
static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
				       u32 *synd, u8 *status)
{
	switch (op) {
	case MLX5_CMD_OP_TEARDOWN_HCA:
	case MLX5_CMD_OP_DISABLE_HCA:
	case MLX5_CMD_OP_MANAGE_PAGES:
	case MLX5_CMD_OP_DESTROY_MKEY:
	case MLX5_CMD_OP_DESTROY_EQ:
	case MLX5_CMD_OP_DESTROY_CQ:
	case MLX5_CMD_OP_DESTROY_QP:
	case MLX5_CMD_OP_DESTROY_PSV:
	case MLX5_CMD_OP_DESTROY_SRQ:
	case MLX5_CMD_OP_DESTROY_XRC_SRQ:
	case MLX5_CMD_OP_DESTROY_DCT:
	case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
	case MLX5_CMD_OP_DEALLOC_PD:
	case MLX5_CMD_OP_DEALLOC_UAR:
	case MLX5_CMD_OP_DETTACH_FROM_MCG:
	case MLX5_CMD_OP_DEALLOC_XRCD:
	case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_DESTROY_TIR:
	case MLX5_CMD_OP_DESTROY_SQ:
	case MLX5_CMD_OP_DESTROY_RQ:
	case MLX5_CMD_OP_DESTROY_RMP:
	case MLX5_CMD_OP_DESTROY_TIS:
	case MLX5_CMD_OP_DESTROY_RQT:
	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
	case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
	case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_2ERR_QP:
	case MLX5_CMD_OP_2RST_QP:
	case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
		return MLX5_CMD_STAT_OK;

	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_ADAPTER:
	case MLX5_CMD_OP_INIT_HCA:
	case MLX5_CMD_OP_ENABLE_HCA:
	case MLX5_CMD_OP_QUERY_PAGES:
	case MLX5_CMD_OP_SET_HCA_CAP:
	case MLX5_CMD_OP_QUERY_ISSI:
	case MLX5_CMD_OP_SET_ISSI:
	case MLX5_CMD_OP_CREATE_MKEY:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
	case MLX5_CMD_OP_PAGE_FAULT_RESUME:
	case MLX5_CMD_OP_CREATE_EQ:
	case MLX5_CMD_OP_QUERY_EQ:
	case MLX5_CMD_OP_GEN_EQE:
	case MLX5_CMD_OP_CREATE_CQ:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_MODIFY_CQ:
	case MLX5_CMD_OP_CREATE_QP:
	case MLX5_CMD_OP_RST2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
	case MLX5_CMD_OP_SQERR2RTS_QP:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_SQD_RTS_QP:
	case MLX5_CMD_OP_INIT2INIT_QP:
	case MLX5_CMD_OP_CREATE_PSV:
	case MLX5_CMD_OP_CREATE_SRQ:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_ARM_RQ:
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_ARM_XRC_SRQ:
	case MLX5_CMD_OP_CREATE_DCT:
	case MLX5_CMD_OP_DRAIN_DCT:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
	case MLX5_CMD_OP_QUERY_VPORT_STATE:
	case MLX5_CMD_OP_MODIFY_VPORT_STATE:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
	case MLX5_CMD_OP_SET_ROCE_ADDRESS:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_ALLOC_PD:
	case MLX5_CMD_OP_ALLOC_UAR:
	case MLX5_CMD_OP_CONFIG_INT_MODERATION:
	case MLX5_CMD_OP_ACCESS_REG:
	case MLX5_CMD_OP_ATTACH_TO_MCG:
	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
	case MLX5_CMD_OP_MAD_IFC:
	case MLX5_CMD_OP_QUERY_MAD_DEMUX:
	case MLX5_CMD_OP_SET_MAD_DEMUX:
	case MLX5_CMD_OP_NOP:
	case MLX5_CMD_OP_ALLOC_XRCD:
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_QUERY_CONG_STATUS:
	case MLX5_CMD_OP_MODIFY_CONG_STATUS:
	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
	case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_CREATE_TIR:
	case MLX5_CMD_OP_MODIFY_TIR:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_CREATE_SQ:
	case MLX5_CMD_OP_MODIFY_SQ:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_CREATE_RQ:
	case MLX5_CMD_OP_MODIFY_RQ:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_CREATE_RMP:
	case MLX5_CMD_OP_MODIFY_RMP:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_CREATE_TIS:
	case MLX5_CMD_OP_MODIFY_TIS:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_CREATE_RQT:
	case MLX5_CMD_OP_MODIFY_RQT:
	case MLX5_CMD_OP_QUERY_RQT:
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
		*status = MLX5_DRIVER_STATUS_ABORTED;
		*synd = MLX5_DRIVER_SYND;
		return -EIO;
	default:
		mlx5_core_err(dev, "Unknown FW command (%d)\n", op);
		return -EINVAL;
	}
}
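/*
 * The split above matters when the device is in an internal-error state (see
 * cmd_exec()): teardown/destroy/dealloc style commands are reported back as
 * MLX5_CMD_STAT_OK, evidently so that cleanup paths can keep making progress
 * without working firmware, while create/query style commands are failed with
 * the driver-generated MLX5_DRIVER_STATUS_ABORTED status and the
 * MLX5_DRIVER_SYND syndrome.
 */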
const char *mlx5_command_str(int command)
{
#define MLX5_COMMAND_STR_CASE(__cmd) case MLX5_CMD_OP_ ## __cmd: return #__cmd

	switch (command) {
	MLX5_COMMAND_STR_CASE(QUERY_HCA_CAP);
	MLX5_COMMAND_STR_CASE(QUERY_ADAPTER);
	MLX5_COMMAND_STR_CASE(INIT_HCA);
	MLX5_COMMAND_STR_CASE(TEARDOWN_HCA);
	MLX5_COMMAND_STR_CASE(ENABLE_HCA);
	MLX5_COMMAND_STR_CASE(DISABLE_HCA);
	MLX5_COMMAND_STR_CASE(QUERY_PAGES);
	MLX5_COMMAND_STR_CASE(MANAGE_PAGES);
	MLX5_COMMAND_STR_CASE(SET_HCA_CAP);
	MLX5_COMMAND_STR_CASE(QUERY_ISSI);
	MLX5_COMMAND_STR_CASE(SET_ISSI);
	MLX5_COMMAND_STR_CASE(CREATE_MKEY);
	MLX5_COMMAND_STR_CASE(QUERY_MKEY);
	MLX5_COMMAND_STR_CASE(DESTROY_MKEY);
	MLX5_COMMAND_STR_CASE(QUERY_SPECIAL_CONTEXTS);
	MLX5_COMMAND_STR_CASE(PAGE_FAULT_RESUME);
	MLX5_COMMAND_STR_CASE(CREATE_EQ);
	MLX5_COMMAND_STR_CASE(DESTROY_EQ);
	MLX5_COMMAND_STR_CASE(QUERY_EQ);
	MLX5_COMMAND_STR_CASE(GEN_EQE);
	MLX5_COMMAND_STR_CASE(CREATE_CQ);
	MLX5_COMMAND_STR_CASE(DESTROY_CQ);
	MLX5_COMMAND_STR_CASE(QUERY_CQ);
	MLX5_COMMAND_STR_CASE(MODIFY_CQ);
	MLX5_COMMAND_STR_CASE(CREATE_QP);
	MLX5_COMMAND_STR_CASE(DESTROY_QP);
	MLX5_COMMAND_STR_CASE(RST2INIT_QP);
	MLX5_COMMAND_STR_CASE(INIT2RTR_QP);
	MLX5_COMMAND_STR_CASE(RTR2RTS_QP);
	MLX5_COMMAND_STR_CASE(RTS2RTS_QP);
	MLX5_COMMAND_STR_CASE(SQERR2RTS_QP);
	MLX5_COMMAND_STR_CASE(2ERR_QP);
	MLX5_COMMAND_STR_CASE(2RST_QP);
	MLX5_COMMAND_STR_CASE(QUERY_QP);
	MLX5_COMMAND_STR_CASE(SQD_RTS_QP);
	MLX5_COMMAND_STR_CASE(INIT2INIT_QP);
	MLX5_COMMAND_STR_CASE(CREATE_PSV);
	MLX5_COMMAND_STR_CASE(DESTROY_PSV);
	MLX5_COMMAND_STR_CASE(CREATE_SRQ);
	MLX5_COMMAND_STR_CASE(DESTROY_SRQ);
	MLX5_COMMAND_STR_CASE(QUERY_SRQ);
	MLX5_COMMAND_STR_CASE(ARM_RQ);
	MLX5_COMMAND_STR_CASE(CREATE_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(DESTROY_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(QUERY_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(ARM_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(CREATE_DCT);
	MLX5_COMMAND_STR_CASE(DESTROY_DCT);
	MLX5_COMMAND_STR_CASE(DRAIN_DCT);
	MLX5_COMMAND_STR_CASE(QUERY_DCT);
	MLX5_COMMAND_STR_CASE(ARM_DCT_FOR_KEY_VIOLATION);
	MLX5_COMMAND_STR_CASE(QUERY_VPORT_STATE);
	MLX5_COMMAND_STR_CASE(MODIFY_VPORT_STATE);
	MLX5_COMMAND_STR_CASE(QUERY_ESW_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_ESW_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_NIC_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_NIC_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_ROCE_ADDRESS);
	MLX5_COMMAND_STR_CASE(SET_ROCE_ADDRESS);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY);
	MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER);
	MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(ALLOC_PD);
	MLX5_COMMAND_STR_CASE(DEALLOC_PD);
	MLX5_COMMAND_STR_CASE(ALLOC_UAR);
	MLX5_COMMAND_STR_CASE(DEALLOC_UAR);
	MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION);
	MLX5_COMMAND_STR_CASE(ACCESS_REG);
	MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG);
	MLX5_COMMAND_STR_CASE(DETTACH_FROM_MCG);
	MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG);
	MLX5_COMMAND_STR_CASE(MAD_IFC);
	MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX);
	MLX5_COMMAND_STR_CASE(SET_MAD_DEMUX);
	MLX5_COMMAND_STR_CASE(NOP);
	MLX5_COMMAND_STR_CASE(ALLOC_XRCD);
	MLX5_COMMAND_STR_CASE(DEALLOC_XRCD);
	MLX5_COMMAND_STR_CASE(ALLOC_TRANSPORT_DOMAIN);
	MLX5_COMMAND_STR_CASE(DEALLOC_TRANSPORT_DOMAIN);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATUS);
	MLX5_COMMAND_STR_CASE(MODIFY_CONG_STATUS);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_PARAMS);
	MLX5_COMMAND_STR_CASE(MODIFY_CONG_PARAMS);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATISTICS);
	MLX5_COMMAND_STR_CASE(ADD_VXLAN_UDP_DPORT);
	MLX5_COMMAND_STR_CASE(DELETE_VXLAN_UDP_DPORT);
	MLX5_COMMAND_STR_CASE(SET_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(QUERY_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(SET_WOL_ROL);
	MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL);
	MLX5_COMMAND_STR_CASE(CREATE_TIR);
	MLX5_COMMAND_STR_CASE(MODIFY_TIR);
	MLX5_COMMAND_STR_CASE(DESTROY_TIR);
	MLX5_COMMAND_STR_CASE(QUERY_TIR);
	MLX5_COMMAND_STR_CASE(CREATE_SQ);
	MLX5_COMMAND_STR_CASE(MODIFY_SQ);
	MLX5_COMMAND_STR_CASE(DESTROY_SQ);
	MLX5_COMMAND_STR_CASE(QUERY_SQ);
	MLX5_COMMAND_STR_CASE(CREATE_RQ);
	MLX5_COMMAND_STR_CASE(MODIFY_RQ);
	MLX5_COMMAND_STR_CASE(DESTROY_RQ);
	MLX5_COMMAND_STR_CASE(QUERY_RQ);
	MLX5_COMMAND_STR_CASE(CREATE_RMP);
	MLX5_COMMAND_STR_CASE(MODIFY_RMP);
	MLX5_COMMAND_STR_CASE(DESTROY_RMP);
	MLX5_COMMAND_STR_CASE(QUERY_RMP);
	MLX5_COMMAND_STR_CASE(CREATE_TIS);
	MLX5_COMMAND_STR_CASE(MODIFY_TIS);
	MLX5_COMMAND_STR_CASE(DESTROY_TIS);
	MLX5_COMMAND_STR_CASE(QUERY_TIS);
	MLX5_COMMAND_STR_CASE(CREATE_RQT);
	MLX5_COMMAND_STR_CASE(MODIFY_RQT);
	MLX5_COMMAND_STR_CASE(DESTROY_RQT);
	MLX5_COMMAND_STR_CASE(QUERY_RQT);
	MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ROOT);
	MLX5_COMMAND_STR_CASE(CREATE_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(CREATE_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(DELETE_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER);
	MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
	MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
	default: return "unknown command opcode";
	}
}
static void dump_command(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_work_ent *ent, int input)
{
	u16 op = be16_to_cpu(((struct mlx5_inbox_hdr *)(ent->lay->in))->opcode);
	struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
	struct mlx5_cmd_mailbox *next = msg->next;
	int data_only;
	u32 offset = 0;
	int dump_len;

	data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));

	if (data_only)
		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
				   "dump command data %s(0x%x) %s\n",
				   mlx5_command_str(op), op,
				   input ? "INPUT" : "OUTPUT");
	else
		mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n",
			      mlx5_command_str(op), op,
			      input ? "INPUT" : "OUTPUT");

	if (data_only) {
		if (input) {
			dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset);
			offset += sizeof(ent->lay->in);
		} else {
			dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset);
			offset += sizeof(ent->lay->out);
		}
	} else {
		dump_buf(ent->lay, sizeof(*ent->lay), 0, offset);
		offset += sizeof(*ent->lay);
	}

	while (next && offset < msg->len) {
		if (data_only) {
			dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset);
			dump_buf(next->buf, dump_len, 1, offset);
			offset += MLX5_CMD_DATA_BLOCK_SIZE;
		} else {
			mlx5_core_dbg(dev, "command block:\n");
			dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset);
			offset += sizeof(struct mlx5_cmd_prot_block);
		}
		next = next->next;
	}
}
static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
{
	struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);

	return be16_to_cpu(hdr->opcode);
}
static void cb_timeout_handler(struct work_struct *work)
{
	struct delayed_work *dwork = container_of(work, struct delayed_work,
						  work);
	struct mlx5_cmd_work_ent *ent = container_of(dwork,
						     struct mlx5_cmd_work_ent,
						     cb_timeout_work);
	struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev,
						 cmd);

	ent->ret = -ETIMEDOUT;
	mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
		       mlx5_command_str(msg_to_opcode(ent->in)),
		       msg_to_opcode(ent->in));
	mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
}
static void cmd_work_handler(struct work_struct *work)
{
	struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
	struct mlx5_cmd *cmd = ent->cmd;
	struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
	unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
	struct mlx5_cmd_layout *lay;
	struct semaphore *sem;
	unsigned long flags;

	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
	down(sem);
	if (!ent->page_queue) {
		ent->idx = alloc_ent(cmd);
		if (ent->idx < 0) {
			mlx5_core_err(dev, "failed to allocate command entry\n");
			up(sem);
			return;
		}
	} else {
		ent->idx = cmd->max_reg_cmds;
		spin_lock_irqsave(&cmd->alloc_lock, flags);
		clear_bit(ent->idx, &cmd->bitmask);
		spin_unlock_irqrestore(&cmd->alloc_lock, flags);
	}

	ent->token = alloc_token(cmd);
	cmd->ent_arr[ent->idx] = ent;
	lay = get_inst(cmd, ent->idx);
	ent->lay = lay;
	memset(lay, 0, sizeof(*lay));
	memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
	ent->op = be32_to_cpu(lay->in[0]) >> 16;
	if (ent->in->next)
		lay->in_ptr = cpu_to_be64(ent->in->next->dma);
	lay->inlen = cpu_to_be32(ent->in->len);
	if (ent->out->next)
		lay->out_ptr = cpu_to_be64(ent->out->next->dma);
	lay->outlen = cpu_to_be32(ent->out->len);
	lay->type = MLX5_PCI_CMD_XPORT;
	lay->token = ent->token;
	lay->status_own = CMD_OWNER_HW;
	set_signature(ent, !cmd->checksum_disabled);
	dump_command(dev, ent, 1);
	ent->ts1 = ktime_get_ns();

	if (ent->callback)
		schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);

	/* ring doorbell after the descriptor is valid */
	mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
	wmb();
	iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
	mmiowb();
	/* if not in polling don't use ent after this point */
	if (cmd->mode == CMD_MODE_POLLING) {
		poll_timeout(ent);
		/* make sure we read the descriptor after ownership is SW */
		rmb();
		mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
	}
}
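/*
 * In polling mode the work handler itself waits for the HW ownership bit to
 * clear (poll_timeout()) and then calls mlx5_cmd_comp_handler() directly; in
 * event mode it simply returns after ringing the doorbell and the completion
 * is reported asynchronously, which is why 'ent' must not be touched past the
 * doorbell write unless we are polling.
 */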
static const char *deliv_status_to_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_DELIVERY_STAT_OK:
		return "no errors";
	case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
		return "signature error";
	case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
		return "token error";
	case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
		return "bad block number";
	case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
		return "output pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
		return "input pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_FW_ERR:
		return "firmware internal error";
	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
		return "command input length error";
	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
		return "command output length error";
	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
		return "reserved fields not cleared";
	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
		return "bad command descriptor type";
	default:
		return "unknown status code";
	}
}
static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
	unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
	struct mlx5_cmd *cmd = &dev->cmd;
	int err;

	if (cmd->mode == CMD_MODE_POLLING) {
		wait_for_completion(&ent->done);
	} else if (!wait_for_completion_timeout(&ent->done, timeout)) {
		ent->ret = -ETIMEDOUT;
		mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
	}

	err = ent->ret;

	if (err == -ETIMEDOUT) {
		mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
			       mlx5_command_str(msg_to_opcode(ent->in)),
			       msg_to_opcode(ent->in));
	}
	mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
		      err, deliv_status_to_str(ent->status), ent->status);

	return err;
}
static __be32 *get_synd_ptr(struct mlx5_outbox_hdr *out)
{
	return &out->syndrome;
}

static u8 *get_status_ptr(struct mlx5_outbox_hdr *out)
{
	return &out->status;
}
/*  Notes:
 *    1. Callback functions may not sleep
 *    2. page queue commands do not support asynchronous completion
 */
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
			   struct mlx5_cmd_msg *out, void *uout, int uout_size,
			   mlx5_cmd_cbk_t callback,
			   void *context, int page_queue, u8 *status)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	struct mlx5_cmd_stats *stats;
	int err = 0;
	s64 ds;
	u16 op;

	if (callback && page_queue)
		return -EINVAL;

	ent = alloc_cmd(cmd, in, out, uout, uout_size, callback, context,
			page_queue);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	if (!callback)
		init_completion(&ent->done);

	INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler);
	INIT_WORK(&ent->work, cmd_work_handler);
	if (page_queue) {
		cmd_work_handler(&ent->work);
	} else if (!queue_work(cmd->wq, &ent->work)) {
		mlx5_core_warn(dev, "failed to queue work\n");
		err = -ENOMEM;
		goto out_free;
	}

	if (callback)
		goto out;

	err = wait_func(dev, ent);
	if (err == -ETIMEDOUT)
		goto out_free;

	ds = ent->ts2 - ent->ts1;
	op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
	if (op < ARRAY_SIZE(cmd->stats)) {
		stats = &cmd->stats[op];
		spin_lock_irq(&stats->lock);
		stats->sum += ds;
		++stats->n;
		spin_unlock_irq(&stats->lock);
	}
	mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
			   "fw exec time for %s is %lld nsec\n",
			   mlx5_command_str(op), ds);
	*status = ent->status;

out_free:
	free_cmd(ent);
out:
	return err;
}
static ssize_t dbg_write(struct file *filp, const char __user *buf,
			 size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char lbuf[3];
	int err;

	if (!dbg->in_msg || !dbg->out_msg)
		return -ENOMEM;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	lbuf[sizeof(lbuf) - 1] = 0;

	if (strcmp(lbuf, "go"))
		return -EINVAL;

	err = mlx5_cmd_exec(dev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen);

	return err ? err : count;
}

static const struct file_operations fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= dbg_write,
};
static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_mailbox *next;
	int copy;

	if (!to || !from)
		return -ENOMEM;

	copy = min_t(int, size, sizeof(to->first.data));
	memcpy(to->first.data, from, copy);
	size -= copy;
	from += copy;

	next = to->next;
	while (size) {
		if (!next)
			return -ENOMEM;

		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
		block = next->buf;
		memcpy(block->data, from, copy);
		from += copy;
		size -= copy;
		next = next->next;
	}

	return 0;
}

static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_mailbox *next;
	int copy;

	if (!to || !from)
		return -ENOMEM;

	copy = min_t(int, size, sizeof(from->first.data));
	memcpy(to, from->first.data, copy);
	size -= copy;
	to += copy;

	next = from->next;
	while (size) {
		if (!next)
			return -ENOMEM;

		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
		block = next->buf;
		memcpy(to, block->data, copy);
		to += copy;
		size -= copy;
		next = next->next;
	}

	return 0;
}
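/*
 * Command messages are split between an inline area and a mailbox chain: the
 * first bytes of a message live in msg->first.data (copied into the command
 * descriptor itself in cmd_work_handler()), and anything beyond that is
 * carried in chained mlx5_cmd_prot_block mailboxes, each holding up to
 * MLX5_CMD_DATA_BLOCK_SIZE bytes of payload.  mlx5_copy_to_msg() and
 * mlx5_copy_from_msg() above walk that chain in both directions.
 */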
static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev,
					      gfp_t flags)
{
	struct mlx5_cmd_mailbox *mailbox;

	mailbox = kmalloc(sizeof(*mailbox), flags);
	if (!mailbox)
		return ERR_PTR(-ENOMEM);

	mailbox->buf = pci_pool_alloc(dev->cmd.pool, flags,
				      &mailbox->dma);
	if (!mailbox->buf) {
		mlx5_core_dbg(dev, "failed allocation\n");
		kfree(mailbox);
		return ERR_PTR(-ENOMEM);
	}
	memset(mailbox->buf, 0, sizeof(struct mlx5_cmd_prot_block));
	mailbox->next = NULL;

	return mailbox;
}

static void free_cmd_box(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_mailbox *mailbox)
{
	pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
	kfree(mailbox);
}
static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
					       gfp_t flags, int size)
{
	struct mlx5_cmd_mailbox *tmp, *head = NULL;
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_msg *msg;
	int blen;
	int err;
	int n;
	int i;

	msg = kzalloc(sizeof(*msg), flags);
	if (!msg)
		return ERR_PTR(-ENOMEM);

	blen = size - min_t(int, sizeof(msg->first.data), size);
	n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) / MLX5_CMD_DATA_BLOCK_SIZE;

	for (i = 0; i < n; i++) {
		tmp = alloc_cmd_box(dev, flags);
		if (IS_ERR(tmp)) {
			mlx5_core_warn(dev, "failed allocating block\n");
			err = PTR_ERR(tmp);
			goto err_alloc;
		}

		block = tmp->buf;
		tmp->next = head;
		block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
		block->block_num = cpu_to_be32(n - i - 1);
		head = tmp;
	}
	msg->next = head;
	msg->len = size;
	return msg;

err_alloc:
	while (head) {
		tmp = head->next;
		free_cmd_box(dev, head);
		head = tmp;
	}
	kfree(msg);

	return ERR_PTR(err);
}

static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
			      struct mlx5_cmd_msg *msg)
{
	struct mlx5_cmd_mailbox *head = msg->next;
	struct mlx5_cmd_mailbox *next;

	while (head) {
		next = head->next;
		free_cmd_box(dev, head);
		head = next;
	}
	kfree(msg);
}
static ssize_t data_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	void *ptr;
	int err;

	if (*pos != 0)
		return -EINVAL;

	kfree(dbg->in_msg);
	dbg->in_msg = NULL;
	dbg->inlen = 0;

	ptr = kzalloc(count, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	if (copy_from_user(ptr, buf, count)) {
		err = -EFAULT;
		goto out;
	}
	dbg->in_msg = ptr;
	dbg->inlen = count;

	*pos = count;

	return count;

out:
	kfree(ptr);
	return err;
}

static ssize_t data_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	int copy;

	if (*pos)
		return 0;

	if (!dbg->out_msg)
		return -ENOMEM;

	copy = min_t(int, count, dbg->outlen);
	if (copy_to_user(buf, dbg->out_msg, copy))
		return -EFAULT;

	*pos += copy;

	return copy;
}

static const struct file_operations dfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= data_write,
	.read	= data_read,
};
static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count,
			   loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char outlen[8];
	int err;

	if (*pos)
		return 0;

	err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen);
	if (err < 0)
		return err;

	if (copy_to_user(buf, &outlen, err))
		return -EFAULT;

	*pos += err;

	return err;
}

static ssize_t outlen_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char outlen_str[8];
	int outlen;
	void *ptr;
	int err;

	if (*pos != 0 || count > 6)
		return -EINVAL;

	kfree(dbg->out_msg);
	dbg->out_msg = NULL;
	dbg->outlen = 0;

	if (copy_from_user(outlen_str, buf, count))
		return -EFAULT;

	outlen_str[count] = 0;

	err = sscanf(outlen_str, "%d", &outlen);
	if (err < 0)
		return err;

	ptr = kzalloc(outlen, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	dbg->out_msg = ptr;
	dbg->outlen = outlen;

	*pos = count;

	return count;
}

static const struct file_operations olfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= outlen_write,
	.read	= outlen_read,
};
static void set_wqname(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
		 dev_name(&dev->pdev->dev));
}

static void clean_debug_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;

	if (!mlx5_debugfs_root)
		return;

	mlx5_cmdif_debugfs_cleanup(dev);
	debugfs_remove_recursive(dbg->dbg_root);
}
static int create_debugfs_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	int err = -ENOMEM;

	if (!mlx5_debugfs_root)
		return 0;

	dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root);
	if (!dbg->dbg_root)
		return -ENOMEM;

	dbg->dbg_in = debugfs_create_file("in", 0400, dbg->dbg_root,
					  dev, &dfops);
	if (!dbg->dbg_in)
		goto err_dbg;

	dbg->dbg_out = debugfs_create_file("out", 0200, dbg->dbg_root,
					   dev, &dfops);
	if (!dbg->dbg_out)
		goto err_dbg;

	dbg->dbg_outlen = debugfs_create_file("out_len", 0600, dbg->dbg_root,
					      dev, &olfops);
	if (!dbg->dbg_outlen)
		goto err_dbg;

	dbg->dbg_status = debugfs_create_u8("status", 0600, dbg->dbg_root,
					    &dbg->status);
	if (!dbg->dbg_status)
		goto err_dbg;

	dbg->dbg_run = debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);
	if (!dbg->dbg_run)
		goto err_dbg;

	mlx5_cmdif_debugfs_init(dev);

	return 0;

err_dbg:
	clean_debug_files(dev);
	return err;
}
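/*
 * The debugfs interface created above lives in a "cmd" directory under the
 * device's mlx5 debugfs root (dev->priv.dbg_root) and exposes "in" (stage a
 * raw command inbox via data_write()), "out" (read back the outbox),
 * "out_len" (expected outbox length), "status" (last status byte) and "run"
 * (write "go" to execute the staged command through dbg_write()).
 */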
static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->max_reg_cmds; i++)
		down(&cmd->sem);

	down(&cmd->pages_sem);

	cmd->mode = mode;

	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}

void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
	mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
}

void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
{
	mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
}
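/*
 * mlx5_cmd_change_mod() switches between polling and event-driven completion
 * by taking every regular command semaphore (max_reg_cmds of them) plus the
 * page-queue semaphore, which guarantees no command is in flight while
 * cmd->mode is being changed, and then releases them all again.
 */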
static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
{
	unsigned long flags;

	if (msg->cache) {
		spin_lock_irqsave(&msg->cache->lock, flags);
		list_add_tail(&msg->list, &msg->cache->head);
		spin_unlock_irqrestore(&msg->cache->lock, flags);
	} else {
		mlx5_free_cmd_msg(dev, msg);
	}
}
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	mlx5_cmd_cbk_t callback;
	void *context;
	int err;
	int i;
	s64 ds;
	struct mlx5_cmd_stats *stats;
	unsigned long flags;
	unsigned long vector;

	/* there can be at most 32 command queues */
	vector = vec & 0xffffffff;
	for (i = 0; i < (1 << cmd->log_sz); i++) {
		if (test_bit(i, &vector)) {
			struct semaphore *sem;

			ent = cmd->ent_arr[i];
			if (ent->callback)
				cancel_delayed_work(&ent->cb_timeout_work);
			if (ent->page_queue)
				sem = &cmd->pages_sem;
			else
				sem = &cmd->sem;
			ent->ts2 = ktime_get_ns();
			memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
			dump_command(dev, ent, 0);
			if (!ent->ret) {
				if (!cmd->checksum_disabled)
					ent->ret = verify_signature(ent);
				else
					ent->ret = 0;
				if (vec & MLX5_TRIGGERED_CMD_COMP)
					ent->status = MLX5_DRIVER_STATUS_ABORTED;
				else
					ent->status = ent->lay->status_own >> 1;

				mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
					      ent->ret, deliv_status_to_str(ent->status), ent->status);
			}
			free_ent(cmd, ent->idx);

			if (ent->callback) {
				ds = ent->ts2 - ent->ts1;
				if (ent->op < ARRAY_SIZE(cmd->stats)) {
					stats = &cmd->stats[ent->op];
					spin_lock_irqsave(&stats->lock, flags);
					stats->sum += ds;
					++stats->n;
					spin_unlock_irqrestore(&stats->lock, flags);
				}

				callback = ent->callback;
				context = ent->context;
				err = ent->ret;
				if (!err)
					err = mlx5_copy_from_msg(ent->uout,
								 ent->out,
								 ent->uout_size);

				mlx5_free_cmd_msg(dev, ent->out);
				free_msg(dev, ent->in);

				err = err ? err : ent->status;
				free_cmd(ent);
				callback(err, context);
			} else {
				complete(&ent->done);
			}
			up(sem);
		}
	}
}
EXPORT_SYMBOL(mlx5_cmd_comp_handler);
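/*
 * Completion handling: each set bit in the low 32 bits of 'vec' names one
 * command-queue slot that has completed, so the loop above walks all
 * 1 << log_sz slots.  MLX5_TRIGGERED_CMD_COMP marks completions forced by the
 * driver itself (timeouts, internal-error recovery); those entries are
 * reported with the driver-generated MLX5_DRIVER_STATUS_ABORTED status rather
 * than the status byte written by the firmware.
 */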
static int status_to_err(u8 status)
{
	return status ? -1 : 0; /* TBD more meaningful codes */
}
static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
				      gfp_t gfp)
{
	struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
	struct mlx5_cmd *cmd = &dev->cmd;
	struct cache_ent *ent = NULL;

	if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE)
		ent = &cmd->cache.large;
	else if (in_size > 16 && in_size <= MED_LIST_SIZE)
		ent = &cmd->cache.med;

	if (ent) {
		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			msg = list_entry(ent->head.next, typeof(*msg), list);
			/* For cached lists, we must explicitly state what is
			 * the real size
			 */
			msg->len = in_size;
			list_del(&msg->list);
		}
		spin_unlock_irq(&ent->lock);
	}

	if (IS_ERR(msg))
		msg = mlx5_alloc_cmd_msg(dev, gfp, in_size);

	return msg;
}
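/*
 * Message allocation is tiered: requests up to MED_LIST_SIZE bytes are served
 * from the "med" cache and requests up to LONG_LIST_SIZE from the "large"
 * cache (both pre-populated in create_msg_cache() below); anything else, or a
 * cache miss, falls back to mlx5_alloc_cmd_msg().  free_msg() returns cached
 * messages to their list instead of freeing them.
 */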
static u16 opcode_from_in(struct mlx5_inbox_hdr *in)
{
	return be16_to_cpu(in->opcode);
}

static int is_manage_pages(struct mlx5_inbox_hdr *in)
{
	return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES;
}
static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		    int out_size, mlx5_cmd_cbk_t callback, void *context)
{
	struct mlx5_cmd_msg *inb;
	struct mlx5_cmd_msg *outb;
	int pages_queue;
	gfp_t gfp;
	int err;
	u8 status = 0;
	u32 drv_synd;

	if (pci_channel_offline(dev->pdev) ||
	    dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		err = mlx5_internal_err_ret_value(dev, opcode_from_in(in), &drv_synd, &status);
		*get_synd_ptr(out) = cpu_to_be32(drv_synd);
		*get_status_ptr(out) = status;
		return err;
	}

	pages_queue = is_manage_pages(in);
	gfp = callback ? GFP_ATOMIC : GFP_KERNEL;

	inb = alloc_msg(dev, in_size, gfp);
	if (IS_ERR(inb)) {
		err = PTR_ERR(inb);
		return err;
	}

	err = mlx5_copy_to_msg(inb, in, in_size);
	if (err) {
		mlx5_core_warn(dev, "err %d\n", err);
		goto out_in;
	}

	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size);
	if (IS_ERR(outb)) {
		err = PTR_ERR(outb);
		goto out_in;
	}

	err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
			      pages_queue, &status);
	if (err)
		goto out_out;

	mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
	if (status) {
		err = status_to_err(status);
		goto out_out;
	}

	if (!callback)
		err = mlx5_copy_from_msg(out, outb, out_size);

out_out:
	if (!callback)
		mlx5_free_cmd_msg(dev, outb);

out_in:
	if (!callback)
		free_msg(dev, inb);
	return err;
}
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size)
{
	return cmd_exec(dev, in, in_size, out, out_size, NULL, NULL);
}
EXPORT_SYMBOL(mlx5_cmd_exec);

int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
		     void *out, int out_size, mlx5_cmd_cbk_t callback,
		     void *context)
{
	return cmd_exec(dev, in, in_size, out, out_size, callback, context);
}
EXPORT_SYMBOL(mlx5_cmd_exec_cb);
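/*
 * Usage sketch (illustrative only, not part of this driver): a caller builds
 * a big-endian inbox whose first 16 bits are the opcode and hands raw buffers
 * to mlx5_cmd_exec(); the command layer takes care of mailbox chaining,
 * doorbell ringing and completion.  A NOP could be issued roughly like this,
 * assuming in/out layouts that start with mlx5_inbox_hdr/mlx5_outbox_hdr:
 *
 *	struct mlx5_inbox_hdr in = { .opcode = cpu_to_be16(MLX5_CMD_OP_NOP) };
 *	struct mlx5_outbox_hdr out = {};
 *	int err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
 *	if (!err)
 *		err = mlx5_cmd_status_to_err(&out);
 *
 * Real callers use the full command structures, which are larger than the
 * bare headers shown here.
 */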
static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;
	struct mlx5_cmd_msg *n;

	list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) {
		list_del(&msg->list);
		mlx5_free_cmd_msg(dev, msg);
	}

	list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) {
		list_del(&msg->list);
		mlx5_free_cmd_msg(dev, msg);
	}
}

static int create_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;
	int err;
	int i;

	spin_lock_init(&cmd->cache.large.lock);
	INIT_LIST_HEAD(&cmd->cache.large.head);
	spin_lock_init(&cmd->cache.med.lock);
	INIT_LIST_HEAD(&cmd->cache.med.head);

	for (i = 0; i < NUM_LONG_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		msg->cache = &cmd->cache.large;
		list_add_tail(&msg->list, &cmd->cache.large.head);
	}

	for (i = 0; i < NUM_MED_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		msg->cache = &cmd->cache.med;
		list_add_tail(&msg->list, &cmd->cache.med.head);
	}

	return 0;

ex_err:
	destroy_msg_cache(dev);
	return err;
}
static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	struct device *ddev = &dev->pdev->dev;

	cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
						 &cmd->alloc_dma, GFP_KERNEL);
	if (!cmd->cmd_alloc_buf)
		return -ENOMEM;

	/* make sure it is aligned to 4K */
	if (!((uintptr_t)cmd->cmd_alloc_buf & (MLX5_ADAPTER_PAGE_SIZE - 1))) {
		cmd->cmd_buf = cmd->cmd_alloc_buf;
		cmd->dma = cmd->alloc_dma;
		cmd->alloc_size = MLX5_ADAPTER_PAGE_SIZE;
		return 0;
	}

	dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
	cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev,
						 2 * MLX5_ADAPTER_PAGE_SIZE - 1,
						 &cmd->alloc_dma, GFP_KERNEL);
	if (!cmd->cmd_alloc_buf)
		return -ENOMEM;

	cmd->cmd_buf = PTR_ALIGN(cmd->cmd_alloc_buf, MLX5_ADAPTER_PAGE_SIZE);
	cmd->dma = ALIGN(cmd->alloc_dma, MLX5_ADAPTER_PAGE_SIZE);
	cmd->alloc_size = 2 * MLX5_ADAPTER_PAGE_SIZE - 1;
	return 0;
}

static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	struct device *ddev = &dev->pdev->dev;

	dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
}
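/*
 * The command queue page must be 4K (MLX5_ADAPTER_PAGE_SIZE) aligned.
 * alloc_cmd_page() first tries a single-page coherent allocation and keeps it
 * if it happens to be aligned; otherwise it frees it and allocates
 * 2 * MLX5_ADAPTER_PAGE_SIZE - 1 bytes so that an aligned page can always be
 * carved out of the buffer with PTR_ALIGN()/ALIGN().
 */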
int mlx5_cmd_init(struct mlx5_core_dev *dev)
{
	int size = sizeof(struct mlx5_cmd_prot_block);
	int align = roundup_pow_of_two(size);
	struct mlx5_cmd *cmd = &dev->cmd;
	u32 cmd_h, cmd_l;
	u16 cmd_if_rev;
	int err;
	int i;

	memset(cmd, 0, sizeof(*cmd));
	cmd_if_rev = cmdif_rev(dev);
	if (cmd_if_rev != CMD_IF_REV) {
		dev_err(&dev->pdev->dev,
			"Driver cmdif rev(%d) differs from firmware's(%d)\n",
			CMD_IF_REV, cmd_if_rev);
		return -EINVAL;
	}

	cmd->pool = pci_pool_create("mlx5_cmd", dev->pdev, size, align, 0);
	if (!cmd->pool)
		return -ENOMEM;

	err = alloc_cmd_page(dev, cmd);
	if (err)
		goto err_free_pool;

	cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
	cmd->log_sz = cmd_l >> 4 & 0xf;
	cmd->log_stride = cmd_l & 0xf;
	if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
		dev_err(&dev->pdev->dev, "firmware reports too many outstanding commands %d\n",
			1 << cmd->log_sz);
		err = -EINVAL;
		goto err_free_page;
	}

	if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
		dev_err(&dev->pdev->dev, "command queue size overflow\n");
		err = -EINVAL;
		goto err_free_page;
	}

	cmd->checksum_disabled = 1;
	cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
	cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;

	cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
	if (cmd->cmdif_rev > CMD_IF_REV) {
		dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
			CMD_IF_REV, cmd->cmdif_rev);
		err = -ENOTSUPP;
		goto err_free_page;
	}

	spin_lock_init(&cmd->alloc_lock);
	spin_lock_init(&cmd->token_lock);
	for (i = 0; i < ARRAY_SIZE(cmd->stats); i++)
		spin_lock_init(&cmd->stats[i].lock);

	sema_init(&cmd->sem, cmd->max_reg_cmds);
	sema_init(&cmd->pages_sem, 1);

	cmd_h = (u32)((u64)(cmd->dma) >> 32);
	cmd_l = (u32)(cmd->dma);
	if (cmd_l & 0xfff) {
		dev_err(&dev->pdev->dev, "invalid command queue address\n");
		err = -ENOMEM;
		goto err_free_page;
	}

	iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
	iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);

	/* Make sure firmware sees the complete address before we proceed */
	wmb();

	mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));

	cmd->mode = CMD_MODE_POLLING;

	err = create_msg_cache(dev);
	if (err) {
		dev_err(&dev->pdev->dev, "failed to create command cache\n");
		goto err_free_page;
	}

	set_wqname(dev);
	cmd->wq = create_singlethread_workqueue(cmd->wq_name);
	if (!cmd->wq) {
		dev_err(&dev->pdev->dev, "failed to create command workqueue\n");
		err = -ENOMEM;
		goto err_cache;
	}

	err = create_debugfs_files(dev);
	if (err) {
		err = -ENOMEM;
		goto err_wq;
	}

	return 0;

err_wq:
	destroy_workqueue(cmd->wq);

err_cache:
	destroy_msg_cache(dev);

err_free_page:
	free_cmd_page(dev, cmd);

err_free_pool:
	pci_pool_destroy(cmd->pool);

	return err;
}
EXPORT_SYMBOL(mlx5_cmd_init);
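/*
 * Initialization notes: the low word read from iseg->cmdq_addr_l_sz encodes
 * the command queue geometry (log_sz in bits 7:4, log_stride in bits 3:0),
 * and the same register is later written back with the low 32 bits of the
 * queue DMA address, which is why the address must be 4K aligned (low 12 bits
 * clear).  The interface starts in polling mode; mlx5_cmd_use_events()
 * switches it to event-driven completion later during device startup.
 */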
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	clean_debug_files(dev);
	destroy_workqueue(cmd->wq);
	destroy_msg_cache(dev);
	free_cmd_page(dev, cmd);
	pci_pool_destroy(cmd->pool);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup);
static const char *cmd_status_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:
		return "OK";
	case MLX5_CMD_STAT_INT_ERR:
		return "internal error";
	case MLX5_CMD_STAT_BAD_OP_ERR:
		return "bad operation";
	case MLX5_CMD_STAT_BAD_PARAM_ERR:
		return "bad parameter";
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
		return "bad system state";
	case MLX5_CMD_STAT_BAD_RES_ERR:
		return "bad resource";
	case MLX5_CMD_STAT_RES_BUSY:
		return "resource busy";
	case MLX5_CMD_STAT_LIM_ERR:
		return "limits exceeded";
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
		return "bad resource state";
	case MLX5_CMD_STAT_IX_ERR:
		return "bad index";
	case MLX5_CMD_STAT_NO_RES_ERR:
		return "no resources";
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
		return "bad input length";
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
		return "bad output length";
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
		return "bad QP state";
	case MLX5_CMD_STAT_BAD_PKT_ERR:
		return "bad packet (discarded)";
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
		return "bad size too many outstanding CQEs";
	default:
		return "unknown status";
	}
}
static int cmd_status_to_err(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:				return 0;
	case MLX5_CMD_STAT_INT_ERR:			return -EIO;
	case MLX5_CMD_STAT_BAD_OP_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_PARAM_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_RES_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_RES_BUSY:			return -EBUSY;
	case MLX5_CMD_STAT_LIM_ERR:			return -ENOMEM;
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_IX_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_NO_RES_ERR:			return -EAGAIN;
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_PKT_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:	return -EINVAL;
	default:					return -EIO;
	}
}
/* this will be available till all the commands use set/get macros */
int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
{
	if (!hdr->status)
		return 0;

	pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
		cmd_status_str(hdr->status), hdr->status,
		be32_to_cpu(hdr->syndrome));

	return cmd_status_to_err(hdr->status);
}
int mlx5_cmd_status_to_err_v2(void *ptr)
{
	u32 syndrome;
	u8  status;

	status = be32_to_cpu(*(__be32 *)ptr) >> 24;
	if (!status)
		return 0;

	syndrome = be32_to_cpu(*(__be32 *)(ptr + 4));

	pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
		cmd_status_str(status), status, syndrome);

	return cmd_status_to_err(status);
}