/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #include <linux/kernel.h>
34 #include <linux/module.h>
35 #include <linux/mlx5/driver.h>
36 #include <linux/mlx5/cmd.h>
37 #include <linux/mlx5/srq.h>
38 #include <rdma/ib_verbs.h>
39 #include "mlx5_core.h"
40 #include <linux/mlx5/transobj.h>
42 void mlx5_srq_event(struct mlx5_core_dev
*dev
, u32 srqn
, int event_type
)
44 struct mlx5_srq_table
*table
= &dev
->priv
.srq_table
;
45 struct mlx5_core_srq
*srq
;
47 spin_lock(&table
->lock
);
49 srq
= radix_tree_lookup(&table
->tree
, srqn
);
51 atomic_inc(&srq
->refcount
);
53 spin_unlock(&table
->lock
);
56 mlx5_core_warn(dev
, "Async event for bogus SRQ 0x%08x\n", srqn
);
60 srq
->event(srq
, event_type
);
62 if (atomic_dec_and_test(&srq
->refcount
))
66 static int get_pas_size(struct mlx5_srq_attr
*in
)
68 u32 log_page_size
= in
->log_page_size
+ 12;
69 u32 log_srq_size
= in
->log_size
;
70 u32 log_rq_stride
= in
->wqe_shift
;
71 u32 page_offset
= in
->page_offset
;
72 u32 po_quanta
= 1 << (log_page_size
- 6);
73 u32 rq_sz
= 1 << (log_srq_size
+ 4 + log_rq_stride
);
74 u32 page_size
= 1 << log_page_size
;
75 u32 rq_sz_po
= rq_sz
+ (page_offset
* po_quanta
);
76 u32 rq_num_pas
= (rq_sz_po
+ page_size
- 1) / page_size
;
78 return rq_num_pas
* sizeof(u64
);
81 static void set_wq(void *wq
, struct mlx5_srq_attr
*in
)
83 MLX5_SET(wq
, wq
, wq_signature
, !!(in
->flags
84 & MLX5_SRQ_FLAG_WQ_SIG
));
85 MLX5_SET(wq
, wq
, log_wq_pg_sz
, in
->log_page_size
);
86 MLX5_SET(wq
, wq
, log_wq_stride
, in
->wqe_shift
+ 4);
87 MLX5_SET(wq
, wq
, log_wq_sz
, in
->log_size
);
88 MLX5_SET(wq
, wq
, page_offset
, in
->page_offset
);
89 MLX5_SET(wq
, wq
, lwm
, in
->lwm
);
90 MLX5_SET(wq
, wq
, pd
, in
->pd
);
91 MLX5_SET64(wq
, wq
, dbr_addr
, in
->db_record
);
94 static void set_srqc(void *srqc
, struct mlx5_srq_attr
*in
)
96 MLX5_SET(srqc
, srqc
, wq_signature
, !!(in
->flags
97 & MLX5_SRQ_FLAG_WQ_SIG
));
98 MLX5_SET(srqc
, srqc
, log_page_size
, in
->log_page_size
);
99 MLX5_SET(srqc
, srqc
, log_rq_stride
, in
->wqe_shift
);
100 MLX5_SET(srqc
, srqc
, log_srq_size
, in
->log_size
);
101 MLX5_SET(srqc
, srqc
, page_offset
, in
->page_offset
);
102 MLX5_SET(srqc
, srqc
, lwm
, in
->lwm
);
103 MLX5_SET(srqc
, srqc
, pd
, in
->pd
);
104 MLX5_SET64(srqc
, srqc
, dbr_addr
, in
->db_record
);
105 MLX5_SET(srqc
, srqc
, xrcd
, in
->xrcd
);
106 MLX5_SET(srqc
, srqc
, cqn
, in
->cqn
);
109 static void get_wq(void *wq
, struct mlx5_srq_attr
*in
)
111 if (MLX5_GET(wq
, wq
, wq_signature
))
112 in
->flags
&= MLX5_SRQ_FLAG_WQ_SIG
;
113 in
->log_page_size
= MLX5_GET(wq
, wq
, log_wq_pg_sz
);
114 in
->wqe_shift
= MLX5_GET(wq
, wq
, log_wq_stride
) - 4;
115 in
->log_size
= MLX5_GET(wq
, wq
, log_wq_sz
);
116 in
->page_offset
= MLX5_GET(wq
, wq
, page_offset
);
117 in
->lwm
= MLX5_GET(wq
, wq
, lwm
);
118 in
->pd
= MLX5_GET(wq
, wq
, pd
);
119 in
->db_record
= MLX5_GET64(wq
, wq
, dbr_addr
);
122 static void get_srqc(void *srqc
, struct mlx5_srq_attr
*in
)
124 if (MLX5_GET(srqc
, srqc
, wq_signature
))
125 in
->flags
&= MLX5_SRQ_FLAG_WQ_SIG
;
126 in
->log_page_size
= MLX5_GET(srqc
, srqc
, log_page_size
);
127 in
->wqe_shift
= MLX5_GET(srqc
, srqc
, log_rq_stride
);
128 in
->log_size
= MLX5_GET(srqc
, srqc
, log_srq_size
);
129 in
->page_offset
= MLX5_GET(srqc
, srqc
, page_offset
);
130 in
->lwm
= MLX5_GET(srqc
, srqc
, lwm
);
131 in
->pd
= MLX5_GET(srqc
, srqc
, pd
);
132 in
->db_record
= MLX5_GET64(srqc
, srqc
, dbr_addr
);
135 struct mlx5_core_srq
*mlx5_core_get_srq(struct mlx5_core_dev
*dev
, u32 srqn
)
137 struct mlx5_srq_table
*table
= &dev
->priv
.srq_table
;
138 struct mlx5_core_srq
*srq
;
140 spin_lock(&table
->lock
);
142 srq
= radix_tree_lookup(&table
->tree
, srqn
);
144 atomic_inc(&srq
->refcount
);
146 spin_unlock(&table
->lock
);
150 EXPORT_SYMBOL(mlx5_core_get_srq
);
152 static int create_srq_cmd(struct mlx5_core_dev
*dev
, struct mlx5_core_srq
*srq
,
153 struct mlx5_srq_attr
*in
)
155 u32 create_out
[MLX5_ST_SZ_DW(create_srq_out
)] = {0};
163 pas_size
= get_pas_size(in
);
164 inlen
= MLX5_ST_SZ_BYTES(create_srq_in
) + pas_size
;
165 create_in
= mlx5_vzalloc(inlen
);
169 srqc
= MLX5_ADDR_OF(create_srq_in
, create_in
, srq_context_entry
);
170 pas
= MLX5_ADDR_OF(create_srq_in
, create_in
, pas
);
173 memcpy(pas
, in
->pas
, pas_size
);
175 MLX5_SET(create_srq_in
, create_in
, opcode
,
176 MLX5_CMD_OP_CREATE_SRQ
);
178 err
= mlx5_cmd_exec_check_status(dev
, create_in
, inlen
, create_out
,
182 srq
->srqn
= MLX5_GET(create_srq_out
, create_out
, srqn
);
187 static int destroy_srq_cmd(struct mlx5_core_dev
*dev
,
188 struct mlx5_core_srq
*srq
)
190 u32 srq_in
[MLX5_ST_SZ_DW(destroy_srq_in
)] = {0};
191 u32 srq_out
[MLX5_ST_SZ_DW(destroy_srq_out
)] = {0};
193 MLX5_SET(destroy_srq_in
, srq_in
, opcode
,
194 MLX5_CMD_OP_DESTROY_SRQ
);
195 MLX5_SET(destroy_srq_in
, srq_in
, srqn
, srq
->srqn
);
197 return mlx5_cmd_exec_check_status(dev
, srq_in
, sizeof(srq_in
),
198 srq_out
, sizeof(srq_out
));
201 static int arm_srq_cmd(struct mlx5_core_dev
*dev
, struct mlx5_core_srq
*srq
,
204 /* arm_srq structs missing using identical xrc ones */
205 u32 srq_in
[MLX5_ST_SZ_DW(arm_xrc_srq_in
)] = {0};
206 u32 srq_out
[MLX5_ST_SZ_DW(arm_xrc_srq_out
)] = {0};
208 MLX5_SET(arm_xrc_srq_in
, srq_in
, opcode
, MLX5_CMD_OP_ARM_XRC_SRQ
);
209 MLX5_SET(arm_xrc_srq_in
, srq_in
, xrc_srqn
, srq
->srqn
);
210 MLX5_SET(arm_xrc_srq_in
, srq_in
, lwm
, lwm
);
212 return mlx5_cmd_exec_check_status(dev
, srq_in
, sizeof(srq_in
),
213 srq_out
, sizeof(srq_out
));
216 static int query_srq_cmd(struct mlx5_core_dev
*dev
, struct mlx5_core_srq
*srq
,
217 struct mlx5_srq_attr
*out
)
219 u32 srq_in
[MLX5_ST_SZ_DW(query_srq_in
)] = {0};
224 srq_out
= mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_srq_out
));
228 MLX5_SET(query_srq_in
, srq_in
, opcode
,
229 MLX5_CMD_OP_QUERY_SRQ
);
230 MLX5_SET(query_srq_in
, srq_in
, srqn
, srq
->srqn
);
231 err
= mlx5_cmd_exec_check_status(dev
, srq_in
, sizeof(srq_in
),
233 MLX5_ST_SZ_BYTES(query_srq_out
));
237 srqc
= MLX5_ADDR_OF(query_srq_out
, srq_out
, srq_context_entry
);
239 if (MLX5_GET(srqc
, srqc
, state
) != MLX5_SRQC_STATE_GOOD
)
240 out
->flags
|= MLX5_SRQ_FLAG_ERR
;
246 static int create_xrc_srq_cmd(struct mlx5_core_dev
*dev
,
247 struct mlx5_core_srq
*srq
,
248 struct mlx5_srq_attr
*in
)
250 u32 create_out
[MLX5_ST_SZ_DW(create_xrc_srq_out
)];
258 pas_size
= get_pas_size(in
);
259 inlen
= MLX5_ST_SZ_BYTES(create_xrc_srq_in
) + pas_size
;
260 create_in
= mlx5_vzalloc(inlen
);
264 xrc_srqc
= MLX5_ADDR_OF(create_xrc_srq_in
, create_in
,
265 xrc_srq_context_entry
);
266 pas
= MLX5_ADDR_OF(create_xrc_srq_in
, create_in
, pas
);
268 set_srqc(xrc_srqc
, in
);
269 MLX5_SET(xrc_srqc
, xrc_srqc
, user_index
, in
->user_index
);
270 memcpy(pas
, in
->pas
, pas_size
);
271 MLX5_SET(create_xrc_srq_in
, create_in
, opcode
,
272 MLX5_CMD_OP_CREATE_XRC_SRQ
);
274 memset(create_out
, 0, sizeof(create_out
));
275 err
= mlx5_cmd_exec_check_status(dev
, create_in
, inlen
, create_out
,
280 srq
->srqn
= MLX5_GET(create_xrc_srq_out
, create_out
, xrc_srqn
);
286 static int destroy_xrc_srq_cmd(struct mlx5_core_dev
*dev
,
287 struct mlx5_core_srq
*srq
)
289 u32 xrcsrq_in
[MLX5_ST_SZ_DW(destroy_xrc_srq_in
)];
290 u32 xrcsrq_out
[MLX5_ST_SZ_DW(destroy_xrc_srq_out
)];
292 memset(xrcsrq_in
, 0, sizeof(xrcsrq_in
));
293 memset(xrcsrq_out
, 0, sizeof(xrcsrq_out
));
295 MLX5_SET(destroy_xrc_srq_in
, xrcsrq_in
, opcode
,
296 MLX5_CMD_OP_DESTROY_XRC_SRQ
);
297 MLX5_SET(destroy_xrc_srq_in
, xrcsrq_in
, xrc_srqn
, srq
->srqn
);
299 return mlx5_cmd_exec_check_status(dev
, xrcsrq_in
, sizeof(xrcsrq_in
),
300 xrcsrq_out
, sizeof(xrcsrq_out
));
303 static int arm_xrc_srq_cmd(struct mlx5_core_dev
*dev
,
304 struct mlx5_core_srq
*srq
, u16 lwm
)
306 u32 xrcsrq_in
[MLX5_ST_SZ_DW(arm_xrc_srq_in
)];
307 u32 xrcsrq_out
[MLX5_ST_SZ_DW(arm_xrc_srq_out
)];
309 memset(xrcsrq_in
, 0, sizeof(xrcsrq_in
));
310 memset(xrcsrq_out
, 0, sizeof(xrcsrq_out
));
312 MLX5_SET(arm_xrc_srq_in
, xrcsrq_in
, opcode
, MLX5_CMD_OP_ARM_XRC_SRQ
);
313 MLX5_SET(arm_xrc_srq_in
, xrcsrq_in
, op_mod
, MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ
);
314 MLX5_SET(arm_xrc_srq_in
, xrcsrq_in
, xrc_srqn
, srq
->srqn
);
315 MLX5_SET(arm_xrc_srq_in
, xrcsrq_in
, lwm
, lwm
);
317 return mlx5_cmd_exec_check_status(dev
, xrcsrq_in
, sizeof(xrcsrq_in
),
318 xrcsrq_out
, sizeof(xrcsrq_out
));
321 static int query_xrc_srq_cmd(struct mlx5_core_dev
*dev
,
322 struct mlx5_core_srq
*srq
,
323 struct mlx5_srq_attr
*out
)
325 u32 xrcsrq_in
[MLX5_ST_SZ_DW(query_xrc_srq_in
)];
330 xrcsrq_out
= mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out
));
333 memset(xrcsrq_in
, 0, sizeof(xrcsrq_in
));
335 MLX5_SET(query_xrc_srq_in
, xrcsrq_in
, opcode
,
336 MLX5_CMD_OP_QUERY_XRC_SRQ
);
337 MLX5_SET(query_xrc_srq_in
, xrcsrq_in
, xrc_srqn
, srq
->srqn
);
338 err
= mlx5_cmd_exec_check_status(dev
, xrcsrq_in
, sizeof(xrcsrq_in
),
340 MLX5_ST_SZ_BYTES(query_xrc_srq_out
));
344 xrc_srqc
= MLX5_ADDR_OF(query_xrc_srq_out
, xrcsrq_out
,
345 xrc_srq_context_entry
);
346 get_srqc(xrc_srqc
, out
);
347 if (MLX5_GET(xrc_srqc
, xrc_srqc
, state
) != MLX5_XRC_SRQC_STATE_GOOD
)
348 out
->flags
|= MLX5_SRQ_FLAG_ERR
;
355 static int create_rmp_cmd(struct mlx5_core_dev
*dev
, struct mlx5_core_srq
*srq
,
356 struct mlx5_srq_attr
*in
)
365 pas_size
= get_pas_size(in
);
366 inlen
= MLX5_ST_SZ_BYTES(create_rmp_in
) + pas_size
;
367 create_in
= mlx5_vzalloc(inlen
);
371 rmpc
= MLX5_ADDR_OF(create_rmp_in
, create_in
, ctx
);
372 wq
= MLX5_ADDR_OF(rmpc
, rmpc
, wq
);
374 MLX5_SET(rmpc
, rmpc
, state
, MLX5_RMPC_STATE_RDY
);
376 memcpy(MLX5_ADDR_OF(rmpc
, rmpc
, wq
.pas
), in
->pas
, pas_size
);
378 err
= mlx5_core_create_rmp(dev
, create_in
, inlen
, &srq
->srqn
);
384 static int destroy_rmp_cmd(struct mlx5_core_dev
*dev
,
385 struct mlx5_core_srq
*srq
)
387 return mlx5_core_destroy_rmp(dev
, srq
->srqn
);
390 static int arm_rmp_cmd(struct mlx5_core_dev
*dev
,
391 struct mlx5_core_srq
*srq
,
400 in
= mlx5_vzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in
));
404 rmpc
= MLX5_ADDR_OF(modify_rmp_in
, in
, ctx
);
405 bitmask
= MLX5_ADDR_OF(modify_rmp_in
, in
, bitmask
);
406 wq
= MLX5_ADDR_OF(rmpc
, rmpc
, wq
);
408 MLX5_SET(modify_rmp_in
, in
, rmp_state
, MLX5_RMPC_STATE_RDY
);
409 MLX5_SET(modify_rmp_in
, in
, rmpn
, srq
->srqn
);
410 MLX5_SET(wq
, wq
, lwm
, lwm
);
411 MLX5_SET(rmp_bitmask
, bitmask
, lwm
, 1);
412 MLX5_SET(rmpc
, rmpc
, state
, MLX5_RMPC_STATE_RDY
);
414 err
= mlx5_core_modify_rmp(dev
, in
, MLX5_ST_SZ_BYTES(modify_rmp_in
));
420 static int query_rmp_cmd(struct mlx5_core_dev
*dev
, struct mlx5_core_srq
*srq
,
421 struct mlx5_srq_attr
*out
)
427 rmp_out
= mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_rmp_out
));
431 err
= mlx5_core_query_rmp(dev
, srq
->srqn
, rmp_out
);
435 rmpc
= MLX5_ADDR_OF(query_rmp_out
, rmp_out
, rmp_context
);
436 get_wq(MLX5_ADDR_OF(rmpc
, rmpc
, wq
), out
);
437 if (MLX5_GET(rmpc
, rmpc
, state
) != MLX5_RMPC_STATE_RDY
)
438 out
->flags
|= MLX5_SRQ_FLAG_ERR
;
445 static int create_srq_split(struct mlx5_core_dev
*dev
,
446 struct mlx5_core_srq
*srq
,
447 struct mlx5_srq_attr
*in
)
450 return create_srq_cmd(dev
, srq
, in
);
451 else if (srq
->common
.res
== MLX5_RES_XSRQ
)
452 return create_xrc_srq_cmd(dev
, srq
, in
);
454 return create_rmp_cmd(dev
, srq
, in
);
457 static int destroy_srq_split(struct mlx5_core_dev
*dev
,
458 struct mlx5_core_srq
*srq
)
461 return destroy_srq_cmd(dev
, srq
);
462 else if (srq
->common
.res
== MLX5_RES_XSRQ
)
463 return destroy_xrc_srq_cmd(dev
, srq
);
465 return destroy_rmp_cmd(dev
, srq
);
468 int mlx5_core_create_srq(struct mlx5_core_dev
*dev
, struct mlx5_core_srq
*srq
,
469 struct mlx5_srq_attr
*in
)
472 struct mlx5_srq_table
*table
= &dev
->priv
.srq_table
;
474 if (in
->type
== IB_SRQT_XRC
)
475 srq
->common
.res
= MLX5_RES_XSRQ
;
477 srq
->common
.res
= MLX5_RES_SRQ
;
479 err
= create_srq_split(dev
, srq
, in
);
483 atomic_set(&srq
->refcount
, 1);
484 init_completion(&srq
->free
);
486 spin_lock_irq(&table
->lock
);
487 err
= radix_tree_insert(&table
->tree
, srq
->srqn
, srq
);
488 spin_unlock_irq(&table
->lock
);
490 mlx5_core_warn(dev
, "err %d, srqn 0x%x\n", err
, srq
->srqn
);
491 goto err_destroy_srq_split
;
496 err_destroy_srq_split
:
497 destroy_srq_split(dev
, srq
);
501 EXPORT_SYMBOL(mlx5_core_create_srq
);
503 int mlx5_core_destroy_srq(struct mlx5_core_dev
*dev
, struct mlx5_core_srq
*srq
)
505 struct mlx5_srq_table
*table
= &dev
->priv
.srq_table
;
506 struct mlx5_core_srq
*tmp
;
509 spin_lock_irq(&table
->lock
);
510 tmp
= radix_tree_delete(&table
->tree
, srq
->srqn
);
511 spin_unlock_irq(&table
->lock
);
513 mlx5_core_warn(dev
, "srq 0x%x not found in tree\n", srq
->srqn
);
517 mlx5_core_warn(dev
, "corruption on srqn 0x%x\n", srq
->srqn
);
521 err
= destroy_srq_split(dev
, srq
);
525 if (atomic_dec_and_test(&srq
->refcount
))
526 complete(&srq
->free
);
527 wait_for_completion(&srq
->free
);
531 EXPORT_SYMBOL(mlx5_core_destroy_srq
);
533 int mlx5_core_query_srq(struct mlx5_core_dev
*dev
, struct mlx5_core_srq
*srq
,
534 struct mlx5_srq_attr
*out
)
537 return query_srq_cmd(dev
, srq
, out
);
538 else if (srq
->common
.res
== MLX5_RES_XSRQ
)
539 return query_xrc_srq_cmd(dev
, srq
, out
);
541 return query_rmp_cmd(dev
, srq
, out
);
543 EXPORT_SYMBOL(mlx5_core_query_srq
);
545 int mlx5_core_arm_srq(struct mlx5_core_dev
*dev
, struct mlx5_core_srq
*srq
,
549 return arm_srq_cmd(dev
, srq
, lwm
, is_srq
);
550 else if (srq
->common
.res
== MLX5_RES_XSRQ
)
551 return arm_xrc_srq_cmd(dev
, srq
, lwm
);
553 return arm_rmp_cmd(dev
, srq
, lwm
);
555 EXPORT_SYMBOL(mlx5_core_arm_srq
);
557 void mlx5_init_srq_table(struct mlx5_core_dev
*dev
)
559 struct mlx5_srq_table
*table
= &dev
->priv
.srq_table
;
561 memset(table
, 0, sizeof(*table
));
562 spin_lock_init(&table
->lock
);
563 INIT_RADIX_TREE(&table
->tree
, GFP_ATOMIC
);
566 void mlx5_cleanup_srq_table(struct mlx5_core_dev
*dev
)