/*
 * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <rdma/ib_umem.h>
#include "mlx5_ib.h"
/* not supported currently */
static int wq_signature;

enum {
	MLX5_IB_ACK_REQ_FREQ	= 8,
};

enum {
	MLX5_IB_DEFAULT_SCHED_QUEUE	= 0x83,
	MLX5_IB_DEFAULT_QP0_SCHED_QUEUE	= 0x3f,
	MLX5_IB_LINK_TYPE_IB		= 0,
	MLX5_IB_LINK_TYPE_ETH		= 1
};

enum {
	MLX5_IB_SQ_STRIDE	= 6,
	MLX5_IB_CACHE_LINE_SIZE	= 64,
};
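
/*
 * Translation table from IB verbs work-request opcodes to mlx5 WQE
 * opcodes; indexed by wr->opcode in mlx5_ib_post_send() below.
 */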
static const u32 mlx5_ib_opcode[] = {
	[IB_WR_SEND]				= MLX5_OPCODE_SEND,
	[IB_WR_SEND_WITH_IMM]			= MLX5_OPCODE_SEND_IMM,
	[IB_WR_RDMA_WRITE]			= MLX5_OPCODE_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM]		= MLX5_OPCODE_RDMA_WRITE_IMM,
	[IB_WR_RDMA_READ]			= MLX5_OPCODE_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP]		= MLX5_OPCODE_ATOMIC_CS,
	[IB_WR_ATOMIC_FETCH_AND_ADD]		= MLX5_OPCODE_ATOMIC_FA,
	[IB_WR_SEND_WITH_INV]			= MLX5_OPCODE_SEND_INVAL,
	[IB_WR_LOCAL_INV]			= MLX5_OPCODE_UMR,
	[IB_WR_FAST_REG_MR]			= MLX5_OPCODE_UMR,
	[IB_WR_MASKED_ATOMIC_CMP_AND_SWP]	= MLX5_OPCODE_ATOMIC_MASKED_CS,
	[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]	= MLX5_OPCODE_ATOMIC_MASKED_FA,
	[MLX5_IB_WR_UMR]			= MLX5_OPCODE_UMR,
};
	unsigned int		page_shift;
static int is_qp0(enum ib_qp_type qp_type)
{
	return qp_type == IB_QPT_SMI;
}

static int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == IB_QPT_GSI;
}

static int is_sqp(enum ib_qp_type qp_type)
{
	return is_qp0(qp_type) || is_qp1(qp_type);
}
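
/*
 * WQE address helpers: receive WQEs are indexed by the RQ's own
 * wqe_shift, while send WQEs are always addressed in 64-byte basic
 * blocks (1 << MLX5_IB_SQ_STRIDE), since a single send WR may span
 * several blocks.
 */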
static void *get_wqe(struct mlx5_ib_qp *qp, int offset)
{
	return mlx5_buf_offset(&qp->buf, offset);
}

static void *get_recv_wqe(struct mlx5_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
}

void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE));
}
static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
{
	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
	struct ib_event event;

	if (type == MLX5_EVENT_TYPE_PATH_MIG)
		to_mibqp(qp)->port = to_mibqp(qp)->alt_port;

	if (ibqp->event_handler) {
		event.device     = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case MLX5_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case MLX5_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case MLX5_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn);
			return;
		}

		ibqp->event_handler(&event, ibqp->qp_context);
	}
}
static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
		       int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
{
	int wqe_size;
	int wq_size;

	/* Sanity check RQ size before proceeding */
	if (cap->max_recv_wr > dev->mdev.caps.max_wqes)
		return -EINVAL;

	if (!has_rq) {
		qp->rq.wqe_shift = 0;
	} else {
		if (ucmd) {
			qp->rq.wqe_cnt = ucmd->rq_wqe_count;
			qp->rq.wqe_shift = ucmd->rq_wqe_shift;
			qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
			qp->rq.max_post = qp->rq.wqe_cnt;
		} else {
			wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0;
			wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg);
			wqe_size = roundup_pow_of_two(wqe_size);
			wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
			wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
			qp->rq.wqe_cnt = wq_size / wqe_size;
			if (wqe_size > dev->mdev.caps.max_rq_desc_sz) {
				mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
					    wqe_size,
					    dev->mdev.caps.max_rq_desc_sz);
				return -EINVAL;
			}
			qp->rq.wqe_shift = ilog2(wqe_size);
			qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
			qp->rq.max_post = qp->rq.wqe_cnt;
		}
	}

	return 0;
}
static int sq_overhead(enum ib_qp_type qp_type)
{
	int size = 0;

	switch (qp_type) {
	case IB_QPT_XRC_INI:
		size += sizeof(struct mlx5_wqe_xrc_seg);
		/* fall through */
	case IB_QPT_RC:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_atomic_seg) +
			sizeof(struct mlx5_wqe_raddr_seg);
		break;

	case IB_QPT_UC:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_raddr_seg);
		break;

	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_datagram_seg);
		break;

	case MLX5_IB_QPT_REG_UMR:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_umr_ctrl_seg) +
			sizeof(struct mlx5_mkey_seg);
		break;

	default:
		return -EINVAL;
	}

	return size;
}
static int calc_send_wqe(struct ib_qp_init_attr *attr)
{
	int inl_size = 0;
	int size;

	size = sq_overhead(attr->qp_type);

	if (attr->cap.max_inline_data) {
		inl_size = size + sizeof(struct mlx5_wqe_inline_seg) +
			attr->cap.max_inline_data;
	}

	size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg);

	return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
}
static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
			struct mlx5_ib_qp *qp)
{
	int wqe_size;
	int wq_size;

	if (!attr->cap.max_send_wr)
		return 0;

	wqe_size = calc_send_wqe(attr);
	mlx5_ib_dbg(dev, "wqe_size %d\n", wqe_size);
	if (wqe_size < 0)
		return wqe_size;

	if (wqe_size > dev->mdev.caps.max_sq_desc_sz) {
		mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
			    wqe_size, dev->mdev.caps.max_sq_desc_sz);
		return -EINVAL;
	}

	qp->max_inline_data = wqe_size - sq_overhead(attr->qp_type) -
		sizeof(struct mlx5_wqe_inline_seg);
	attr->cap.max_inline_data = qp->max_inline_data;

	wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
	qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
	if (qp->sq.wqe_cnt > dev->mdev.caps.max_wqes) {
		mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
			    qp->sq.wqe_cnt, dev->mdev.caps.max_wqes);
		return -ENOMEM;
	}
	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
	qp->sq.max_gs = attr->cap.max_send_sge;
	qp->sq.max_post = wq_size / wqe_size;
	attr->cap.max_send_wr = qp->sq.max_post;

	return wq_size;
}
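
/*
 * For user QPs the SQ geometry comes from the ucmd; validate it
 * against the device limits and compute the total buffer size
 * (RQ followed by SQ, in 64-byte send basic blocks).
 */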
static int set_user_buf_size(struct mlx5_ib_dev *dev,
			     struct mlx5_ib_qp *qp,
			     struct mlx5_ib_create_qp *ucmd)
{
	int desc_sz = 1 << qp->sq.wqe_shift;

	if (desc_sz > dev->mdev.caps.max_sq_desc_sz) {
		mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
			     desc_sz, dev->mdev.caps.max_sq_desc_sz);
		return -EINVAL;
	}

	if (ucmd->sq_wqe_count && ((1 << ilog2(ucmd->sq_wqe_count)) != ucmd->sq_wqe_count)) {
		mlx5_ib_warn(dev, "sq_wqe_count %d, sq_wqe_count %d\n",
			     ucmd->sq_wqe_count, ucmd->sq_wqe_count);
		return -EINVAL;
	}

	qp->sq.wqe_cnt = ucmd->sq_wqe_count;

	if (qp->sq.wqe_cnt > dev->mdev.caps.max_wqes) {
		mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
			     qp->sq.wqe_cnt, dev->mdev.caps.max_wqes);
		return -EINVAL;
	}

	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
		       (qp->sq.wqe_cnt << 6);

	return 0;
}
static int qp_has_rq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_INI ||
	    attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
	    attr->qp_type == MLX5_IB_QPT_REG_UMR ||
	    !attr->cap.max_recv_wr)
		return 0;

	return 1;
}
static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari)
{
	int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
	int start_uuar;
	int i;

	start_uuar = nuuars - uuari->num_low_latency_uuars;
	for (i = start_uuar; i < nuuars; i++) {
		if (!test_bit(i, uuari->bitmap)) {
			set_bit(i, uuari->bitmap);
			return i;
		}
	}

	return -ENOMEM;
}

static int alloc_med_class_uuar(struct mlx5_uuar_info *uuari)
{
	int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
	int minidx = 1;
	int uuarn;
	int end;
	int i;

	end = nuuars - uuari->num_low_latency_uuars;

	for (i = 1; i < end; i++) {
		uuarn = i & 3;
		if (uuarn == 2 || uuarn == 3)
			continue;

		if (uuari->count[i] < uuari->count[minidx])
			minidx = i;
	}

	uuari->count[minidx]++;
	return minidx;
}
static int alloc_uuar(struct mlx5_uuar_info *uuari,
		      enum mlx5_ib_latency_class lat)
{
	int uuarn = -EINVAL;

	mutex_lock(&uuari->lock);
	switch (lat) {
	case MLX5_IB_LATENCY_CLASS_LOW:
		uuarn = 0;
		uuari->count[uuarn]++;
		break;

	case MLX5_IB_LATENCY_CLASS_MEDIUM:
		uuarn = alloc_med_class_uuar(uuari);
		break;

	case MLX5_IB_LATENCY_CLASS_HIGH:
		uuarn = alloc_high_class_uuar(uuari);
		break;

	case MLX5_IB_LATENCY_CLASS_FAST_PATH:
		uuarn = 2;
		uuari->count[uuarn]++;
		break;
	}
	mutex_unlock(&uuari->lock);

	return uuarn;
}
static void free_med_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
	clear_bit(uuarn, uuari->bitmap);
	--uuari->count[uuarn];
}

static void free_high_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
	clear_bit(uuarn, uuari->bitmap);
	--uuari->count[uuarn];
}

static void free_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
	int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
	int high_uuar = nuuars - uuari->num_low_latency_uuars;

	mutex_lock(&uuari->lock);
	if (!uuarn) {
		--uuari->count[uuarn];
		goto out;
	}

	if (uuarn < high_uuar) {
		free_med_class_uuar(uuari, uuarn);
		goto out;
	}

	free_high_class_uuar(uuari, uuarn);

out:
	mutex_unlock(&uuari->lock);
}
static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:	return MLX5_QP_STATE_RST;
	case IB_QPS_INIT:	return MLX5_QP_STATE_INIT;
	case IB_QPS_RTR:	return MLX5_QP_STATE_RTR;
	case IB_QPS_RTS:	return MLX5_QP_STATE_RTS;
	case IB_QPS_SQD:	return MLX5_QP_STATE_SQD;
	case IB_QPS_SQE:	return MLX5_QP_STATE_SQER;
	case IB_QPS_ERR:	return MLX5_QP_STATE_ERR;
	default:		return -1;
	}
}
static int to_mlx5_st(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_RC:			return MLX5_QP_ST_RC;
	case IB_QPT_UC:			return MLX5_QP_ST_UC;
	case IB_QPT_UD:			return MLX5_QP_ST_UD;
	case MLX5_IB_QPT_REG_UMR:	return MLX5_QP_ST_REG_UMR;
	case IB_QPT_XRC_INI:
	case IB_QPT_XRC_TGT:		return MLX5_QP_ST_XRC;
	case IB_QPT_SMI:		return MLX5_QP_ST_QP0;
	case IB_QPT_GSI:		return MLX5_QP_ST_QP1;
	case IB_QPT_RAW_IPV6:		return MLX5_QP_ST_RAW_IPV6;
	case IB_QPT_RAW_ETHERTYPE:	return MLX5_QP_ST_RAW_ETHERTYPE;
	case IB_QPT_RAW_PACKET:
	default:			return -EINVAL;
	}
}
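
/*
 * Each UAR page exposes MLX5_BF_REGS_PER_PAGE blue-flame registers, so
 * a uuar number maps to its backing UAR index by dividing by the
 * per-page register count.
 */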
static int uuarn_to_uar_index(struct mlx5_uuar_info *uuari, int uuarn)
{
	return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index;
}
static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
			  struct mlx5_ib_qp *qp, struct ib_udata *udata,
			  struct mlx5_create_qp_mbox_in **in,
			  struct mlx5_ib_create_qp_resp *resp, int *inlen)
{
	struct mlx5_ib_ucontext *context;
	struct mlx5_ib_create_qp ucmd;
	int page_shift, uar_index, npages, ncont, offset;
	int uuarn;
	int err;

	err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
	if (err) {
		mlx5_ib_dbg(dev, "copy failed\n");
		return err;
	}

	context = to_mucontext(pd->uobject->context);
	/*
	 * TBD: should come from the verbs when we have the API
	 */
	uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_HIGH);
	if (uuarn < 0) {
		mlx5_ib_dbg(dev, "failed to allocate low latency UUAR\n");
		mlx5_ib_dbg(dev, "reverting to high latency\n");
		uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW);
		if (uuarn < 0) {
			mlx5_ib_dbg(dev, "uuar allocation failed\n");
			return uuarn;
		}
	}

	uar_index = uuarn_to_uar_index(&context->uuari, uuarn);
	mlx5_ib_dbg(dev, "uuarn 0x%x, uar_index 0x%x\n", uuarn, uar_index);

	err = set_user_buf_size(dev, qp, &ucmd);
	if (err)
		goto err_uuar;

	qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
			       qp->buf_size, 0, 0);
	if (IS_ERR(qp->umem)) {
		mlx5_ib_dbg(dev, "umem_get failed\n");
		err = PTR_ERR(qp->umem);
		goto err_uuar;
	}

	mlx5_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift,
			   &ncont, NULL);
	err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift, &offset);
	if (err) {
		mlx5_ib_warn(dev, "bad offset\n");
		goto err_umem;
	}
	mlx5_ib_dbg(dev, "addr 0x%llx, size %d, npages %d, page_shift %d, ncont %d, offset %d\n",
		    ucmd.buf_addr, qp->buf_size, npages, page_shift, ncont, offset);

	*inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
	*in = mlx5_vzalloc(*inlen);
	if (!*in) {
		err = -ENOMEM;
		goto err_umem;
	}
	mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0);
	(*in)->ctx.log_pg_sz_remote_qpn =
		cpu_to_be32((page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
	(*in)->ctx.params2 = cpu_to_be32(offset << 6);

	(*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
	resp->uuar_index = uuarn;

	err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db);
	if (err) {
		mlx5_ib_dbg(dev, "map failed\n");
		goto err_free;
	}

	err = ib_copy_to_udata(udata, resp, sizeof(*resp));
	if (err) {
		mlx5_ib_dbg(dev, "copy failed\n");
		goto err_unmap;
	}
	qp->create_type = MLX5_QP_USER;

	return 0;

err_unmap:
	mlx5_ib_db_unmap_user(context, &qp->db);

err_free:
	mlx5_vfree(*in);

err_umem:
	ib_umem_release(qp->umem);

err_uuar:
	free_uuar(&context->uuari, uuarn);
	return err;
}
static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp)
{
	struct mlx5_ib_ucontext *context;

	context = to_mucontext(pd->uobject->context);
	mlx5_ib_db_unmap_user(context, &qp->db);
	ib_umem_release(qp->umem);
	free_uuar(&context->uuari, qp->uuarn);
}
static int create_kernel_qp(struct mlx5_ib_dev *dev,
			    struct ib_qp_init_attr *init_attr,
			    struct mlx5_ib_qp *qp,
			    struct mlx5_create_qp_mbox_in **in, int *inlen)
{
	enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW;
	struct mlx5_uuar_info *uuari;
	int uar_index;
	int uuarn;
	int err;

	uuari = &dev->mdev.priv.uuari;
	if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
		qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;

	if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
		lc = MLX5_IB_LATENCY_CLASS_FAST_PATH;

	uuarn = alloc_uuar(uuari, lc);
	if (uuarn < 0) {
		mlx5_ib_dbg(dev, "\n");
		return -ENOMEM;
	}

	qp->bf = &uuari->bfs[uuarn];
	uar_index = qp->bf->uar->index;

	err = calc_sq_size(dev, init_attr, qp);
	if (err < 0) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto err_uuar;
	}

	qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
	qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);

	err = mlx5_buf_alloc(&dev->mdev, qp->buf_size, PAGE_SIZE * 2, &qp->buf);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto err_uuar;
	}

	qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);
	*inlen = sizeof(**in) + sizeof(*(*in)->pas) * qp->buf.npages;
	*in = mlx5_vzalloc(*inlen);
	if (!*in) {
		err = -ENOMEM;
		goto err_buf;
	}
	(*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
	(*in)->ctx.log_pg_sz_remote_qpn =
		cpu_to_be32((qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
	/* Set "fast registration enabled" for all kernel QPs */
	(*in)->ctx.params1 |= cpu_to_be32(1 << 11);
	(*in)->ctx.sq_crq_size |= cpu_to_be16(1 << 4);

	mlx5_fill_page_array(&qp->buf, (*in)->pas);

	err = mlx5_db_alloc(&dev->mdev, &qp->db);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto err_free;
	}

	qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wrid), GFP_KERNEL);
	qp->sq.wr_data = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wr_data), GFP_KERNEL);
	qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(*qp->rq.wrid), GFP_KERNEL);
	qp->sq.w_list = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.w_list), GFP_KERNEL);
	qp->sq.wqe_head = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wqe_head), GFP_KERNEL);

	if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid ||
	    !qp->sq.w_list || !qp->sq.wqe_head) {
		err = -ENOMEM;
		goto err_wrid;
	}
	qp->create_type = MLX5_QP_KERNEL;

	return 0;

err_wrid:
	mlx5_db_free(&dev->mdev, &qp->db);
	kfree(qp->sq.wqe_head);
	kfree(qp->sq.w_list);
	kfree(qp->sq.wrid);
	kfree(qp->sq.wr_data);
	kfree(qp->rq.wrid);

err_free:
	mlx5_vfree(*in);

err_buf:
	mlx5_buf_free(&dev->mdev, &qp->buf);

err_uuar:
	free_uuar(&dev->mdev.priv.uuari, uuarn);
	return err;
}
static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
	mlx5_db_free(&dev->mdev, &qp->db);
	kfree(qp->sq.wqe_head);
	kfree(qp->sq.w_list);
	kfree(qp->sq.wrid);
	kfree(qp->sq.wr_data);
	kfree(qp->rq.wrid);
	mlx5_buf_free(&dev->mdev, &qp->buf);
	free_uuar(&dev->mdev.priv.uuari, qp->bf->uuarn);
}
static __be32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
{
	if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) ||
	    (attr->qp_type == IB_QPT_XRC_INI))
		return cpu_to_be32(MLX5_SRQ_RQ);
	else if (!qp->has_rq)
		return cpu_to_be32(MLX5_ZERO_LEN_RQ);
	else
		return cpu_to_be32(MLX5_NON_ZERO_RQ);
}
static int is_connected(enum ib_qp_type qp_type)
{
	if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC)
		return 1;

	return 0;
}
static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata, struct mlx5_ib_qp *qp)
{
	struct mlx5_ib_resources *devr = &dev->devr;
	struct mlx5_ib_create_qp_resp resp;
	struct mlx5_create_qp_mbox_in *in;
	struct mlx5_ib_create_qp ucmd;
	int inlen = sizeof(*in);
	int err;

	mutex_init(&qp->mutex);
	spin_lock_init(&qp->sq.lock);
	spin_lock_init(&qp->rq.lock);

	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;

	if (pd && pd->uobject) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
			mlx5_ib_dbg(dev, "copy failed\n");
			return -EFAULT;
		}

		qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
		qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
	} else {
		qp->wq_sig = !!wq_signature;
	}

	qp->has_rq = qp_has_rq(init_attr);
	err = set_rq_size(dev, &init_attr->cap, qp->has_rq,
			  qp, (pd && pd->uobject) ? &ucmd : NULL);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		return err;
	}

	if (pd) {
		if (pd->uobject) {
			mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count);
			if (ucmd.rq_wqe_shift != qp->rq.wqe_shift ||
			    ucmd.rq_wqe_count != qp->rq.wqe_cnt) {
				mlx5_ib_dbg(dev, "invalid rq params\n");
				return -EINVAL;
			}
			if (ucmd.sq_wqe_count > dev->mdev.caps.max_wqes) {
				mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
					    ucmd.sq_wqe_count, dev->mdev.caps.max_wqes);
				return -EINVAL;
			}
			err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen);
			if (err)
				mlx5_ib_dbg(dev, "err %d\n", err);
		} else {
			err = create_kernel_qp(dev, init_attr, qp, &in, &inlen);
			if (err)
				mlx5_ib_dbg(dev, "err %d\n", err);
			else
				qp->pa_lkey = to_mpd(pd)->pa_lkey;
		}

		if (err)
			return err;
	} else {
		in = mlx5_vzalloc(sizeof(*in));
		if (!in)
			return -ENOMEM;

		qp->create_type = MLX5_QP_EMPTY;
	}

	if (is_sqp(init_attr->qp_type))
		qp->port = init_attr->port_num;

	in->ctx.flags = cpu_to_be32(to_mlx5_st(init_attr->qp_type) << 16 |
				    MLX5_QP_PM_MIGRATED << 11);

	if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
		in->ctx.flags_pd = cpu_to_be32(to_mpd(pd ? pd : devr->p0)->pdn);
	else
		in->ctx.flags_pd = cpu_to_be32(MLX5_QP_LAT_SENSITIVE);

	if (qp->wq_sig)
		in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_ENABLE_SIG);

	if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
		int rcqe_sz;
		int scqe_sz;

		rcqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->recv_cq);
		scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq);

		if (rcqe_sz == 128)
			in->ctx.cs_res = MLX5_RES_SCAT_DATA64_CQE;
		else
			in->ctx.cs_res = MLX5_RES_SCAT_DATA32_CQE;

		if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) {
			if (scqe_sz == 128)
				in->ctx.cs_req = MLX5_REQ_SCAT_DATA64_CQE;
			else
				in->ctx.cs_req = MLX5_REQ_SCAT_DATA32_CQE;
		}
	}

	if (qp->rq.wqe_cnt) {
		in->ctx.rq_size_stride = (qp->rq.wqe_shift - 4);
		in->ctx.rq_size_stride |= ilog2(qp->rq.wqe_cnt) << 3;
	}

	in->ctx.rq_type_srqn = get_rx_type(qp, init_attr);

	if (qp->sq.wqe_cnt)
		in->ctx.sq_crq_size |= cpu_to_be16(ilog2(qp->sq.wqe_cnt) << 11);
	else
		in->ctx.sq_crq_size |= cpu_to_be16(0x8000);

	/* Set default resources */
	switch (init_attr->qp_type) {
	case IB_QPT_XRC_TGT:
		in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
		in->ctx.cqn_send = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
		in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
		in->ctx.xrcd = cpu_to_be32(to_mxrcd(init_attr->xrcd)->xrcdn);
		break;
	case IB_QPT_XRC_INI:
		in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
		in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
		in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
		break;
	default:
		if (init_attr->srq) {
			in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x0)->xrcdn);
			in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(init_attr->srq)->msrq.srqn);
		} else {
			in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
			in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
		}
	}

	if (init_attr->send_cq)
		in->ctx.cqn_send = cpu_to_be32(to_mcq(init_attr->send_cq)->mcq.cqn);

	if (init_attr->recv_cq)
		in->ctx.cqn_recv = cpu_to_be32(to_mcq(init_attr->recv_cq)->mcq.cqn);

	in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma);

	err = mlx5_core_create_qp(&dev->mdev, &qp->mqp, in, inlen);
	if (err) {
		mlx5_ib_dbg(dev, "create qp failed\n");
		goto err_create;
	}

	mlx5_vfree(in);
	/* Hardware wants QPN written in big-endian order (after
	 * shifting) for send doorbell.  Precompute this value to save
	 * a little bit when posting sends.
	 */
	qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);

	qp->mqp.event = mlx5_ib_qp_event;

	return 0;

err_create:
	if (qp->create_type == MLX5_QP_USER)
		destroy_qp_user(pd, qp);
	else if (qp->create_type == MLX5_QP_KERNEL)
		destroy_qp_kernel(dev, qp);

	mlx5_vfree(in);
	return err;
}
static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq) {
		if (recv_cq) {
			if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
				spin_lock_irq(&send_cq->lock);
				spin_lock_nested(&recv_cq->lock,
						 SINGLE_DEPTH_NESTING);
			} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
				spin_lock_irq(&send_cq->lock);
				__acquire(&recv_cq->lock);
			} else {
				spin_lock_irq(&recv_cq->lock);
				spin_lock_nested(&send_cq->lock,
						 SINGLE_DEPTH_NESTING);
			}
		} else {
			spin_lock_irq(&send_cq->lock);
		}
	} else if (recv_cq) {
		spin_lock_irq(&recv_cq->lock);
	}
}

static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
	__releases(&send_cq->lock) __releases(&recv_cq->lock)
{
	if (send_cq) {
		if (recv_cq) {
			if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
				spin_unlock(&recv_cq->lock);
				spin_unlock_irq(&send_cq->lock);
			} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
				__release(&recv_cq->lock);
				spin_unlock_irq(&send_cq->lock);
			} else {
				spin_unlock(&send_cq->lock);
				spin_unlock_irq(&recv_cq->lock);
			}
		} else {
			spin_unlock_irq(&send_cq->lock);
		}
	} else if (recv_cq) {
		spin_unlock_irq(&recv_cq->lock);
	}
}
static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp)
{
	return to_mpd(qp->ibqp.pd);
}

static void get_cqs(struct mlx5_ib_qp *qp,
		    struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq)
{
	switch (qp->ibqp.qp_type) {
	case MLX5_IB_QPT_REG_UMR:
	case IB_QPT_XRC_INI:
		*send_cq = to_mcq(qp->ibqp.send_cq);
		*recv_cq = NULL;
		break;

	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
	case IB_QPT_RAW_IPV6:
	case IB_QPT_RAW_ETHERTYPE:
		*send_cq = to_mcq(qp->ibqp.send_cq);
		*recv_cq = to_mcq(qp->ibqp.recv_cq);
		break;

	case IB_QPT_RAW_PACKET:
	default:
		*send_cq = NULL;
		*recv_cq = NULL;
		break;
	}
}
static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
	struct mlx5_ib_cq *send_cq, *recv_cq;
	struct mlx5_modify_qp_mbox_in *in;
	int err;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return;
	if (qp->state != IB_QPS_RESET)
		if (mlx5_core_qp_modify(&dev->mdev, to_mlx5_state(qp->state),
					MLX5_QP_STATE_RST, in, sizeof(*in), &qp->mqp))
			mlx5_ib_warn(dev, "mlx5_ib: modify QP %06x to RESET failed\n",
				     qp->mqp.qpn);

	get_cqs(qp, &send_cq, &recv_cq);

	if (qp->create_type == MLX5_QP_KERNEL) {
		mlx5_ib_lock_cqs(send_cq, recv_cq);
		__mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
				   qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (send_cq != recv_cq)
			__mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
		mlx5_ib_unlock_cqs(send_cq, recv_cq);
	}

	err = mlx5_core_destroy_qp(&dev->mdev, &qp->mqp);
	if (err)
		mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", qp->mqp.qpn);
	kfree(in);

	if (qp->create_type == MLX5_QP_KERNEL)
		destroy_qp_kernel(dev, qp);
	else if (qp->create_type == MLX5_QP_USER)
		destroy_qp_user(&get_pd(qp)->ibpd, qp);
}
static const char *ib_qp_type_str(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_SMI:
		return "IB_QPT_SMI";
	case IB_QPT_GSI:
		return "IB_QPT_GSI";
	case IB_QPT_RC:
		return "IB_QPT_RC";
	case IB_QPT_UC:
		return "IB_QPT_UC";
	case IB_QPT_UD:
		return "IB_QPT_UD";
	case IB_QPT_RAW_IPV6:
		return "IB_QPT_RAW_IPV6";
	case IB_QPT_RAW_ETHERTYPE:
		return "IB_QPT_RAW_ETHERTYPE";
	case IB_QPT_XRC_INI:
		return "IB_QPT_XRC_INI";
	case IB_QPT_XRC_TGT:
		return "IB_QPT_XRC_TGT";
	case IB_QPT_RAW_PACKET:
		return "IB_QPT_RAW_PACKET";
	case MLX5_IB_QPT_REG_UMR:
		return "MLX5_IB_QPT_REG_UMR";
	default:
		return "Invalid QP type";
	}
}
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev;
	struct mlx5_ib_qp *qp;
	u16 xrcdn = 0;
	int err;

	if (pd) {
		dev = to_mdev(pd->device);
	} else {
		/* being cautious here */
		if (init_attr->qp_type != IB_QPT_XRC_TGT &&
		    init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
			pr_warn("%s: no PD for transport %s\n", __func__,
				ib_qp_type_str(init_attr->qp_type));
			return ERR_PTR(-EINVAL);
		}
		dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
	}

	switch (init_attr->qp_type) {
	case IB_QPT_XRC_TGT:
	case IB_QPT_XRC_INI:
		if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_XRC)) {
			mlx5_ib_dbg(dev, "XRC not supported\n");
			return ERR_PTR(-ENOSYS);
		}
		init_attr->recv_cq = NULL;
		if (init_attr->qp_type == IB_QPT_XRC_TGT) {
			xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
			init_attr->send_cq = NULL;
		}

		/* fall through */
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case MLX5_IB_QPT_REG_UMR:
		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
		if (!qp)
			return ERR_PTR(-ENOMEM);

		err = create_qp_common(dev, pd, init_attr, udata, qp);
		if (err) {
			mlx5_ib_dbg(dev, "create_qp_common failed\n");
			kfree(qp);
			return ERR_PTR(err);
		}

		if (is_qp0(init_attr->qp_type))
			qp->ibqp.qp_num = 0;
		else if (is_qp1(init_attr->qp_type))
			qp->ibqp.qp_num = 1;
		else
			qp->ibqp.qp_num = qp->mqp.qpn;

		mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
			    qp->ibqp.qp_num, qp->mqp.qpn, to_mcq(init_attr->recv_cq)->mcq.cqn,
			    to_mcq(init_attr->send_cq)->mcq.cqn);

		break;

	case IB_QPT_RAW_IPV6:
	case IB_QPT_RAW_ETHERTYPE:
	case IB_QPT_RAW_PACKET:
	default:
		mlx5_ib_dbg(dev, "unsupported qp type %d\n",
			    init_attr->qp_type);
		/* Don't support raw QPs */
		return ERR_PTR(-EINVAL);
	}

	return &qp->ibqp;
}
int mlx5_ib_destroy_qp(struct ib_qp *qp)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct mlx5_ib_qp *mqp = to_mqp(qp);

	destroy_qp_common(dev, mqp);

	kfree(mqp);

	return 0;
}
static __be32 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_attr *attr,
				   int attr_mask)
{
	u32 hw_access_flags = 0;
	u8 dest_rd_atomic;
	u32 access_flags;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->atomic_rd_en;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MLX5_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		hw_access_flags |= (MLX5_QP_BIT_RAE | MLX5_ATOMIC_MODE_CX);
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MLX5_QP_BIT_RWE;

	return cpu_to_be32(hw_access_flags);
}
enum {
	MLX5_PATH_FLAG_FL	= 1 << 0,
	MLX5_PATH_FLAG_FREE_AR	= 1 << 1,
	MLX5_PATH_FLAG_COUNTER	= 1 << 2,
};
static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
{
	if (rate == IB_RATE_PORT_CURRENT) {
		return 0;
	} else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
		return -EINVAL;
	} else {
		while (rate != IB_RATE_2_5_GBPS &&
		       !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
			 dev->mdev.caps.stat_rate_support))
			--rate;
	}

	return rate + MLX5_STAT_RATE_OFFSET;
}
static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
			 struct mlx5_qp_path *path, u8 port, int attr_mask,
			 u32 path_flags, const struct ib_qp_attr *attr)
{
	int err;

	path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
	path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 : 0;

	if (attr_mask & IB_QP_PKEY_INDEX)
		path->pkey_index = attr->pkey_index;

	path->grh_mlid	= ah->src_path_bits & 0x7f;
	path->rlid	= cpu_to_be16(ah->dlid);

	if (ah->ah_flags & IB_AH_GRH) {
		path->grh_mlid |= 1 << 7;
		path->mgid_index = ah->grh.sgid_index;
		path->hop_limit  = ah->grh.hop_limit;
		path->tclass_flowlabel =
			cpu_to_be32((ah->grh.traffic_class << 20) |
				    (ah->grh.flow_label));
		memcpy(path->rgid, ah->grh.dgid.raw, 16);
	}

	err = ib_rate_to_mlx5(dev, ah->static_rate);
	if (err < 0)
		return err;
	path->static_rate = err;

	if (ah->ah_flags & IB_AH_GRH) {
		if (ah->grh.sgid_index >= dev->mdev.caps.port[port - 1].gid_table_len) {
			pr_err("sgid_index (%u) too large. max is %d\n",
			       ah->grh.sgid_index, dev->mdev.caps.port[port - 1].gid_table_len);
			return -EINVAL;
		}

		path->grh_mlid |= 1 << 7;
		path->mgid_index = ah->grh.sgid_index;
		path->hop_limit  = ah->grh.hop_limit;
		path->tclass_flowlabel =
			cpu_to_be32((ah->grh.traffic_class << 20) |
				    (ah->grh.flow_label));
		memcpy(path->rgid, ah->grh.dgid.raw, 16);
	}

	if (attr_mask & IB_QP_TIMEOUT)
		path->ackto_lt = attr->timeout << 3;

	path->sl = ah->sl & 0xf;

	return 0;
}
static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_QP_ST_MAX] = {
	[MLX5_QP_STATE_INIT] = {
		[MLX5_QP_STATE_INIT] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_PRI_PORT,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_PRI_PORT,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_Q_KEY		|
					  MLX5_QP_OPTPAR_PRI_PORT,
		},
		[MLX5_QP_STATE_RTR] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX	|
					   MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					   MLX5_QP_OPTPAR_RRE		|
					   MLX5_QP_OPTPAR_RAE		|
					   MLX5_QP_OPTPAR_RWE		|
					   MLX5_QP_OPTPAR_PKEY_INDEX,
		},
	},
	[MLX5_QP_STATE_RTR] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PM_STATE	|
					  MLX5_QP_OPTPAR_RNR_TIMEOUT,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PM_STATE,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
		},
	},
	[MLX5_QP_STATE_RTS] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_RNR_TIMEOUT	|
					  MLX5_QP_OPTPAR_PM_STATE	|
					  MLX5_QP_OPTPAR_ALT_ADDR_PATH,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PM_STATE	|
					  MLX5_QP_OPTPAR_ALT_ADDR_PATH,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY		|
					  MLX5_QP_OPTPAR_SRQN		|
					  MLX5_QP_OPTPAR_CQN_RCV,
		},
	},
	[MLX5_QP_STATE_SQER] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_UD]	 = MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_UC]	 = MLX5_QP_OPTPAR_RWE,
			[MLX5_QP_ST_RC]	 = MLX5_QP_OPTPAR_RNR_TIMEOUT	|
					   MLX5_QP_OPTPAR_RWE		|
					   MLX5_QP_OPTPAR_RAE		|
					   MLX5_QP_OPTPAR_RRE,
		},
	},
};
static int ib_nr_to_mlx5_nr(int ib_mask)
{
	switch (ib_mask) {
	case IB_QP_STATE:
		return 0;
	case IB_QP_CUR_STATE:
		return 0;
	case IB_QP_EN_SQD_ASYNC_NOTIFY:
		return 0;
	case IB_QP_ACCESS_FLAGS:
		return MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RRE |
			MLX5_QP_OPTPAR_RAE;
	case IB_QP_PKEY_INDEX:
		return MLX5_QP_OPTPAR_PKEY_INDEX;
	case IB_QP_PORT:
		return MLX5_QP_OPTPAR_PRI_PORT;
	case IB_QP_QKEY:
		return MLX5_QP_OPTPAR_Q_KEY;
	case IB_QP_AV:
		return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH |
			MLX5_QP_OPTPAR_PRI_PORT;
	case IB_QP_PATH_MTU:
		return 0;
	case IB_QP_TIMEOUT:
		return MLX5_QP_OPTPAR_ACK_TIMEOUT;
	case IB_QP_RETRY_CNT:
		return MLX5_QP_OPTPAR_RETRY_COUNT;
	case IB_QP_RNR_RETRY:
		return MLX5_QP_OPTPAR_RNR_RETRY;
	case IB_QP_RQ_PSN:
		return 0;
	case IB_QP_MAX_QP_RD_ATOMIC:
		return MLX5_QP_OPTPAR_SRA_MAX;
	case IB_QP_ALT_PATH:
		return MLX5_QP_OPTPAR_ALT_ADDR_PATH;
	case IB_QP_MIN_RNR_TIMER:
		return MLX5_QP_OPTPAR_RNR_TIMEOUT;
	case IB_QP_SQ_PSN:
		return 0;
	case IB_QP_MAX_DEST_RD_ATOMIC:
		return MLX5_QP_OPTPAR_RRA_MAX | MLX5_QP_OPTPAR_RWE |
			MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE;
	case IB_QP_PATH_MIG_STATE:
		return MLX5_QP_OPTPAR_PM_STATE;
	case IB_QP_CAP:
		return 0;
	case IB_QP_DEST_QPN:
		return 0;
	}
	return 0;
}
static int ib_mask_to_mlx5_opt(int ib_mask)
{
	int result = 0;
	int i;

	for (i = 0; i < 8 * sizeof(int); i++) {
		if ((1 << i) & ib_mask)
			result |= ib_nr_to_mlx5_nr(1 << i);
	}

	return result;
}
static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
			       const struct ib_qp_attr *attr, int attr_mask,
			       enum ib_qp_state cur_state, enum ib_qp_state new_state)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_ib_cq *send_cq, *recv_cq;
	struct mlx5_qp_context *context;
	struct mlx5_modify_qp_mbox_in *in;
	struct mlx5_ib_pd *pd;
	enum mlx5_qp_state mlx5_cur, mlx5_new;
	enum mlx5_qp_optpar optpar;
	int sqd_event;
	int mlx5_st;
	int err;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	context = &in->ctx;
	err = to_mlx5_st(ibqp->qp_type);
	if (err < 0)
		goto out;

	context->flags = cpu_to_be32(err << 16);

	if (!(attr_mask & IB_QP_PATH_MIG_STATE)) {
		context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
	} else {
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			context->flags |= cpu_to_be32(MLX5_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			context->flags |= cpu_to_be32(MLX5_QP_PM_ARMED << 11);
			break;
		}
	}

	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) {
		context->mtu_msgmax = (IB_MTU_256 << 5) | 8;
	} else if (ibqp->qp_type == IB_QPT_UD ||
		   ibqp->qp_type == MLX5_IB_QPT_REG_UMR) {
		context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
	} else if (attr_mask & IB_QP_PATH_MTU) {
		if (attr->path_mtu < IB_MTU_256 ||
		    attr->path_mtu > IB_MTU_4096) {
			mlx5_ib_warn(dev, "invalid mtu %d\n", attr->path_mtu);
			err = -EINVAL;
			goto out;
		}
		context->mtu_msgmax = (attr->path_mtu << 5) | dev->mdev.caps.log_max_msg;
	}

	if (attr_mask & IB_QP_DEST_QPN)
		context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num);

	if (attr_mask & IB_QP_PKEY_INDEX)
		context->pri_path.pkey_index = attr->pkey_index;

	/* todo implement counter_index functionality */

	if (is_sqp(ibqp->qp_type))
		context->pri_path.port = qp->port;

	if (attr_mask & IB_QP_PORT)
		context->pri_path.port = attr->port_num;

	if (attr_mask & IB_QP_AV) {
		err = mlx5_set_path(dev, &attr->ah_attr, &context->pri_path,
				    attr_mask & IB_QP_PORT ? attr->port_num : qp->port,
				    attr_mask, 0, attr);
		if (err)
			goto out;
	}

	if (attr_mask & IB_QP_TIMEOUT)
		context->pri_path.ackto_lt |= attr->timeout << 3;

	if (attr_mask & IB_QP_ALT_PATH) {
		err = mlx5_set_path(dev, &attr->alt_ah_attr, &context->alt_path,
				    attr->alt_port_num, attr_mask, 0, attr);
		if (err)
			goto out;
	}

	pd = get_pd(qp);
	get_cqs(qp, &send_cq, &recv_cq);

	context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
	context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0;
	context->cqn_recv = recv_cq ? cpu_to_be32(recv_cq->mcq.cqn) : 0;
	context->params1  = cpu_to_be32(MLX5_IB_ACK_REQ_FREQ << 28);

	if (attr_mask & IB_QP_RNR_RETRY)
		context->params1 |= cpu_to_be32(attr->rnr_retry << 13);

	if (attr_mask & IB_QP_RETRY_CNT)
		context->params1 |= cpu_to_be32(attr->retry_cnt << 16);

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic)
			context->params1 |=
				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
	}

	if (attr_mask & IB_QP_SQ_PSN)
		context->next_send_psn = cpu_to_be32(attr->sq_psn);

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic)
			context->params2 |=
				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
	}

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
		context->params2 |= to_mlx5_access_flags(qp, attr, attr_mask);

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);

	if (attr_mask & IB_QP_RQ_PSN)
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

	if (attr_mask & IB_QP_QKEY)
		context->qkey = cpu_to_be32(attr->qkey);

	if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->db_rec_addr = cpu_to_be64(qp->db.dma);

	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
		sqd_event = 1;
	else
		sqd_event = 0;

	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->sq_crq_size |= cpu_to_be16(1 << 4);

	mlx5_cur = to_mlx5_state(cur_state);
	mlx5_new = to_mlx5_state(new_state);
	mlx5_st = to_mlx5_st(ibqp->qp_type);
	if (mlx5_st < 0)
		goto out;

	optpar = ib_mask_to_mlx5_opt(attr_mask);
	optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
	in->optparam = cpu_to_be32(optpar);
	err = mlx5_core_qp_modify(&dev->mdev, to_mlx5_state(cur_state),
				  to_mlx5_state(new_state), in, sqd_event,
				  &qp->mqp);
	if (err)
		goto out;

	qp->state = new_state;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->atomic_rd_en = attr->qp_access_flags;
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->resp_depth = attr->max_dest_rd_atomic;
	if (attr_mask & IB_QP_PORT)
		qp->port = attr->port_num;
	if (attr_mask & IB_QP_ALT_PATH)
		qp->alt_port = attr->alt_port_num;

	/*
	 * If we moved a kernel QP to RESET, clean up all old CQ
	 * entries and reinitialize the QP.
	 */
	if (new_state == IB_QPS_RESET && !ibqp->uobject) {
		mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
				 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
		if (send_cq != recv_cq)
			mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);

		qp->sq.cur_post = 0;
		qp->sq.last_poll = 0;
		qp->db.db[MLX5_RCV_DBR] = 0;
		qp->db.db[MLX5_SND_DBR] = 0;
	}

out:
	kfree(in);
	return err;
}
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	int err = -EINVAL;
	int port;

	mutex_lock(&qp->mutex);

	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (ibqp->qp_type != MLX5_IB_QPT_REG_UMR &&
	    !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask))
		goto out;

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > dev->mdev.caps.num_ports))
		goto out;

	if (attr_mask & IB_QP_PKEY_INDEX) {
		port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
		if (attr->pkey_index >= dev->mdev.caps.port[port - 1].pkey_table_len)
			goto out;
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > dev->mdev.caps.max_ra_res_qp)
		goto out;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > dev->mdev.caps.max_ra_req_qp)
		goto out;

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		err = 0;
		goto out;
	}

	err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);

out:
	mutex_unlock(&qp->mutex);
	return err;
}
static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
{
	struct mlx5_ib_cq *cq;
	unsigned cur;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max_post))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max_post;
}
static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
					  u64 remote_addr, u32 rkey)
{
	rseg->raddr = cpu_to_be64(remote_addr);
	rseg->rkey  = cpu_to_be32(rkey);
}
static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
			     struct ib_send_wr *wr)
{
	memcpy(&dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof(struct mlx5_av));
	dseg->av.dqp_dct = cpu_to_be32(wr->wr.ud.remote_qpn | MLX5_EXTENDED_UD_AV);
	dseg->av.key.qkey.qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
}
static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->byte_count = cpu_to_be32(sg->length);
	dseg->lkey       = cpu_to_be32(sg->lkey);
	dseg->addr       = cpu_to_be64(sg->addr);
}
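
/*
 * UMR page-list sizes are expressed in octowords (16 bytes).  Each
 * page entry is 8 bytes, so the page count is rounded up to a
 * multiple of 8 entries and divided by two.
 */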
static __be16 get_klm_octo(int npages)
{
	return cpu_to_be16(ALIGN(npages, 8) / 2);
}
static __be64 frwr_mkey_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_LEN		|
		 MLX5_MKEY_MASK_PAGE_SIZE	|
		 MLX5_MKEY_MASK_START_ADDR	|
		 MLX5_MKEY_MASK_EN_RINVAL	|
		 MLX5_MKEY_MASK_KEY		|
		 MLX5_MKEY_MASK_SMALL_FENCE	|
		 MLX5_MKEY_MASK_FREE;

	return cpu_to_be64(result);
}
static void set_frwr_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
				 struct ib_send_wr *wr, int li)
{
	memset(umr, 0, sizeof(*umr));

	if (li) {
		umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
		umr->flags = 1 << 7;
		return;
	}

	umr->flags = (1 << 5); /* fail if not free */
	umr->klm_octowords = get_klm_octo(wr->wr.fast_reg.page_list_len);
	umr->mkey_mask = frwr_mkey_mask();
}
static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
				struct ib_send_wr *wr)
{
	struct umr_wr *umrwr = (struct umr_wr *)&wr->wr.fast_reg;
	u64 mask;

	memset(umr, 0, sizeof(*umr));

	if (!(wr->send_flags & MLX5_IB_SEND_UMR_UNREG)) {
		umr->flags = 1 << 5; /* fail if not free */
		umr->klm_octowords = get_klm_octo(umrwr->npages);
		mask = MLX5_MKEY_MASK_LEN	|
			MLX5_MKEY_MASK_PAGE_SIZE	|
			MLX5_MKEY_MASK_START_ADDR	|
			MLX5_MKEY_MASK_KEY	|
			MLX5_MKEY_MASK_FREE;
		umr->mkey_mask = cpu_to_be64(mask);
	} else {
		umr->flags = 2 << 5; /* fail if free */
		mask = MLX5_MKEY_MASK_FREE;
		umr->mkey_mask = cpu_to_be64(mask);
	}

	if (!wr->num_sge)
		umr->flags |= (1 << 7); /* inline */
}
static u8 get_umr_flags(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
		MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN | MLX5_ACCESS_MODE_MTT;
}
static void set_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr,
			     int li, int *writ)
{
	memset(seg, 0, sizeof(*seg));
	if (li) {
		seg->status = 1 << 6;
		return;
	}

	seg->flags = get_umr_flags(wr->wr.fast_reg.access_flags);
	*writ = seg->flags & (MLX5_PERM_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE);
	seg->qpn_mkey7_0 = cpu_to_be32((wr->wr.fast_reg.rkey & 0xff) | 0xffffff00);
	seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
	seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start);
	seg->len = cpu_to_be64(wr->wr.fast_reg.length);
	seg->xlt_oct_size = cpu_to_be32((wr->wr.fast_reg.page_list_len + 1) / 2);
	seg->log2_page_size = wr->wr.fast_reg.page_shift;
}
static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr)
{
	memset(seg, 0, sizeof(*seg));
	if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) {
		seg->status = 1 << 6;
		return;
	}

	seg->flags = convert_access(wr->wr.fast_reg.access_flags);
	seg->flags_pd = cpu_to_be32(to_mpd((struct ib_pd *)wr->wr.fast_reg.page_list)->pdn);
	seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start);
	seg->len = cpu_to_be64(wr->wr.fast_reg.length);
	seg->log2_page_size = wr->wr.fast_reg.page_shift;
	seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |
				       mlx5_mkey_variant(wr->wr.fast_reg.rkey));
}
static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg,
			   struct ib_send_wr *wr,
			   struct mlx5_core_dev *mdev,
			   struct mlx5_ib_pd *pd,
			   int writ)
{
	struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list);
	u64 *page_list = wr->wr.fast_reg.page_list->page_list;
	u64 perm = MLX5_EN_RD | (writ ? MLX5_EN_WR : 0);
	int i;

	for (i = 0; i < wr->wr.fast_reg.page_list_len; i++)
		mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm);
	dseg->addr = cpu_to_be64(mfrpl->map);
	dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64));
	dseg->lkey = cpu_to_be32(pd->pa_lkey);
}
static __be32 send_ieth(struct ib_send_wr *wr)
{
	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return wr->ex.imm_data;

	case IB_WR_SEND_WITH_INV:
		return cpu_to_be32(wr->ex.invalidate_rkey);

	default:
		return 0;
	}
}
static u8 calc_sig(void *wqe, int size)
{
	u8 *p = wqe;
	u8 res = 0;
	int i;

	for (i = 0; i < size; i++)
		res ^= p[i];

	return ~res;
}

static u8 wq_sig(void *wqe)
{
	return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
}
static int set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr,
			    void *wqe, int *sz)
{
	struct mlx5_wqe_inline_seg *seg;
	void *qend = qp->sq.qend;
	void *addr;
	int inl = 0;
	int copy;
	int len;
	int i;

	seg = wqe;
	wqe += sizeof(*seg);
	for (i = 0; i < wr->num_sge; i++) {
		addr = (void *)(unsigned long)(wr->sg_list[i].addr);
		len  = wr->sg_list[i].length;
		inl += len;

		if (unlikely(inl > qp->max_inline_data))
			return -ENOMEM;

		if (unlikely(wqe + len > qend)) {
			copy = qend - wqe;
			memcpy(wqe, addr, copy);
			addr += copy;
			len -= copy;
			wqe = mlx5_get_send_wqe(qp, 0);
		}
		memcpy(wqe, addr, len);
		wqe += len;
	}

	seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);

	*sz = ALIGN(inl + sizeof(seg->byte_count), 16) / 16;

	return 0;
}
static int set_frwr_li_wr(void **seg, struct ib_send_wr *wr, int *size,
			  struct mlx5_core_dev *mdev, struct mlx5_ib_pd *pd, struct mlx5_ib_qp *qp)
{
	int writ = 0;
	int li;

	li = wr->opcode == IB_WR_LOCAL_INV ? 1 : 0;
	if (unlikely(wr->send_flags & IB_SEND_INLINE))
		return -EINVAL;

	set_frwr_umr_segment(*seg, wr, li);
	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);
	set_mkey_segment(*seg, wr, li, &writ);
	*seg += sizeof(struct mlx5_mkey_seg);
	*size += sizeof(struct mlx5_mkey_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	if (!li) {
		if (unlikely(wr->wr.fast_reg.page_list_len >
			     wr->wr.fast_reg.page_list->max_page_list_len))
			return -ENOMEM;

		set_frwr_pages(*seg, wr, mdev, pd, writ);
		*seg += sizeof(struct mlx5_wqe_data_seg);
		*size += (sizeof(struct mlx5_wqe_data_seg) / 16);
	}

	return 0;
}
static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
{
	__be32 *p = NULL;
	int tidx = idx;
	int i, j;

	pr_debug("dump wqe at %p\n", mlx5_get_send_wqe(qp, tidx));
	for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) {
		if ((i & 0xf) == 0) {
			void *buf = mlx5_get_send_wqe(qp, tidx);
			tidx = (tidx + 1) & (qp->sq.wqe_cnt - 1);
			p = buf;
			j = 0;
		}
		pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
			 be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]),
			 be32_to_cpu(p[j + 3]));
	}
}
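
/*
 * mlx5_bf_copy() streams the WQE into the blue-flame register in
 * 64-byte chunks (eight 8-byte MMIO copies per iteration), wrapping to
 * the start of the SQ buffer if the source runs past sq.qend.
 */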
static void mlx5_bf_copy(u64 __iomem *dst, u64 *src,
			 unsigned bytecnt, struct mlx5_ib_qp *qp)
{
	while (bytecnt > 0) {
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		bytecnt -= 64;
		if (unlikely(src == qp->sq.qend))
			src = mlx5_get_send_wqe(qp, 0);
	}
}
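
/*
 * Fence selection: a fenced local invalidate requires strong ordering;
 * otherwise, if the previous WQE set a fence, the "small and fence"
 * mode (or the cached fence value) is carried into this WQE.
 */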
static u8 get_fence(u8 fence, struct ib_send_wr *wr)
{
	if (unlikely(wr->opcode == IB_WR_LOCAL_INV &&
		     wr->send_flags & IB_SEND_FENCE))
		return MLX5_FENCE_MODE_STRONG_ORDERING;

	if (unlikely(fence)) {
		if (wr->send_flags & IB_SEND_FENCE)
			return MLX5_FENCE_MODE_SMALL_AND_FENCE;
		else
			return fence;
	}

	return 0;
}
int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr)
{
	struct mlx5_wqe_ctrl_seg *ctrl = NULL;  /* compiler warning */
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_core_dev *mdev = &dev->mdev;
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_wqe_data_seg *dpseg;
	struct mlx5_wqe_xrc_seg *xrc;
	struct mlx5_bf *bf = qp->bf;
	int uninitialized_var(size);
	void *qend = qp->sq.qend;
	unsigned long flags;
	u32 mlx5_opcode;
	unsigned idx;
	int err = 0;
	int inl = 0;
	int num_sge;
	void *seg;
	int nreq;
	int i;
	u8 next_fence = 0;
	u8 opmod = 0;
	u8 fence;

	spin_lock_irqsave(&qp->sq.lock, flags);

	for (nreq = 0; wr; nreq++, wr = wr->next) {
		if (unlikely(wr->opcode >= sizeof(mlx5_ib_opcode) / sizeof(mlx5_ib_opcode[0]))) {
			mlx5_ib_warn(dev, "\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) {
			mlx5_ib_warn(dev, "\n");
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		fence = qp->fm_cache;
		num_sge = wr->num_sge;
		if (unlikely(num_sge > qp->sq.max_gs)) {
			mlx5_ib_warn(dev, "\n");
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
		seg = mlx5_get_send_wqe(qp, idx);
		ctrl = seg;
		*(uint32_t *)(seg + 8) = 0;
		ctrl->imm = send_ieth(wr);
		ctrl->fm_ce_se = qp->sq_signal_bits |
			(wr->send_flags & IB_SEND_SIGNALED ?
			 MLX5_WQE_CTRL_CQ_UPDATE : 0) |
			(wr->send_flags & IB_SEND_SOLICITED ?
			 MLX5_WQE_CTRL_SOLICITED : 0);

		seg += sizeof(*ctrl);
		size = sizeof(*ctrl) / 16;

		switch (ibqp->qp_type) {
		case IB_QPT_XRC_INI:
			xrc = seg;
			xrc->xrc_srqn = htonl(wr->xrc_remote_srq_num);
			seg += sizeof(*xrc);
			size += sizeof(*xrc) / 16;
			/* fall through */
		case IB_QPT_RC:
			switch (wr->opcode) {
			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(seg, wr->wr.rdma.remote_addr,
					      wr->wr.rdma.rkey);
				seg += sizeof(struct mlx5_wqe_raddr_seg);
				size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
				break;

			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
			case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
				mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
				err = -ENOSYS;
				*bad_wr = wr;
				goto out;

			case IB_WR_LOCAL_INV:
				next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
				qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
				ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
				err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					*bad_wr = wr;
					goto out;
				}
				num_sge = 0;
				break;

			case IB_WR_FAST_REG_MR:
				next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
				qp->sq.wr_data[idx] = IB_WR_FAST_REG_MR;
				ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey);
				err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					*bad_wr = wr;
					goto out;
				}
				num_sge = 0;
				break;

			default:
				break;
			}
			break;

		case IB_QPT_UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(seg, wr->wr.rdma.remote_addr,
					      wr->wr.rdma.rkey);
				seg += sizeof(struct mlx5_wqe_raddr_seg);
				size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
				break;

			default:
				break;
			}
			break;

		case IB_QPT_SMI:
		case IB_QPT_GSI:
		case IB_QPT_UD:
			set_datagram_seg(seg, wr);
			seg += sizeof(struct mlx5_wqe_datagram_seg);
			size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
			if (unlikely((seg == qend)))
				seg = mlx5_get_send_wqe(qp, 0);
			break;

		case MLX5_IB_QPT_REG_UMR:
			if (wr->opcode != MLX5_IB_WR_UMR) {
				err = -EINVAL;
				mlx5_ib_warn(dev, "bad opcode\n");
				goto out;
			}
			qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
			ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey);
			set_reg_umr_segment(seg, wr);
			seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
			size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
			if (unlikely((seg == qend)))
				seg = mlx5_get_send_wqe(qp, 0);
			set_reg_mkey_segment(seg, wr);
			seg += sizeof(struct mlx5_mkey_seg);
			size += sizeof(struct mlx5_mkey_seg) / 16;
			if (unlikely((seg == qend)))
				seg = mlx5_get_send_wqe(qp, 0);
			break;

		default:
			break;
		}

		if (wr->send_flags & IB_SEND_INLINE && num_sge) {
			int uninitialized_var(sz);

			err = set_data_inl_seg(qp, wr, seg, &sz);
			if (unlikely(err)) {
				mlx5_ib_warn(dev, "\n");
				*bad_wr = wr;
				goto out;
			}
			inl = 1;
			size += sz;
		} else {
			dpseg = seg;
			for (i = 0; i < num_sge; i++) {
				if (unlikely(dpseg == qend)) {
					seg = mlx5_get_send_wqe(qp, 0);
					dpseg = seg;
				}
				if (likely(wr->sg_list[i].length)) {
					set_data_ptr_seg(dpseg, wr->sg_list + i);
					size += sizeof(struct mlx5_wqe_data_seg) / 16;
					dpseg++;
				}
			}
		}

		mlx5_opcode = mlx5_ib_opcode[wr->opcode];
		ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8)	|
						     mlx5_opcode			|
						     ((u32)opmod << 24));
		ctrl->qpn_ds = cpu_to_be32(size | (qp->mqp.qpn << 8));
		ctrl->fm_ce_se |= get_fence(fence, wr);
		qp->fm_cache = next_fence;
		if (unlikely(qp->wq_sig))
			ctrl->signature = wq_sig(ctrl);

		qp->sq.wrid[idx] = wr->wr_id;
		qp->sq.w_list[idx].opcode = mlx5_opcode;
		qp->sq.wqe_head[idx] = qp->sq.head + nreq;
		qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
		qp->sq.w_list[idx].next = qp->sq.cur_post;

		if (0)
			dump_wqe(qp, idx, size);
	}

out:
	if (likely(nreq)) {
		qp->sq.head += nreq;

		/* Make sure that descriptors are written before
		 * updating doorbell record and ringing the doorbell
		 */
		wmb();

		qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);

		if (bf->need_lock)
			spin_lock(&bf->lock);

		/* TBD enable WC */
		if (0 && nreq == 1 && bf->uuarn && inl && size > 1 && size <= bf->buf_size / 16) {
			mlx5_bf_copy(bf->reg + bf->offset, (u64 *)ctrl, ALIGN(size * 16, 64), qp);
			/* wc_wmb(); */
		} else {
			mlx5_write64((__be32 *)ctrl, bf->regreg + bf->offset,
				     MLX5_GET_DOORBELL_LOCK(&bf->lock32));
			/* Make sure doorbells don't leak out of SQ spinlock
			 * and reach the HCA out of order.
			 */
			mmiowb();
		}
		bf->offset ^= bf->buf_size;

		if (bf->need_lock)
			spin_unlock(&bf->lock);
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return err;
}
static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size)
{
	sig->signature = calc_sig(sig, size);
}
int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_wqe_data_seg *scat;
	struct mlx5_rwqe_sig *sig;
	unsigned long flags;
	int err = 0;
	int nreq;
	int ind;
	int i;

	spin_lock_irqsave(&qp->rq.lock, flags);

	ind = qp->rq.head & (qp->rq.wqe_cnt - 1);

	for (nreq = 0; wr; nreq++, wr = wr->next) {
		if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		scat = get_recv_wqe(qp, ind);

		for (i = 0; i < wr->num_sge; i++)
			set_data_ptr_seg(scat + i, wr->sg_list + i);

		if (i < qp->rq.max_gs) {
			scat[i].byte_count = 0;
			scat[i].lkey       = cpu_to_be32(MLX5_INVALID_LKEY);
		}

		if (qp->wq_sig) {
			sig = (struct mlx5_rwqe_sig *)scat;
			set_sig_seg(sig, (qp->rq.max_gs + 1) << 2);
		}

		qp->rq.wrid[ind] = wr->wr_id;

		ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
	}

out:
	if (likely(nreq)) {
		qp->rq.head += nreq;

		/* Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);

	return err;
}
static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state)
{
	switch (mlx5_state) {
	case MLX5_QP_STATE_RST:		return IB_QPS_RESET;
	case MLX5_QP_STATE_INIT:	return IB_QPS_INIT;
	case MLX5_QP_STATE_RTR:		return IB_QPS_RTR;
	case MLX5_QP_STATE_RTS:		return IB_QPS_RTS;
	case MLX5_QP_STATE_SQ_DRAINING:
	case MLX5_QP_STATE_SQD:		return IB_QPS_SQD;
	case MLX5_QP_STATE_SQER:	return IB_QPS_SQE;
	case MLX5_QP_STATE_ERR:		return IB_QPS_ERR;
	default:			return -1;
	}
}
static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state)
{
	switch (mlx5_mig_state) {
	case MLX5_QP_PM_ARMED:		return IB_MIG_ARMED;
	case MLX5_QP_PM_REARM:		return IB_MIG_REARM;
	case MLX5_QP_PM_MIGRATED:	return IB_MIG_MIGRATED;
	default:			return -1;
	}
}
static int to_ib_qp_access_flags(int mlx5_flags)
{
	int ib_flags = 0;

	if (mlx5_flags & MLX5_QP_BIT_RRE)
		ib_flags |= IB_ACCESS_REMOTE_READ;
	if (mlx5_flags & MLX5_QP_BIT_RWE)
		ib_flags |= IB_ACCESS_REMOTE_WRITE;
	if (mlx5_flags & MLX5_QP_BIT_RAE)
		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

	return ib_flags;
}
static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr,
			  struct mlx5_qp_path *path)
{
	struct mlx5_core_dev *dev = &ibdev->mdev;

	memset(ib_ah_attr, 0, sizeof(*ib_ah_attr));
	ib_ah_attr->port_num	  = path->port;

	if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports)
		return;

	ib_ah_attr->sl = path->sl & 0xf;

	ib_ah_attr->dlid	  = be16_to_cpu(path->rlid);
	ib_ah_attr->src_path_bits = path->grh_mlid & 0x7f;
	ib_ah_attr->static_rate   = path->static_rate ? path->static_rate - 5 : 0;
	ib_ah_attr->ah_flags      = (path->grh_mlid & (1 << 7)) ? IB_AH_GRH : 0;
	if (ib_ah_attr->ah_flags) {
		ib_ah_attr->grh.sgid_index = path->mgid_index;
		ib_ah_attr->grh.hop_limit  = path->hop_limit;
		ib_ah_attr->grh.traffic_class =
			(be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
		ib_ah_attr->grh.flow_label =
			be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
		memcpy(ib_ah_attr->grh.dgid.raw,
		       path->rgid, sizeof(ib_ah_attr->grh.dgid.raw));
	}
}
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_query_qp_mbox_out *outb;
	struct mlx5_qp_context *context;
	int mlx5_state;
	int err = 0;

	mutex_lock(&qp->mutex);
	outb = kzalloc(sizeof(*outb), GFP_KERNEL);
	if (!outb) {
		err = -ENOMEM;
		goto out;
	}
	context = &outb->ctx;
	err = mlx5_core_qp_query(&dev->mdev, &qp->mqp, outb, sizeof(*outb));
	if (err)
		goto out_free;

	mlx5_state = be32_to_cpu(context->flags) >> 28;

	qp->state		     = to_ib_qp_state(mlx5_state);
	qp_attr->qp_state	     = qp->state;
	qp_attr->path_mtu	     = context->mtu_msgmax >> 5;
	qp_attr->path_mig_state	     =
		to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
	qp_attr->qkey		     = be32_to_cpu(context->qkey);
	qp_attr->rq_psn		     = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
	qp_attr->sq_psn		     = be32_to_cpu(context->next_send_psn) & 0xffffff;
	qp_attr->dest_qp_num	     = be32_to_cpu(context->log_pg_sz_remote_qpn) & 0xffffff;
	qp_attr->qp_access_flags     =
		to_ib_qp_access_flags(be32_to_cpu(context->params2));

	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
		to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
		to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
		qp_attr->alt_pkey_index = context->alt_path.pkey_index & 0x7f;
		qp_attr->alt_port_num	= qp_attr->alt_ah_attr.port_num;
	}

	qp_attr->pkey_index = context->pri_path.pkey_index & 0x7f;
	qp_attr->port_num = context->pri_path.port;

	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
	qp_attr->sq_draining = mlx5_state == MLX5_QP_STATE_SQ_DRAINING;

	qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);

	qp_attr->max_dest_rd_atomic =
		1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
	qp_attr->min_rnr_timer	    =
		(be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
	qp_attr->timeout	    = context->pri_path.ackto_lt >> 3;
	qp_attr->retry_cnt	    = (be32_to_cpu(context->params1) >> 16) & 0x7;
	qp_attr->rnr_retry	    = (be32_to_cpu(context->params1) >> 13) & 0x7;
	qp_attr->alt_timeout	    = context->alt_path.ackto_lt >> 3;
	qp_attr->cur_qp_state	    = qp_attr->qp_state;
	qp_attr->cap.max_recv_wr    = qp->rq.wqe_cnt;
	qp_attr->cap.max_recv_sge   = qp->rq.max_gs;

	if (!ibqp->uobject) {
		qp_attr->cap.max_send_wr  = qp->sq.wqe_cnt;
		qp_attr->cap.max_send_sge = qp->sq.max_gs;
	} else {
		qp_attr->cap.max_send_wr  = 0;
		qp_attr->cap.max_send_sge = 0;
	}

	/* We don't support inline sends for kernel QPs (yet), and we
	 * don't know what userspace's value should be.
	 */
	qp_attr->cap.max_inline_data = 0;

	qp_init_attr->cap	     = qp_attr->cap;

	qp_init_attr->create_flags = 0;
	if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
		qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;

	qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
		IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;

out_free:
	kfree(outb);

out:
	mutex_unlock(&qp->mutex);
	return err;
}
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
				   struct ib_ucontext *context,
				   struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_xrcd *xrcd;
	int err;

	if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_XRC))
		return ERR_PTR(-ENOSYS);

	xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
	if (!xrcd)
		return ERR_PTR(-ENOMEM);

	err = mlx5_core_xrcd_alloc(&dev->mdev, &xrcd->xrcdn);
	if (err) {
		kfree(xrcd);
		return ERR_PTR(-ENOMEM);
	}

	return &xrcd->ibxrcd;
}
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
	u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
	int err;

	err = mlx5_core_xrcd_dealloc(&dev->mdev, xrcdn);
	if (err) {
		mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
		return err;
	}

	kfree(to_mxrcd(xrcd));

	return 0;
}