/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_user_verbs.h>

/* not supported currently */
static int wq_signature;

enum {
	MLX5_IB_ACK_REQ_FREQ	= 8,
};

enum {
	MLX5_IB_DEFAULT_SCHED_QUEUE	= 0x83,
	MLX5_IB_DEFAULT_QP0_SCHED_QUEUE	= 0x3f,
	MLX5_IB_LINK_TYPE_IB		= 0,
	MLX5_IB_LINK_TYPE_ETH		= 1
};

enum {
	MLX5_IB_SQ_STRIDE	= 6,
	MLX5_IB_CACHE_LINE_SIZE	= 64,
};
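
/*
 * MLX5_IB_SQ_STRIDE is a shift: send WQE basic blocks are addressed in
 * 1 << 6 = 64-byte units, which is why mlx5_get_send_wqe() below shifts
 * the WQE index by this constant rather than by a per-QP stride.
 */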
static const u32 mlx5_ib_opcode[] = {
	[IB_WR_SEND]				= MLX5_OPCODE_SEND,
	[IB_WR_LSO]				= MLX5_OPCODE_LSO,
	[IB_WR_SEND_WITH_IMM]			= MLX5_OPCODE_SEND_IMM,
	[IB_WR_RDMA_WRITE]			= MLX5_OPCODE_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM]		= MLX5_OPCODE_RDMA_WRITE_IMM,
	[IB_WR_RDMA_READ]			= MLX5_OPCODE_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP]		= MLX5_OPCODE_ATOMIC_CS,
	[IB_WR_ATOMIC_FETCH_AND_ADD]		= MLX5_OPCODE_ATOMIC_FA,
	[IB_WR_SEND_WITH_INV]			= MLX5_OPCODE_SEND_INVAL,
	[IB_WR_LOCAL_INV]			= MLX5_OPCODE_UMR,
	[IB_WR_REG_MR]				= MLX5_OPCODE_UMR,
	[IB_WR_MASKED_ATOMIC_CMP_AND_SWP]	= MLX5_OPCODE_ATOMIC_MASKED_CS,
	[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]	= MLX5_OPCODE_ATOMIC_MASKED_FA,
	[MLX5_IB_WR_UMR]			= MLX5_OPCODE_UMR,
};
struct mlx5_wqe_eth_pad {
	u8 rsvd0[16];
};
static int is_qp0(enum ib_qp_type qp_type)
{
	return qp_type == IB_QPT_SMI;
}

static int is_sqp(enum ib_qp_type qp_type)
{
	return is_qp0(qp_type) || is_qp1(qp_type);
}

static void *get_wqe(struct mlx5_ib_qp *qp, int offset)
{
	return mlx5_buf_offset(&qp->buf, offset);
}

static void *get_recv_wqe(struct mlx5_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
}

void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE));
}
/**
 * mlx5_ib_read_user_wqe() - Copy a user-space WQE to kernel space.
 *
 * @qp: QP to copy from.
 * @send: copy from the send queue when non-zero, use the receive queue
 *	  otherwise.
 * @wqe_index: index to start copying from. For send work queues, the
 *	       wqe_index is in units of MLX5_SEND_WQE_BB.
 *	       For receive work queues, it is the number of work queue
 *	       elements in the queue.
 * @buffer: destination buffer.
 * @length: maximum number of bytes to copy.
 *
 * Copies at least a single WQE, but may copy more data.
 *
 * Return: the number of bytes copied, or an error code.
 */
int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
			  void *buffer, u32 length,
			  struct mlx5_ib_qp_base *base)
{
	struct ib_device *ibdev = qp->ibqp.device;
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_wq *wq = send ? &qp->sq : &qp->rq;
	size_t offset;
	size_t wq_end;
	struct ib_umem *umem = base->ubuffer.umem;
	u32 first_copy_length;
	int wqe_length;
	int ret;

	if (wq->wqe_cnt == 0) {
		mlx5_ib_dbg(dev, "mlx5_ib_read_user_wqe for a QP with wqe_cnt == 0. qp_type: 0x%x\n",
			    qp->ibqp.qp_type);
		return -EINVAL;
	}

	offset = wq->offset + ((wqe_index % wq->wqe_cnt) << wq->wqe_shift);
	wq_end = wq->offset + (wq->wqe_cnt << wq->wqe_shift);

	if (send && length < sizeof(struct mlx5_wqe_ctrl_seg))
		return -EINVAL;

	if (offset > umem->length ||
	    (send && offset + sizeof(struct mlx5_wqe_ctrl_seg) > umem->length))
		return -EINVAL;

	first_copy_length = min_t(u32, offset + length, wq_end) - offset;
	ret = ib_umem_copy_from(buffer, umem, offset, first_copy_length);
	if (ret)
		return ret;

	if (send) {
		struct mlx5_wqe_ctrl_seg *ctrl = buffer;
		int ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;

		wqe_length = ds * MLX5_WQE_DS_UNITS;
	} else {
		wqe_length = 1 << wq->wqe_shift;
	}

	if (wqe_length <= first_copy_length)
		return first_copy_length;

	ret = ib_umem_copy_from(buffer + first_copy_length, umem, wq->offset,
				wqe_length - first_copy_length);
	if (ret)
		return ret;

	return wqe_length;
}
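
/*
 * Illustrative use only (the buffer, index and base below are
 * hypothetical, not taken from a real caller): a send WQE can be pulled
 * into a kernel buffer roughly like this,
 *
 *	char buf[4 * MLX5_SEND_WQE_BB];
 *	int copied = mlx5_ib_read_user_wqe(qp, 1, idx, buf, sizeof(buf),
 *					   &qp->trans_qp.base);
 *	if (copied < 0)
 *		return copied;
 *
 * The second ib_umem_copy_from() above covers a WQE that wraps from the
 * end of the work queue ring back to wq->offset.
 */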
static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
{
	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
	struct ib_event event;

	if (type == MLX5_EVENT_TYPE_PATH_MIG) {
		/* This event is only valid for trans_qps */
		to_mibqp(qp)->port = to_mibqp(qp)->trans_qp.alt_port;
	}

	if (ibqp->event_handler) {
		event.device     = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case MLX5_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case MLX5_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case MLX5_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn);
			return;
		}

		ibqp->event_handler(&event, ibqp->qp_context);
	}
}
static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
		       int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
{
	int wqe_size;
	int wq_size;

	/* Sanity check RQ size before proceeding */
	if (cap->max_recv_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)))
		return -EINVAL;

	if (!has_rq) {
		qp->rq.max_gs = 0;
		qp->rq.wqe_cnt = 0;
		qp->rq.wqe_shift = 0;
		cap->max_recv_wr = 0;
		cap->max_recv_sge = 0;
	} else {
		if (ucmd) {
			qp->rq.wqe_cnt = ucmd->rq_wqe_count;
			qp->rq.wqe_shift = ucmd->rq_wqe_shift;
			qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
			qp->rq.max_post = qp->rq.wqe_cnt;
		} else {
			wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0;
			wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg);
			wqe_size = roundup_pow_of_two(wqe_size);
			wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
			wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
			qp->rq.wqe_cnt = wq_size / wqe_size;
			if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq)) {
				mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
					    wqe_size,
					    MLX5_CAP_GEN(dev->mdev,
							 max_wqe_sz_rq));
				return -EINVAL;
			}
			qp->rq.wqe_shift = ilog2(wqe_size);
			qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
			qp->rq.max_post = qp->rq.wqe_cnt;
		}
	}

	return 0;
}
static int sq_overhead(struct ib_qp_init_attr *attr)
{
	int size = 0;

	switch (attr->qp_type) {
	case IB_QPT_XRC_INI:
		size += sizeof(struct mlx5_wqe_xrc_seg);
		/* fall through */
	case IB_QPT_RC:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			max(sizeof(struct mlx5_wqe_atomic_seg) +
			    sizeof(struct mlx5_wqe_raddr_seg),
			    sizeof(struct mlx5_wqe_umr_ctrl_seg) +
			    sizeof(struct mlx5_mkey_seg));
		break;

	case IB_QPT_XRC_TGT:
		return 0;

	case IB_QPT_UC:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			max(sizeof(struct mlx5_wqe_raddr_seg),
			    sizeof(struct mlx5_wqe_umr_ctrl_seg) +
			    sizeof(struct mlx5_mkey_seg));
		break;

	case IB_QPT_UD:
		if (attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
			size += sizeof(struct mlx5_wqe_eth_pad) +
				sizeof(struct mlx5_wqe_eth_seg);
		/* fall through */
	case IB_QPT_SMI:
	case MLX5_IB_QPT_HW_GSI:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_datagram_seg);
		break;

	case MLX5_IB_QPT_REG_UMR:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_umr_ctrl_seg) +
			sizeof(struct mlx5_mkey_seg);
		break;

	default:
		return -EINVAL;
	}

	return size;
}
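
/*
 * sq_overhead() above accounts only for the fixed per-WQE segments
 * (control, transport, and UMR/mkey segments where relevant); the
 * variable data and inline segments are added on top of it in
 * calc_send_wqe() below.
 */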
static int calc_send_wqe(struct ib_qp_init_attr *attr)
{
	int inl_size = 0;
	int size;

	size = sq_overhead(attr);
	if (size < 0)
		return size;

	if (attr->cap.max_inline_data) {
		inl_size = size + sizeof(struct mlx5_wqe_inline_seg) +
			attr->cap.max_inline_data;
	}

	size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg);
	if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN &&
	    ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB) < MLX5_SIG_WQE_SIZE)
		return MLX5_SIG_WQE_SIZE;
	else
		return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
}
static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
			struct mlx5_ib_qp *qp)
{
	int wqe_size;
	int wq_size;

	if (!attr->cap.max_send_wr)
		return 0;

	wqe_size = calc_send_wqe(attr);
	mlx5_ib_dbg(dev, "wqe_size %d\n", wqe_size);
	if (wqe_size < 0)
		return wqe_size;

	if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
		mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
			    wqe_size, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
		return -EINVAL;
	}

	qp->max_inline_data = wqe_size - sq_overhead(attr) -
			      sizeof(struct mlx5_wqe_inline_seg);
	attr->cap.max_inline_data = qp->max_inline_data;

	if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN)
		qp->signature_en = true;

	wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
	qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
	if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
		mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
			    qp->sq.wqe_cnt,
			    1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
		return -ENOMEM;
	}
	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
	qp->sq.max_gs = attr->cap.max_send_sge;
	qp->sq.max_post = wq_size / wqe_size;
	attr->cap.max_send_wr = qp->sq.max_post;

	return wq_size;
}
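
/*
 * Rough example of the sizing above (illustrative numbers only): with
 * max_send_wr = 100 and a computed wqe_size of 192 bytes, wq_size is
 * rounded up to 32768 bytes, giving sq.wqe_cnt = 32768 / 64 = 512 basic
 * blocks and sq.max_post = 32768 / 192 = 170 postable WQEs.
 */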
static int set_user_buf_size(struct mlx5_ib_dev *dev,
			     struct mlx5_ib_qp *qp,
			     struct mlx5_ib_create_qp *ucmd,
			     struct mlx5_ib_qp_base *base,
			     struct ib_qp_init_attr *attr)
{
	int desc_sz = 1 << qp->sq.wqe_shift;

	if (desc_sz > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
		mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
			     desc_sz, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
		return -EINVAL;
	}

	if (ucmd->sq_wqe_count && ((1 << ilog2(ucmd->sq_wqe_count)) != ucmd->sq_wqe_count)) {
		mlx5_ib_warn(dev, "sq_wqe_count %d is not a power of two\n",
			     ucmd->sq_wqe_count);
		return -EINVAL;
	}

	qp->sq.wqe_cnt = ucmd->sq_wqe_count;

	if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
		mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
			     qp->sq.wqe_cnt,
			     1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
		return -EINVAL;
	}

	if (attr->qp_type == IB_QPT_RAW_PACKET) {
		base->ubuffer.buf_size = qp->rq.wqe_cnt << qp->rq.wqe_shift;
		qp->raw_packet_qp.sq.ubuffer.buf_size = qp->sq.wqe_cnt << 6;
	} else {
		base->ubuffer.buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
					 (qp->sq.wqe_cnt << 6);
	}

	return 0;
}
static int qp_has_rq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_INI ||
	    attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
	    attr->qp_type == MLX5_IB_QPT_REG_UMR ||
	    !attr->cap.max_recv_wr)
		return 0;

	return 1;
}
static int first_med_uuar(void)
{
	return 1;
}

static int next_uuar(int n)
{
	n++;

	while (((n % 4) & 2))
		n++;

	return n;
}

static int num_med_uuar(struct mlx5_uuar_info *uuari)
{
	int n;

	n = uuari->num_uars * MLX5_NON_FP_BF_REGS_PER_PAGE -
		uuari->num_low_latency_uuars - 1;

	return n >= 0 ? n : 0;
}

static int max_uuari(struct mlx5_uuar_info *uuari)
{
	return uuari->num_uars * 4;
}
static int first_hi_uuar(struct mlx5_uuar_info *uuari)
{
	int i;
	int med;
	int t = 0;

	med = num_med_uuar(uuari);
	for (t = 0, i = first_med_uuar();; i = next_uuar(i)) {
		t++;
		if (t == med)
			return next_uuar(i);
	}

	return 0;
}
static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari)
{
	int i;

	for (i = first_hi_uuar(uuari); i < max_uuari(uuari); i = next_uuar(i)) {
		if (!test_bit(i, uuari->bitmap)) {
			set_bit(i, uuari->bitmap);
			uuari->count[i]++;
			return i;
		}
	}

	return -ENOMEM;
}

static int alloc_med_class_uuar(struct mlx5_uuar_info *uuari)
{
	int minidx = first_med_uuar();
	int i;

	for (i = first_med_uuar(); i < first_hi_uuar(uuari); i = next_uuar(i)) {
		if (uuari->count[i] < uuari->count[minidx])
			minidx = i;
	}

	uuari->count[minidx]++;
	return minidx;
}
static int alloc_uuar(struct mlx5_uuar_info *uuari,
		      enum mlx5_ib_latency_class lat)
{
	int uuarn = -EINVAL;

	mutex_lock(&uuari->lock);
	switch (lat) {
	case MLX5_IB_LATENCY_CLASS_LOW:
		uuarn = 0;
		uuari->count[uuarn]++;
		break;

	case MLX5_IB_LATENCY_CLASS_MEDIUM:
		if (uuari->ver < 2)
			uuarn = -ENOMEM;
		else
			uuarn = alloc_med_class_uuar(uuari);
		break;

	case MLX5_IB_LATENCY_CLASS_HIGH:
		if (uuari->ver < 2)
			uuarn = -ENOMEM;
		else
			uuarn = alloc_high_class_uuar(uuari);
		break;

	case MLX5_IB_LATENCY_CLASS_FAST_PATH:
		uuarn = 2;
		break;
	}
	mutex_unlock(&uuari->lock);

	return uuarn;
}
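
/*
 * UUAR allocation policy, as implemented above: the low-latency class
 * always shares register 0, the fast-path class gets a dedicated
 * register, the medium class picks the least-used medium register, and
 * the high class claims a free register from the per-context bitmap;
 * callers fall back from high to medium to low when a class is
 * exhausted (see create_user_qp() below).
 */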
static void free_med_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
	clear_bit(uuarn, uuari->bitmap);
	--uuari->count[uuarn];
}

static void free_high_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
	clear_bit(uuarn, uuari->bitmap);
	--uuari->count[uuarn];
}

static void free_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
	int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
	int high_uuar = nuuars - uuari->num_low_latency_uuars;

	mutex_lock(&uuari->lock);
	if (uuarn == 0) {
		--uuari->count[uuarn];
		goto out;
	}

	if (uuarn < high_uuar) {
		free_med_class_uuar(uuari, uuarn);
		goto out;
	}

	free_high_class_uuar(uuari, uuarn);

out:
	mutex_unlock(&uuari->lock);
}
static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:	return MLX5_QP_STATE_RST;
	case IB_QPS_INIT:	return MLX5_QP_STATE_INIT;
	case IB_QPS_RTR:	return MLX5_QP_STATE_RTR;
	case IB_QPS_RTS:	return MLX5_QP_STATE_RTS;
	case IB_QPS_SQD:	return MLX5_QP_STATE_SQD;
	case IB_QPS_SQE:	return MLX5_QP_STATE_SQER;
	case IB_QPS_ERR:	return MLX5_QP_STATE_ERR;
	default:		return -1;
	}
}

static int to_mlx5_st(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_RC:			return MLX5_QP_ST_RC;
	case IB_QPT_UC:			return MLX5_QP_ST_UC;
	case IB_QPT_UD:			return MLX5_QP_ST_UD;
	case MLX5_IB_QPT_REG_UMR:	return MLX5_QP_ST_REG_UMR;
	case IB_QPT_XRC_INI:
	case IB_QPT_XRC_TGT:		return MLX5_QP_ST_XRC;
	case IB_QPT_SMI:		return MLX5_QP_ST_QP0;
	case MLX5_IB_QPT_HW_GSI:	return MLX5_QP_ST_QP1;
	case IB_QPT_RAW_IPV6:		return MLX5_QP_ST_RAW_IPV6;
	case IB_QPT_RAW_PACKET:
	case IB_QPT_RAW_ETHERTYPE:	return MLX5_QP_ST_RAW_ETHERTYPE;
	default:			return -EINVAL;
	}
}
static int uuarn_to_uar_index(struct mlx5_uuar_info *uuari, int uuarn)
{
	return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index;
}
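
/*
 * Each UAR page exposes MLX5_BF_REGS_PER_PAGE blue-flame registers, so
 * dividing a uuarn by that constant selects the UAR page whose hardware
 * index is programmed into the QP context.
 */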
static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev,
			    struct ib_pd *pd,
			    unsigned long addr, size_t size,
			    struct ib_umem **umem,
			    int *npages, int *page_shift, int *ncont,
			    u32 *offset)
{
	int err;

	*umem = ib_umem_get(pd->uobject->context, addr, size, 0, 0);
	if (IS_ERR(*umem)) {
		mlx5_ib_dbg(dev, "umem_get failed\n");
		return PTR_ERR(*umem);
	}

	mlx5_ib_cont_pages(*umem, addr, npages, page_shift, ncont, NULL);

	err = mlx5_ib_get_buf_offset(addr, *page_shift, offset);
	if (err) {
		mlx5_ib_warn(dev, "bad offset\n");
		goto err_umem;
	}

	mlx5_ib_dbg(dev, "addr 0x%lx, size %zu, npages %d, page_shift %d, ncont %d, offset %d\n",
		    addr, size, *npages, *page_shift, *ncont, *offset);

	return 0;

err_umem:
	ib_umem_release(*umem);
	*umem = NULL;

	return err;
}
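
/*
 * mlx5_ib_cont_pages() describes how the user buffer can be presented
 * to the HCA: npages is the raw page count, page_shift the largest page
 * size at which the buffer is naturally aligned and contiguous, and
 * ncont the number of pages of that size; offset encodes where the
 * buffer starts within its first page, in the quantum the device
 * expects.
 */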
652 static int create_user_qp(struct mlx5_ib_dev
*dev
, struct ib_pd
*pd
,
653 struct mlx5_ib_qp
*qp
, struct ib_udata
*udata
,
654 struct ib_qp_init_attr
*attr
,
655 struct mlx5_create_qp_mbox_in
**in
,
656 struct mlx5_ib_create_qp_resp
*resp
, int *inlen
,
657 struct mlx5_ib_qp_base
*base
)
659 struct mlx5_ib_ucontext
*context
;
660 struct mlx5_ib_create_qp ucmd
;
661 struct mlx5_ib_ubuffer
*ubuffer
= &base
->ubuffer
;
670 err
= ib_copy_from_udata(&ucmd
, udata
, sizeof(ucmd
));
672 mlx5_ib_dbg(dev
, "copy failed\n");
676 context
= to_mucontext(pd
->uobject
->context
);
678 * TBD: should come from the verbs when we have the API
680 if (qp
->flags
& MLX5_IB_QP_CROSS_CHANNEL
)
681 /* In CROSS_CHANNEL CQ and QP must use the same UAR */
682 uuarn
= MLX5_CROSS_CHANNEL_UUAR
;
684 uuarn
= alloc_uuar(&context
->uuari
, MLX5_IB_LATENCY_CLASS_HIGH
);
686 mlx5_ib_dbg(dev
, "failed to allocate low latency UUAR\n");
687 mlx5_ib_dbg(dev
, "reverting to medium latency\n");
688 uuarn
= alloc_uuar(&context
->uuari
, MLX5_IB_LATENCY_CLASS_MEDIUM
);
690 mlx5_ib_dbg(dev
, "failed to allocate medium latency UUAR\n");
691 mlx5_ib_dbg(dev
, "reverting to high latency\n");
692 uuarn
= alloc_uuar(&context
->uuari
, MLX5_IB_LATENCY_CLASS_LOW
);
694 mlx5_ib_warn(dev
, "uuar allocation failed\n");
701 uar_index
= uuarn_to_uar_index(&context
->uuari
, uuarn
);
702 mlx5_ib_dbg(dev
, "uuarn 0x%x, uar_index 0x%x\n", uuarn
, uar_index
);
705 qp
->sq
.wqe_shift
= ilog2(MLX5_SEND_WQE_BB
);
706 qp
->sq
.offset
= qp
->rq
.wqe_cnt
<< qp
->rq
.wqe_shift
;
708 err
= set_user_buf_size(dev
, qp
, &ucmd
, base
, attr
);
712 if (ucmd
.buf_addr
&& ubuffer
->buf_size
) {
713 ubuffer
->buf_addr
= ucmd
.buf_addr
;
714 err
= mlx5_ib_umem_get(dev
, pd
, ubuffer
->buf_addr
,
716 &ubuffer
->umem
, &npages
, &page_shift
,
721 ubuffer
->umem
= NULL
;
724 *inlen
= sizeof(**in
) + sizeof(*(*in
)->pas
) * ncont
;
725 *in
= mlx5_vzalloc(*inlen
);
731 mlx5_ib_populate_pas(dev
, ubuffer
->umem
, page_shift
,
733 (*in
)->ctx
.log_pg_sz_remote_qpn
=
734 cpu_to_be32((page_shift
- MLX5_ADAPTER_PAGE_SHIFT
) << 24);
735 (*in
)->ctx
.params2
= cpu_to_be32(offset
<< 6);
737 (*in
)->ctx
.qp_counter_set_usr_page
= cpu_to_be32(uar_index
);
738 resp
->uuar_index
= uuarn
;
741 err
= mlx5_ib_db_map_user(context
, ucmd
.db_addr
, &qp
->db
);
743 mlx5_ib_dbg(dev
, "map failed\n");
747 err
= ib_copy_to_udata(udata
, resp
, sizeof(*resp
));
749 mlx5_ib_dbg(dev
, "copy failed\n");
752 qp
->create_type
= MLX5_QP_USER
;
757 mlx5_ib_db_unmap_user(context
, &qp
->db
);
764 ib_umem_release(ubuffer
->umem
);
767 free_uuar(&context
->uuari
, uuarn
);
771 static void destroy_qp_user(struct ib_pd
*pd
, struct mlx5_ib_qp
*qp
,
772 struct mlx5_ib_qp_base
*base
)
774 struct mlx5_ib_ucontext
*context
;
776 context
= to_mucontext(pd
->uobject
->context
);
777 mlx5_ib_db_unmap_user(context
, &qp
->db
);
778 if (base
->ubuffer
.umem
)
779 ib_umem_release(base
->ubuffer
.umem
);
780 free_uuar(&context
->uuari
, qp
->uuarn
);
783 static int create_kernel_qp(struct mlx5_ib_dev
*dev
,
784 struct ib_qp_init_attr
*init_attr
,
785 struct mlx5_ib_qp
*qp
,
786 struct mlx5_create_qp_mbox_in
**in
, int *inlen
,
787 struct mlx5_ib_qp_base
*base
)
789 enum mlx5_ib_latency_class lc
= MLX5_IB_LATENCY_CLASS_LOW
;
790 struct mlx5_uuar_info
*uuari
;
795 uuari
= &dev
->mdev
->priv
.uuari
;
796 if (init_attr
->create_flags
& ~(IB_QP_CREATE_SIGNATURE_EN
|
797 IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK
|
798 IB_QP_CREATE_IPOIB_UD_LSO
|
799 mlx5_ib_create_qp_sqpn_qp1()))
802 if (init_attr
->qp_type
== MLX5_IB_QPT_REG_UMR
)
803 lc
= MLX5_IB_LATENCY_CLASS_FAST_PATH
;
805 uuarn
= alloc_uuar(uuari
, lc
);
807 mlx5_ib_dbg(dev
, "\n");
811 qp
->bf
= &uuari
->bfs
[uuarn
];
812 uar_index
= qp
->bf
->uar
->index
;
814 err
= calc_sq_size(dev
, init_attr
, qp
);
816 mlx5_ib_dbg(dev
, "err %d\n", err
);
821 qp
->sq
.offset
= qp
->rq
.wqe_cnt
<< qp
->rq
.wqe_shift
;
822 base
->ubuffer
.buf_size
= err
+ (qp
->rq
.wqe_cnt
<< qp
->rq
.wqe_shift
);
824 err
= mlx5_buf_alloc(dev
->mdev
, base
->ubuffer
.buf_size
, &qp
->buf
);
826 mlx5_ib_dbg(dev
, "err %d\n", err
);
830 qp
->sq
.qend
= mlx5_get_send_wqe(qp
, qp
->sq
.wqe_cnt
);
831 *inlen
= sizeof(**in
) + sizeof(*(*in
)->pas
) * qp
->buf
.npages
;
832 *in
= mlx5_vzalloc(*inlen
);
837 (*in
)->ctx
.qp_counter_set_usr_page
= cpu_to_be32(uar_index
);
838 (*in
)->ctx
.log_pg_sz_remote_qpn
=
839 cpu_to_be32((qp
->buf
.page_shift
- MLX5_ADAPTER_PAGE_SHIFT
) << 24);
840 /* Set "fast registration enabled" for all kernel QPs */
841 (*in
)->ctx
.params1
|= cpu_to_be32(1 << 11);
842 (*in
)->ctx
.sq_crq_size
|= cpu_to_be16(1 << 4);
844 if (init_attr
->create_flags
& mlx5_ib_create_qp_sqpn_qp1()) {
845 (*in
)->ctx
.deth_sqpn
= cpu_to_be32(1);
846 qp
->flags
|= MLX5_IB_QP_SQPN_QP1
;
849 mlx5_fill_page_array(&qp
->buf
, (*in
)->pas
);
851 err
= mlx5_db_alloc(dev
->mdev
, &qp
->db
);
853 mlx5_ib_dbg(dev
, "err %d\n", err
);
857 qp
->sq
.wrid
= kmalloc(qp
->sq
.wqe_cnt
* sizeof(*qp
->sq
.wrid
), GFP_KERNEL
);
858 qp
->sq
.wr_data
= kmalloc(qp
->sq
.wqe_cnt
* sizeof(*qp
->sq
.wr_data
), GFP_KERNEL
);
859 qp
->rq
.wrid
= kmalloc(qp
->rq
.wqe_cnt
* sizeof(*qp
->rq
.wrid
), GFP_KERNEL
);
860 qp
->sq
.w_list
= kmalloc(qp
->sq
.wqe_cnt
* sizeof(*qp
->sq
.w_list
), GFP_KERNEL
);
861 qp
->sq
.wqe_head
= kmalloc(qp
->sq
.wqe_cnt
* sizeof(*qp
->sq
.wqe_head
), GFP_KERNEL
);
863 if (!qp
->sq
.wrid
|| !qp
->sq
.wr_data
|| !qp
->rq
.wrid
||
864 !qp
->sq
.w_list
|| !qp
->sq
.wqe_head
) {
868 qp
->create_type
= MLX5_QP_KERNEL
;
873 mlx5_db_free(dev
->mdev
, &qp
->db
);
874 kfree(qp
->sq
.wqe_head
);
875 kfree(qp
->sq
.w_list
);
877 kfree(qp
->sq
.wr_data
);
884 mlx5_buf_free(dev
->mdev
, &qp
->buf
);
887 free_uuar(&dev
->mdev
->priv
.uuari
, uuarn
);
891 static void destroy_qp_kernel(struct mlx5_ib_dev
*dev
, struct mlx5_ib_qp
*qp
)
893 mlx5_db_free(dev
->mdev
, &qp
->db
);
894 kfree(qp
->sq
.wqe_head
);
895 kfree(qp
->sq
.w_list
);
897 kfree(qp
->sq
.wr_data
);
899 mlx5_buf_free(dev
->mdev
, &qp
->buf
);
900 free_uuar(&dev
->mdev
->priv
.uuari
, qp
->bf
->uuarn
);
903 static __be32
get_rx_type(struct mlx5_ib_qp
*qp
, struct ib_qp_init_attr
*attr
)
905 if (attr
->srq
|| (attr
->qp_type
== IB_QPT_XRC_TGT
) ||
906 (attr
->qp_type
== IB_QPT_XRC_INI
))
907 return cpu_to_be32(MLX5_SRQ_RQ
);
908 else if (!qp
->has_rq
)
909 return cpu_to_be32(MLX5_ZERO_LEN_RQ
);
911 return cpu_to_be32(MLX5_NON_ZERO_RQ
);
914 static int is_connected(enum ib_qp_type qp_type
)
916 if (qp_type
== IB_QPT_RC
|| qp_type
== IB_QPT_UC
)
922 static int create_raw_packet_qp_tis(struct mlx5_ib_dev
*dev
,
923 struct mlx5_ib_sq
*sq
, u32 tdn
)
925 u32 in
[MLX5_ST_SZ_DW(create_tis_in
)];
926 void *tisc
= MLX5_ADDR_OF(create_tis_in
, in
, ctx
);
928 memset(in
, 0, sizeof(in
));
930 MLX5_SET(tisc
, tisc
, transport_domain
, tdn
);
932 return mlx5_core_create_tis(dev
->mdev
, in
, sizeof(in
), &sq
->tisn
);
935 static void destroy_raw_packet_qp_tis(struct mlx5_ib_dev
*dev
,
936 struct mlx5_ib_sq
*sq
)
938 mlx5_core_destroy_tis(dev
->mdev
, sq
->tisn
);
941 static int create_raw_packet_qp_sq(struct mlx5_ib_dev
*dev
,
942 struct mlx5_ib_sq
*sq
, void *qpin
,
945 struct mlx5_ib_ubuffer
*ubuffer
= &sq
->ubuffer
;
949 void *qpc
= MLX5_ADDR_OF(create_qp_in
, qpin
, qpc
);
958 err
= mlx5_ib_umem_get(dev
, pd
, ubuffer
->buf_addr
, ubuffer
->buf_size
,
959 &sq
->ubuffer
.umem
, &npages
, &page_shift
,
964 inlen
= MLX5_ST_SZ_BYTES(create_sq_in
) + sizeof(u64
) * ncont
;
965 in
= mlx5_vzalloc(inlen
);
971 sqc
= MLX5_ADDR_OF(create_sq_in
, in
, ctx
);
972 MLX5_SET(sqc
, sqc
, flush_in_error_en
, 1);
973 MLX5_SET(sqc
, sqc
, state
, MLX5_SQC_STATE_RST
);
974 MLX5_SET(sqc
, sqc
, user_index
, MLX5_GET(qpc
, qpc
, user_index
));
975 MLX5_SET(sqc
, sqc
, cqn
, MLX5_GET(qpc
, qpc
, cqn_snd
));
976 MLX5_SET(sqc
, sqc
, tis_lst_sz
, 1);
977 MLX5_SET(sqc
, sqc
, tis_num_0
, sq
->tisn
);
979 wq
= MLX5_ADDR_OF(sqc
, sqc
, wq
);
980 MLX5_SET(wq
, wq
, wq_type
, MLX5_WQ_TYPE_CYCLIC
);
981 MLX5_SET(wq
, wq
, pd
, MLX5_GET(qpc
, qpc
, pd
));
982 MLX5_SET(wq
, wq
, uar_page
, MLX5_GET(qpc
, qpc
, uar_page
));
983 MLX5_SET64(wq
, wq
, dbr_addr
, MLX5_GET64(qpc
, qpc
, dbr_addr
));
984 MLX5_SET(wq
, wq
, log_wq_stride
, ilog2(MLX5_SEND_WQE_BB
));
985 MLX5_SET(wq
, wq
, log_wq_sz
, MLX5_GET(qpc
, qpc
, log_sq_size
));
986 MLX5_SET(wq
, wq
, log_wq_pg_sz
, page_shift
- MLX5_ADAPTER_PAGE_SHIFT
);
987 MLX5_SET(wq
, wq
, page_offset
, offset
);
989 pas
= (__be64
*)MLX5_ADDR_OF(wq
, wq
, pas
);
990 mlx5_ib_populate_pas(dev
, sq
->ubuffer
.umem
, page_shift
, pas
, 0);
992 err
= mlx5_core_create_sq_tracked(dev
->mdev
, in
, inlen
, &sq
->base
.mqp
);
1002 ib_umem_release(sq
->ubuffer
.umem
);
1003 sq
->ubuffer
.umem
= NULL
;
1008 static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev
*dev
,
1009 struct mlx5_ib_sq
*sq
)
1011 mlx5_core_destroy_sq_tracked(dev
->mdev
, &sq
->base
.mqp
);
1012 ib_umem_release(sq
->ubuffer
.umem
);
static int get_rq_pas_size(void *qpc)
{
	u32 log_page_size = MLX5_GET(qpc, qpc, log_page_size) + 12;
	u32 log_rq_stride = MLX5_GET(qpc, qpc, log_rq_stride);
	u32 log_rq_size   = MLX5_GET(qpc, qpc, log_rq_size);
	u32 page_offset   = MLX5_GET(qpc, qpc, page_offset);
	u32 po_quanta	  = 1 << (log_page_size - 6);
	u32 rq_sz	  = 1 << (log_rq_size + 4 + log_rq_stride);
	u32 page_size	  = 1 << log_page_size;
	u32 rq_sz_po      = rq_sz + (page_offset * po_quanta);
	u32 rq_num_pas    = (rq_sz_po + page_size - 1) / page_size;

	return rq_num_pas * sizeof(u64);
}
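
/*
 * Example of the math above (illustrative values only): a log_page_size
 * field of 0 means 4KB pages, and log_rq_stride = 2 with log_rq_size = 8
 * gives an RQ of 1 << (8 + 4 + 2) = 16KB; with page_offset = 0 that
 * spans four 4KB pages, i.e. 4 * sizeof(u64) = 32 bytes of PAS entries.
 */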
1030 static int create_raw_packet_qp_rq(struct mlx5_ib_dev
*dev
,
1031 struct mlx5_ib_rq
*rq
, void *qpin
)
1033 struct mlx5_ib_qp
*mqp
= rq
->base
.container_mibqp
;
1039 void *qpc
= MLX5_ADDR_OF(create_qp_in
, qpin
, qpc
);
1042 u32 rq_pas_size
= get_rq_pas_size(qpc
);
1044 inlen
= MLX5_ST_SZ_BYTES(create_rq_in
) + rq_pas_size
;
1045 in
= mlx5_vzalloc(inlen
);
1049 rqc
= MLX5_ADDR_OF(create_rq_in
, in
, ctx
);
1050 MLX5_SET(rqc
, rqc
, vsd
, 1);
1051 MLX5_SET(rqc
, rqc
, mem_rq_type
, MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE
);
1052 MLX5_SET(rqc
, rqc
, state
, MLX5_RQC_STATE_RST
);
1053 MLX5_SET(rqc
, rqc
, flush_in_error_en
, 1);
1054 MLX5_SET(rqc
, rqc
, user_index
, MLX5_GET(qpc
, qpc
, user_index
));
1055 MLX5_SET(rqc
, rqc
, cqn
, MLX5_GET(qpc
, qpc
, cqn_rcv
));
1057 if (mqp
->flags
& MLX5_IB_QP_CAP_SCATTER_FCS
)
1058 MLX5_SET(rqc
, rqc
, scatter_fcs
, 1);
1060 wq
= MLX5_ADDR_OF(rqc
, rqc
, wq
);
1061 MLX5_SET(wq
, wq
, wq_type
, MLX5_WQ_TYPE_CYCLIC
);
1062 MLX5_SET(wq
, wq
, end_padding_mode
,
1063 MLX5_GET(qpc
, qpc
, end_padding_mode
));
1064 MLX5_SET(wq
, wq
, page_offset
, MLX5_GET(qpc
, qpc
, page_offset
));
1065 MLX5_SET(wq
, wq
, pd
, MLX5_GET(qpc
, qpc
, pd
));
1066 MLX5_SET64(wq
, wq
, dbr_addr
, MLX5_GET64(qpc
, qpc
, dbr_addr
));
1067 MLX5_SET(wq
, wq
, log_wq_stride
, MLX5_GET(qpc
, qpc
, log_rq_stride
) + 4);
1068 MLX5_SET(wq
, wq
, log_wq_pg_sz
, MLX5_GET(qpc
, qpc
, log_page_size
));
1069 MLX5_SET(wq
, wq
, log_wq_sz
, MLX5_GET(qpc
, qpc
, log_rq_size
));
1071 pas
= (__be64
*)MLX5_ADDR_OF(wq
, wq
, pas
);
1072 qp_pas
= (__be64
*)MLX5_ADDR_OF(create_qp_in
, qpin
, pas
);
1073 memcpy(pas
, qp_pas
, rq_pas_size
);
1075 err
= mlx5_core_create_rq_tracked(dev
->mdev
, in
, inlen
, &rq
->base
.mqp
);
1082 static void destroy_raw_packet_qp_rq(struct mlx5_ib_dev
*dev
,
1083 struct mlx5_ib_rq
*rq
)
1085 mlx5_core_destroy_rq_tracked(dev
->mdev
, &rq
->base
.mqp
);
1088 static int create_raw_packet_qp_tir(struct mlx5_ib_dev
*dev
,
1089 struct mlx5_ib_rq
*rq
, u32 tdn
)
1096 inlen
= MLX5_ST_SZ_BYTES(create_tir_in
);
1097 in
= mlx5_vzalloc(inlen
);
1101 tirc
= MLX5_ADDR_OF(create_tir_in
, in
, ctx
);
1102 MLX5_SET(tirc
, tirc
, disp_type
, MLX5_TIRC_DISP_TYPE_DIRECT
);
1103 MLX5_SET(tirc
, tirc
, inline_rqn
, rq
->base
.mqp
.qpn
);
1104 MLX5_SET(tirc
, tirc
, transport_domain
, tdn
);
1106 err
= mlx5_core_create_tir(dev
->mdev
, in
, inlen
, &rq
->tirn
);
1113 static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev
*dev
,
1114 struct mlx5_ib_rq
*rq
)
1116 mlx5_core_destroy_tir(dev
->mdev
, rq
->tirn
);
1119 static int create_raw_packet_qp(struct mlx5_ib_dev
*dev
, struct mlx5_ib_qp
*qp
,
1120 struct mlx5_create_qp_mbox_in
*in
,
1123 struct mlx5_ib_raw_packet_qp
*raw_packet_qp
= &qp
->raw_packet_qp
;
1124 struct mlx5_ib_sq
*sq
= &raw_packet_qp
->sq
;
1125 struct mlx5_ib_rq
*rq
= &raw_packet_qp
->rq
;
1126 struct ib_uobject
*uobj
= pd
->uobject
;
1127 struct ib_ucontext
*ucontext
= uobj
->context
;
1128 struct mlx5_ib_ucontext
*mucontext
= to_mucontext(ucontext
);
1130 u32 tdn
= mucontext
->tdn
;
1132 if (qp
->sq
.wqe_cnt
) {
1133 err
= create_raw_packet_qp_tis(dev
, sq
, tdn
);
1137 err
= create_raw_packet_qp_sq(dev
, sq
, in
, pd
);
1139 goto err_destroy_tis
;
1141 sq
->base
.container_mibqp
= qp
;
1144 if (qp
->rq
.wqe_cnt
) {
1145 rq
->base
.container_mibqp
= qp
;
1147 err
= create_raw_packet_qp_rq(dev
, rq
, in
);
1149 goto err_destroy_sq
;
1152 err
= create_raw_packet_qp_tir(dev
, rq
, tdn
);
1154 goto err_destroy_rq
;
1157 qp
->trans_qp
.base
.mqp
.qpn
= qp
->sq
.wqe_cnt
? sq
->base
.mqp
.qpn
:
1163 destroy_raw_packet_qp_rq(dev
, rq
);
1165 if (!qp
->sq
.wqe_cnt
)
1167 destroy_raw_packet_qp_sq(dev
, sq
);
1169 destroy_raw_packet_qp_tis(dev
, sq
);
1174 static void destroy_raw_packet_qp(struct mlx5_ib_dev
*dev
,
1175 struct mlx5_ib_qp
*qp
)
1177 struct mlx5_ib_raw_packet_qp
*raw_packet_qp
= &qp
->raw_packet_qp
;
1178 struct mlx5_ib_sq
*sq
= &raw_packet_qp
->sq
;
1179 struct mlx5_ib_rq
*rq
= &raw_packet_qp
->rq
;
1181 if (qp
->rq
.wqe_cnt
) {
1182 destroy_raw_packet_qp_tir(dev
, rq
);
1183 destroy_raw_packet_qp_rq(dev
, rq
);
1186 if (qp
->sq
.wqe_cnt
) {
1187 destroy_raw_packet_qp_sq(dev
, sq
);
1188 destroy_raw_packet_qp_tis(dev
, sq
);
1192 static void raw_packet_qp_copy_info(struct mlx5_ib_qp
*qp
,
1193 struct mlx5_ib_raw_packet_qp
*raw_packet_qp
)
1195 struct mlx5_ib_sq
*sq
= &raw_packet_qp
->sq
;
1196 struct mlx5_ib_rq
*rq
= &raw_packet_qp
->rq
;
1200 sq
->doorbell
= &qp
->db
;
1201 rq
->doorbell
= &qp
->db
;
1204 static int create_qp_common(struct mlx5_ib_dev
*dev
, struct ib_pd
*pd
,
1205 struct ib_qp_init_attr
*init_attr
,
1206 struct ib_udata
*udata
, struct mlx5_ib_qp
*qp
)
1208 struct mlx5_ib_resources
*devr
= &dev
->devr
;
1209 struct mlx5_core_dev
*mdev
= dev
->mdev
;
1210 struct mlx5_ib_qp_base
*base
;
1211 struct mlx5_ib_create_qp_resp resp
;
1212 struct mlx5_create_qp_mbox_in
*in
;
1213 struct mlx5_ib_create_qp ucmd
;
1214 int inlen
= sizeof(*in
);
1216 u32 uidx
= MLX5_IB_DEFAULT_UIDX
;
1219 base
= init_attr
->qp_type
== IB_QPT_RAW_PACKET
?
1220 &qp
->raw_packet_qp
.rq
.base
:
1223 if (init_attr
->qp_type
!= IB_QPT_RAW_PACKET
)
1224 mlx5_ib_odp_create_qp(qp
);
1226 mutex_init(&qp
->mutex
);
1227 spin_lock_init(&qp
->sq
.lock
);
1228 spin_lock_init(&qp
->rq
.lock
);
1230 if (init_attr
->create_flags
& IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK
) {
1231 if (!MLX5_CAP_GEN(mdev
, block_lb_mc
)) {
1232 mlx5_ib_dbg(dev
, "block multicast loopback isn't supported\n");
1235 qp
->flags
|= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK
;
1239 if (init_attr
->create_flags
&
1240 (IB_QP_CREATE_CROSS_CHANNEL
|
1241 IB_QP_CREATE_MANAGED_SEND
|
1242 IB_QP_CREATE_MANAGED_RECV
)) {
1243 if (!MLX5_CAP_GEN(mdev
, cd
)) {
1244 mlx5_ib_dbg(dev
, "cross-channel isn't supported\n");
1247 if (init_attr
->create_flags
& IB_QP_CREATE_CROSS_CHANNEL
)
1248 qp
->flags
|= MLX5_IB_QP_CROSS_CHANNEL
;
1249 if (init_attr
->create_flags
& IB_QP_CREATE_MANAGED_SEND
)
1250 qp
->flags
|= MLX5_IB_QP_MANAGED_SEND
;
1251 if (init_attr
->create_flags
& IB_QP_CREATE_MANAGED_RECV
)
1252 qp
->flags
|= MLX5_IB_QP_MANAGED_RECV
;
1255 if (init_attr
->qp_type
== IB_QPT_UD
&&
1256 (init_attr
->create_flags
& IB_QP_CREATE_IPOIB_UD_LSO
))
1257 if (!MLX5_CAP_GEN(mdev
, ipoib_basic_offloads
)) {
1258 mlx5_ib_dbg(dev
, "ipoib UD lso qp isn't supported\n");
1262 if (init_attr
->create_flags
& IB_QP_CREATE_SCATTER_FCS
) {
1263 if (init_attr
->qp_type
!= IB_QPT_RAW_PACKET
) {
1264 mlx5_ib_dbg(dev
, "Scatter FCS is supported only for Raw Packet QPs");
1267 if (!MLX5_CAP_GEN(dev
->mdev
, eth_net_offloads
) ||
1268 !MLX5_CAP_ETH(dev
->mdev
, scatter_fcs
)) {
1269 mlx5_ib_dbg(dev
, "Scatter FCS isn't supported\n");
1272 qp
->flags
|= MLX5_IB_QP_CAP_SCATTER_FCS
;
1275 if (init_attr
->sq_sig_type
== IB_SIGNAL_ALL_WR
)
1276 qp
->sq_signal_bits
= MLX5_WQE_CTRL_CQ_UPDATE
;
1278 if (pd
&& pd
->uobject
) {
1279 if (ib_copy_from_udata(&ucmd
, udata
, sizeof(ucmd
))) {
1280 mlx5_ib_dbg(dev
, "copy failed\n");
1284 err
= get_qp_user_index(to_mucontext(pd
->uobject
->context
),
1285 &ucmd
, udata
->inlen
, &uidx
);
1289 qp
->wq_sig
= !!(ucmd
.flags
& MLX5_QP_FLAG_SIGNATURE
);
1290 qp
->scat_cqe
= !!(ucmd
.flags
& MLX5_QP_FLAG_SCATTER_CQE
);
1292 qp
->wq_sig
= !!wq_signature
;
1295 qp
->has_rq
= qp_has_rq(init_attr
);
1296 err
= set_rq_size(dev
, &init_attr
->cap
, qp
->has_rq
,
1297 qp
, (pd
&& pd
->uobject
) ? &ucmd
: NULL
);
1299 mlx5_ib_dbg(dev
, "err %d\n", err
);
1306 1 << MLX5_CAP_GEN(mdev
, log_max_qp_sz
);
1307 mlx5_ib_dbg(dev
, "requested sq_wqe_count (%d)\n", ucmd
.sq_wqe_count
);
1308 if (ucmd
.rq_wqe_shift
!= qp
->rq
.wqe_shift
||
1309 ucmd
.rq_wqe_count
!= qp
->rq
.wqe_cnt
) {
1310 mlx5_ib_dbg(dev
, "invalid rq params\n");
1313 if (ucmd
.sq_wqe_count
> max_wqes
) {
1314 mlx5_ib_dbg(dev
, "requested sq_wqe_count (%d) > max allowed (%d)\n",
1315 ucmd
.sq_wqe_count
, max_wqes
);
1318 if (init_attr
->create_flags
&
1319 mlx5_ib_create_qp_sqpn_qp1()) {
1320 mlx5_ib_dbg(dev
, "user-space is not allowed to create UD QPs spoofing as QP1\n");
1323 err
= create_user_qp(dev
, pd
, qp
, udata
, init_attr
, &in
,
1324 &resp
, &inlen
, base
);
1326 mlx5_ib_dbg(dev
, "err %d\n", err
);
1328 err
= create_kernel_qp(dev
, init_attr
, qp
, &in
, &inlen
,
1331 mlx5_ib_dbg(dev
, "err %d\n", err
);
1337 in
= mlx5_vzalloc(sizeof(*in
));
1341 qp
->create_type
= MLX5_QP_EMPTY
;
1344 if (is_sqp(init_attr
->qp_type
))
1345 qp
->port
= init_attr
->port_num
;
1347 in
->ctx
.flags
= cpu_to_be32(to_mlx5_st(init_attr
->qp_type
) << 16 |
1348 MLX5_QP_PM_MIGRATED
<< 11);
1350 if (init_attr
->qp_type
!= MLX5_IB_QPT_REG_UMR
)
1351 in
->ctx
.flags_pd
= cpu_to_be32(to_mpd(pd
? pd
: devr
->p0
)->pdn
);
1353 in
->ctx
.flags_pd
= cpu_to_be32(MLX5_QP_LAT_SENSITIVE
);
1356 in
->ctx
.flags_pd
|= cpu_to_be32(MLX5_QP_ENABLE_SIG
);
1358 if (qp
->flags
& MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK
)
1359 in
->ctx
.flags_pd
|= cpu_to_be32(MLX5_QP_BLOCK_MCAST
);
1361 if (qp
->flags
& MLX5_IB_QP_CROSS_CHANNEL
)
1362 in
->ctx
.params2
|= cpu_to_be32(MLX5_QP_BIT_CC_MASTER
);
1363 if (qp
->flags
& MLX5_IB_QP_MANAGED_SEND
)
1364 in
->ctx
.params2
|= cpu_to_be32(MLX5_QP_BIT_CC_SLAVE_SEND
);
1365 if (qp
->flags
& MLX5_IB_QP_MANAGED_RECV
)
1366 in
->ctx
.params2
|= cpu_to_be32(MLX5_QP_BIT_CC_SLAVE_RECV
);
1368 if (qp
->scat_cqe
&& is_connected(init_attr
->qp_type
)) {
1372 rcqe_sz
= mlx5_ib_get_cqe_size(dev
, init_attr
->recv_cq
);
1373 scqe_sz
= mlx5_ib_get_cqe_size(dev
, init_attr
->send_cq
);
1376 in
->ctx
.cs_res
= MLX5_RES_SCAT_DATA64_CQE
;
1378 in
->ctx
.cs_res
= MLX5_RES_SCAT_DATA32_CQE
;
1380 if (init_attr
->sq_sig_type
== IB_SIGNAL_ALL_WR
) {
1382 in
->ctx
.cs_req
= MLX5_REQ_SCAT_DATA64_CQE
;
1384 in
->ctx
.cs_req
= MLX5_REQ_SCAT_DATA32_CQE
;
1388 if (qp
->rq
.wqe_cnt
) {
1389 in
->ctx
.rq_size_stride
= (qp
->rq
.wqe_shift
- 4);
1390 in
->ctx
.rq_size_stride
|= ilog2(qp
->rq
.wqe_cnt
) << 3;
1393 in
->ctx
.rq_type_srqn
= get_rx_type(qp
, init_attr
);
1396 in
->ctx
.sq_crq_size
|= cpu_to_be16(ilog2(qp
->sq
.wqe_cnt
) << 11);
1398 in
->ctx
.sq_crq_size
|= cpu_to_be16(0x8000);
1400 /* Set default resources */
1401 switch (init_attr
->qp_type
) {
1402 case IB_QPT_XRC_TGT
:
1403 in
->ctx
.cqn_recv
= cpu_to_be32(to_mcq(devr
->c0
)->mcq
.cqn
);
1404 in
->ctx
.cqn_send
= cpu_to_be32(to_mcq(devr
->c0
)->mcq
.cqn
);
1405 in
->ctx
.rq_type_srqn
|= cpu_to_be32(to_msrq(devr
->s0
)->msrq
.srqn
);
1406 in
->ctx
.xrcd
= cpu_to_be32(to_mxrcd(init_attr
->xrcd
)->xrcdn
);
1408 case IB_QPT_XRC_INI
:
1409 in
->ctx
.cqn_recv
= cpu_to_be32(to_mcq(devr
->c0
)->mcq
.cqn
);
1410 in
->ctx
.xrcd
= cpu_to_be32(to_mxrcd(devr
->x1
)->xrcdn
);
1411 in
->ctx
.rq_type_srqn
|= cpu_to_be32(to_msrq(devr
->s0
)->msrq
.srqn
);
1414 if (init_attr
->srq
) {
1415 in
->ctx
.xrcd
= cpu_to_be32(to_mxrcd(devr
->x0
)->xrcdn
);
1416 in
->ctx
.rq_type_srqn
|= cpu_to_be32(to_msrq(init_attr
->srq
)->msrq
.srqn
);
1418 in
->ctx
.xrcd
= cpu_to_be32(to_mxrcd(devr
->x1
)->xrcdn
);
1419 in
->ctx
.rq_type_srqn
|=
1420 cpu_to_be32(to_msrq(devr
->s1
)->msrq
.srqn
);
1424 if (init_attr
->send_cq
)
1425 in
->ctx
.cqn_send
= cpu_to_be32(to_mcq(init_attr
->send_cq
)->mcq
.cqn
);
1427 if (init_attr
->recv_cq
)
1428 in
->ctx
.cqn_recv
= cpu_to_be32(to_mcq(init_attr
->recv_cq
)->mcq
.cqn
);
1430 in
->ctx
.db_rec_addr
= cpu_to_be64(qp
->db
.dma
);
1432 if (MLX5_CAP_GEN(mdev
, cqe_version
) == MLX5_CQE_VERSION_V1
) {
1433 qpc
= MLX5_ADDR_OF(create_qp_in
, in
, qpc
);
1434 /* 0xffffff means we ask to work with cqe version 0 */
1435 MLX5_SET(qpc
, qpc
, user_index
, uidx
);
1437 /* we use IB_QP_CREATE_IPOIB_UD_LSO to indicates ipoib qp */
1438 if (init_attr
->qp_type
== IB_QPT_UD
&&
1439 (init_attr
->create_flags
& IB_QP_CREATE_IPOIB_UD_LSO
)) {
1440 qpc
= MLX5_ADDR_OF(create_qp_in
, in
, qpc
);
1441 MLX5_SET(qpc
, qpc
, ulp_stateless_offload_mode
, 1);
1442 qp
->flags
|= MLX5_IB_QP_LSO
;
1445 if (init_attr
->qp_type
== IB_QPT_RAW_PACKET
) {
1446 qp
->raw_packet_qp
.sq
.ubuffer
.buf_addr
= ucmd
.sq_buf_addr
;
1447 raw_packet_qp_copy_info(qp
, &qp
->raw_packet_qp
);
1448 err
= create_raw_packet_qp(dev
, qp
, in
, pd
);
1450 err
= mlx5_core_create_qp(dev
->mdev
, &base
->mqp
, in
, inlen
);
1454 mlx5_ib_dbg(dev
, "create qp failed\n");
1460 base
->container_mibqp
= qp
;
1461 base
->mqp
.event
= mlx5_ib_qp_event
;
1466 if (qp
->create_type
== MLX5_QP_USER
)
1467 destroy_qp_user(pd
, qp
, base
);
1468 else if (qp
->create_type
== MLX5_QP_KERNEL
)
1469 destroy_qp_kernel(dev
, qp
);
1475 static void mlx5_ib_lock_cqs(struct mlx5_ib_cq
*send_cq
, struct mlx5_ib_cq
*recv_cq
)
1476 __acquires(&send_cq
->lock
) __acquires(&recv_cq
->lock
)
1480 if (send_cq
->mcq
.cqn
< recv_cq
->mcq
.cqn
) {
1481 spin_lock_irq(&send_cq
->lock
);
1482 spin_lock_nested(&recv_cq
->lock
,
1483 SINGLE_DEPTH_NESTING
);
1484 } else if (send_cq
->mcq
.cqn
== recv_cq
->mcq
.cqn
) {
1485 spin_lock_irq(&send_cq
->lock
);
1486 __acquire(&recv_cq
->lock
);
1488 spin_lock_irq(&recv_cq
->lock
);
1489 spin_lock_nested(&send_cq
->lock
,
1490 SINGLE_DEPTH_NESTING
);
1493 spin_lock_irq(&send_cq
->lock
);
1494 __acquire(&recv_cq
->lock
);
1496 } else if (recv_cq
) {
1497 spin_lock_irq(&recv_cq
->lock
);
1498 __acquire(&send_cq
->lock
);
1500 __acquire(&send_cq
->lock
);
1501 __acquire(&recv_cq
->lock
);
1505 static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq
*send_cq
, struct mlx5_ib_cq
*recv_cq
)
1506 __releases(&send_cq
->lock
) __releases(&recv_cq
->lock
)
1510 if (send_cq
->mcq
.cqn
< recv_cq
->mcq
.cqn
) {
1511 spin_unlock(&recv_cq
->lock
);
1512 spin_unlock_irq(&send_cq
->lock
);
1513 } else if (send_cq
->mcq
.cqn
== recv_cq
->mcq
.cqn
) {
1514 __release(&recv_cq
->lock
);
1515 spin_unlock_irq(&send_cq
->lock
);
1517 spin_unlock(&send_cq
->lock
);
1518 spin_unlock_irq(&recv_cq
->lock
);
1521 __release(&recv_cq
->lock
);
1522 spin_unlock_irq(&send_cq
->lock
);
1524 } else if (recv_cq
) {
1525 __release(&send_cq
->lock
);
1526 spin_unlock_irq(&recv_cq
->lock
);
1528 __release(&recv_cq
->lock
);
1529 __release(&send_cq
->lock
);
1533 static struct mlx5_ib_pd
*get_pd(struct mlx5_ib_qp
*qp
)
1535 return to_mpd(qp
->ibqp
.pd
);
1538 static void get_cqs(struct mlx5_ib_qp
*qp
,
1539 struct mlx5_ib_cq
**send_cq
, struct mlx5_ib_cq
**recv_cq
)
1541 switch (qp
->ibqp
.qp_type
) {
1542 case IB_QPT_XRC_TGT
:
1546 case MLX5_IB_QPT_REG_UMR
:
1547 case IB_QPT_XRC_INI
:
1548 *send_cq
= to_mcq(qp
->ibqp
.send_cq
);
1553 case MLX5_IB_QPT_HW_GSI
:
1557 case IB_QPT_RAW_IPV6
:
1558 case IB_QPT_RAW_ETHERTYPE
:
1559 case IB_QPT_RAW_PACKET
:
1560 *send_cq
= to_mcq(qp
->ibqp
.send_cq
);
1561 *recv_cq
= to_mcq(qp
->ibqp
.recv_cq
);
1572 static int modify_raw_packet_qp(struct mlx5_ib_dev
*dev
, struct mlx5_ib_qp
*qp
,
1575 static void destroy_qp_common(struct mlx5_ib_dev
*dev
, struct mlx5_ib_qp
*qp
)
1577 struct mlx5_ib_cq
*send_cq
, *recv_cq
;
1578 struct mlx5_ib_qp_base
*base
= &qp
->trans_qp
.base
;
1579 struct mlx5_modify_qp_mbox_in
*in
;
1582 base
= qp
->ibqp
.qp_type
== IB_QPT_RAW_PACKET
?
1583 &qp
->raw_packet_qp
.rq
.base
:
1586 in
= kzalloc(sizeof(*in
), GFP_KERNEL
);
1590 if (qp
->state
!= IB_QPS_RESET
) {
1591 if (qp
->ibqp
.qp_type
!= IB_QPT_RAW_PACKET
) {
1592 mlx5_ib_qp_disable_pagefaults(qp
);
1593 err
= mlx5_core_qp_modify(dev
->mdev
,
1594 MLX5_CMD_OP_2RST_QP
, in
, 0,
1597 err
= modify_raw_packet_qp(dev
, qp
,
1598 MLX5_CMD_OP_2RST_QP
);
1601 mlx5_ib_warn(dev
, "mlx5_ib: modify QP 0x%06x to RESET failed\n",
1605 get_cqs(qp
, &send_cq
, &recv_cq
);
1607 if (qp
->create_type
== MLX5_QP_KERNEL
) {
1608 mlx5_ib_lock_cqs(send_cq
, recv_cq
);
1609 __mlx5_ib_cq_clean(recv_cq
, base
->mqp
.qpn
,
1610 qp
->ibqp
.srq
? to_msrq(qp
->ibqp
.srq
) : NULL
);
1611 if (send_cq
!= recv_cq
)
1612 __mlx5_ib_cq_clean(send_cq
, base
->mqp
.qpn
,
1614 mlx5_ib_unlock_cqs(send_cq
, recv_cq
);
1617 if (qp
->ibqp
.qp_type
== IB_QPT_RAW_PACKET
) {
1618 destroy_raw_packet_qp(dev
, qp
);
1620 err
= mlx5_core_destroy_qp(dev
->mdev
, &base
->mqp
);
1622 mlx5_ib_warn(dev
, "failed to destroy QP 0x%x\n",
1628 if (qp
->create_type
== MLX5_QP_KERNEL
)
1629 destroy_qp_kernel(dev
, qp
);
1630 else if (qp
->create_type
== MLX5_QP_USER
)
1631 destroy_qp_user(&get_pd(qp
)->ibpd
, qp
, base
);
1634 static const char *ib_qp_type_str(enum ib_qp_type type
)
1638 return "IB_QPT_SMI";
1640 return "IB_QPT_GSI";
1647 case IB_QPT_RAW_IPV6
:
1648 return "IB_QPT_RAW_IPV6";
1649 case IB_QPT_RAW_ETHERTYPE
:
1650 return "IB_QPT_RAW_ETHERTYPE";
1651 case IB_QPT_XRC_INI
:
1652 return "IB_QPT_XRC_INI";
1653 case IB_QPT_XRC_TGT
:
1654 return "IB_QPT_XRC_TGT";
1655 case IB_QPT_RAW_PACKET
:
1656 return "IB_QPT_RAW_PACKET";
1657 case MLX5_IB_QPT_REG_UMR
:
1658 return "MLX5_IB_QPT_REG_UMR";
1661 return "Invalid QP type";
1665 struct ib_qp
*mlx5_ib_create_qp(struct ib_pd
*pd
,
1666 struct ib_qp_init_attr
*init_attr
,
1667 struct ib_udata
*udata
)
1669 struct mlx5_ib_dev
*dev
;
1670 struct mlx5_ib_qp
*qp
;
1675 dev
= to_mdev(pd
->device
);
1677 if (init_attr
->qp_type
== IB_QPT_RAW_PACKET
) {
1679 mlx5_ib_dbg(dev
, "Raw Packet QP is not supported for kernel consumers\n");
1680 return ERR_PTR(-EINVAL
);
1681 } else if (!to_mucontext(pd
->uobject
->context
)->cqe_version
) {
1682 mlx5_ib_dbg(dev
, "Raw Packet QP is only supported for CQE version > 0\n");
1683 return ERR_PTR(-EINVAL
);
1687 /* being cautious here */
1688 if (init_attr
->qp_type
!= IB_QPT_XRC_TGT
&&
1689 init_attr
->qp_type
!= MLX5_IB_QPT_REG_UMR
) {
1690 pr_warn("%s: no PD for transport %s\n", __func__
,
1691 ib_qp_type_str(init_attr
->qp_type
));
1692 return ERR_PTR(-EINVAL
);
1694 dev
= to_mdev(to_mxrcd(init_attr
->xrcd
)->ibxrcd
.device
);
1697 switch (init_attr
->qp_type
) {
1698 case IB_QPT_XRC_TGT
:
1699 case IB_QPT_XRC_INI
:
1700 if (!MLX5_CAP_GEN(dev
->mdev
, xrc
)) {
1701 mlx5_ib_dbg(dev
, "XRC not supported\n");
1702 return ERR_PTR(-ENOSYS
);
1704 init_attr
->recv_cq
= NULL
;
1705 if (init_attr
->qp_type
== IB_QPT_XRC_TGT
) {
1706 xrcdn
= to_mxrcd(init_attr
->xrcd
)->xrcdn
;
1707 init_attr
->send_cq
= NULL
;
1711 case IB_QPT_RAW_PACKET
:
1716 case MLX5_IB_QPT_HW_GSI
:
1717 case MLX5_IB_QPT_REG_UMR
:
1718 qp
= kzalloc(sizeof(*qp
), GFP_KERNEL
);
1720 return ERR_PTR(-ENOMEM
);
1722 err
= create_qp_common(dev
, pd
, init_attr
, udata
, qp
);
1724 mlx5_ib_dbg(dev
, "create_qp_common failed\n");
1726 return ERR_PTR(err
);
1729 if (is_qp0(init_attr
->qp_type
))
1730 qp
->ibqp
.qp_num
= 0;
1731 else if (is_qp1(init_attr
->qp_type
))
1732 qp
->ibqp
.qp_num
= 1;
1734 qp
->ibqp
.qp_num
= qp
->trans_qp
.base
.mqp
.qpn
;
1736 mlx5_ib_dbg(dev
, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
1737 qp
->ibqp
.qp_num
, qp
->trans_qp
.base
.mqp
.qpn
,
1738 to_mcq(init_attr
->recv_cq
)->mcq
.cqn
,
1739 to_mcq(init_attr
->send_cq
)->mcq
.cqn
);
1741 qp
->trans_qp
.xrcdn
= xrcdn
;
1746 return mlx5_ib_gsi_create_qp(pd
, init_attr
);
1748 case IB_QPT_RAW_IPV6
:
1749 case IB_QPT_RAW_ETHERTYPE
:
1752 mlx5_ib_dbg(dev
, "unsupported qp type %d\n",
1753 init_attr
->qp_type
);
1754 /* Don't support raw QPs */
1755 return ERR_PTR(-EINVAL
);
1761 int mlx5_ib_destroy_qp(struct ib_qp
*qp
)
1763 struct mlx5_ib_dev
*dev
= to_mdev(qp
->device
);
1764 struct mlx5_ib_qp
*mqp
= to_mqp(qp
);
1766 if (unlikely(qp
->qp_type
== IB_QPT_GSI
))
1767 return mlx5_ib_gsi_destroy_qp(qp
);
1769 destroy_qp_common(dev
, mqp
);
1776 static __be32
to_mlx5_access_flags(struct mlx5_ib_qp
*qp
, const struct ib_qp_attr
*attr
,
1779 u32 hw_access_flags
= 0;
1783 if (attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
)
1784 dest_rd_atomic
= attr
->max_dest_rd_atomic
;
1786 dest_rd_atomic
= qp
->trans_qp
.resp_depth
;
1788 if (attr_mask
& IB_QP_ACCESS_FLAGS
)
1789 access_flags
= attr
->qp_access_flags
;
1791 access_flags
= qp
->trans_qp
.atomic_rd_en
;
1793 if (!dest_rd_atomic
)
1794 access_flags
&= IB_ACCESS_REMOTE_WRITE
;
1796 if (access_flags
& IB_ACCESS_REMOTE_READ
)
1797 hw_access_flags
|= MLX5_QP_BIT_RRE
;
1798 if (access_flags
& IB_ACCESS_REMOTE_ATOMIC
)
1799 hw_access_flags
|= (MLX5_QP_BIT_RAE
| MLX5_ATOMIC_MODE_CX
);
1800 if (access_flags
& IB_ACCESS_REMOTE_WRITE
)
1801 hw_access_flags
|= MLX5_QP_BIT_RWE
;
1803 return cpu_to_be32(hw_access_flags
);
1807 MLX5_PATH_FLAG_FL
= 1 << 0,
1808 MLX5_PATH_FLAG_FREE_AR
= 1 << 1,
1809 MLX5_PATH_FLAG_COUNTER
= 1 << 2,
1812 static int ib_rate_to_mlx5(struct mlx5_ib_dev
*dev
, u8 rate
)
1814 if (rate
== IB_RATE_PORT_CURRENT
) {
1816 } else if (rate
< IB_RATE_2_5_GBPS
|| rate
> IB_RATE_300_GBPS
) {
1819 while (rate
!= IB_RATE_2_5_GBPS
&&
1820 !(1 << (rate
+ MLX5_STAT_RATE_OFFSET
) &
1821 MLX5_CAP_GEN(dev
->mdev
, stat_rate_support
)))
1825 return rate
+ MLX5_STAT_RATE_OFFSET
;
1828 static int modify_raw_packet_eth_prio(struct mlx5_core_dev
*dev
,
1829 struct mlx5_ib_sq
*sq
, u8 sl
)
1836 inlen
= MLX5_ST_SZ_BYTES(modify_tis_in
);
1837 in
= mlx5_vzalloc(inlen
);
1841 MLX5_SET(modify_tis_in
, in
, bitmask
.prio
, 1);
1843 tisc
= MLX5_ADDR_OF(modify_tis_in
, in
, ctx
);
1844 MLX5_SET(tisc
, tisc
, prio
, ((sl
& 0x7) << 1));
1846 err
= mlx5_core_modify_tis(dev
, sq
->tisn
, in
, inlen
);
1853 static int mlx5_set_path(struct mlx5_ib_dev
*dev
, struct mlx5_ib_qp
*qp
,
1854 const struct ib_ah_attr
*ah
,
1855 struct mlx5_qp_path
*path
, u8 port
, int attr_mask
,
1856 u32 path_flags
, const struct ib_qp_attr
*attr
,
1859 enum rdma_link_layer ll
= rdma_port_get_link_layer(&dev
->ib_dev
, port
);
1862 if (attr_mask
& IB_QP_PKEY_INDEX
)
1863 path
->pkey_index
= cpu_to_be16(alt
? attr
->alt_pkey_index
:
1866 if (ah
->ah_flags
& IB_AH_GRH
) {
1867 if (ah
->grh
.sgid_index
>=
1868 dev
->mdev
->port_caps
[port
- 1].gid_table_len
) {
1869 pr_err("sgid_index (%u) too large. max is %d\n",
1871 dev
->mdev
->port_caps
[port
- 1].gid_table_len
);
1876 if (ll
== IB_LINK_LAYER_ETHERNET
) {
1877 if (!(ah
->ah_flags
& IB_AH_GRH
))
1879 memcpy(path
->rmac
, ah
->dmac
, sizeof(ah
->dmac
));
1880 path
->udp_sport
= mlx5_get_roce_udp_sport(dev
, port
,
1881 ah
->grh
.sgid_index
);
1882 path
->dci_cfi_prio_sl
= (ah
->sl
& 0x7) << 4;
1884 path
->fl_free_ar
= (path_flags
& MLX5_PATH_FLAG_FL
) ? 0x80 : 0;
1886 (path_flags
& MLX5_PATH_FLAG_FREE_AR
) ? 0x40 : 0;
1887 path
->rlid
= cpu_to_be16(ah
->dlid
);
1888 path
->grh_mlid
= ah
->src_path_bits
& 0x7f;
1889 if (ah
->ah_flags
& IB_AH_GRH
)
1890 path
->grh_mlid
|= 1 << 7;
1891 path
->dci_cfi_prio_sl
= ah
->sl
& 0xf;
1894 if (ah
->ah_flags
& IB_AH_GRH
) {
1895 path
->mgid_index
= ah
->grh
.sgid_index
;
1896 path
->hop_limit
= ah
->grh
.hop_limit
;
1897 path
->tclass_flowlabel
=
1898 cpu_to_be32((ah
->grh
.traffic_class
<< 20) |
1899 (ah
->grh
.flow_label
));
1900 memcpy(path
->rgid
, ah
->grh
.dgid
.raw
, 16);
1903 err
= ib_rate_to_mlx5(dev
, ah
->static_rate
);
1906 path
->static_rate
= err
;
1909 if (attr_mask
& IB_QP_TIMEOUT
)
1910 path
->ackto_lt
= (alt
? attr
->alt_timeout
: attr
->timeout
) << 3;
1912 if ((qp
->ibqp
.qp_type
== IB_QPT_RAW_PACKET
) && qp
->sq
.wqe_cnt
)
1913 return modify_raw_packet_eth_prio(dev
->mdev
,
1914 &qp
->raw_packet_qp
.sq
,
1920 static enum mlx5_qp_optpar opt_mask
[MLX5_QP_NUM_STATE
][MLX5_QP_NUM_STATE
][MLX5_QP_ST_MAX
] = {
1921 [MLX5_QP_STATE_INIT
] = {
1922 [MLX5_QP_STATE_INIT
] = {
1923 [MLX5_QP_ST_RC
] = MLX5_QP_OPTPAR_RRE
|
1924 MLX5_QP_OPTPAR_RAE
|
1925 MLX5_QP_OPTPAR_RWE
|
1926 MLX5_QP_OPTPAR_PKEY_INDEX
|
1927 MLX5_QP_OPTPAR_PRI_PORT
,
1928 [MLX5_QP_ST_UC
] = MLX5_QP_OPTPAR_RWE
|
1929 MLX5_QP_OPTPAR_PKEY_INDEX
|
1930 MLX5_QP_OPTPAR_PRI_PORT
,
1931 [MLX5_QP_ST_UD
] = MLX5_QP_OPTPAR_PKEY_INDEX
|
1932 MLX5_QP_OPTPAR_Q_KEY
|
1933 MLX5_QP_OPTPAR_PRI_PORT
,
1935 [MLX5_QP_STATE_RTR
] = {
1936 [MLX5_QP_ST_RC
] = MLX5_QP_OPTPAR_ALT_ADDR_PATH
|
1937 MLX5_QP_OPTPAR_RRE
|
1938 MLX5_QP_OPTPAR_RAE
|
1939 MLX5_QP_OPTPAR_RWE
|
1940 MLX5_QP_OPTPAR_PKEY_INDEX
,
1941 [MLX5_QP_ST_UC
] = MLX5_QP_OPTPAR_ALT_ADDR_PATH
|
1942 MLX5_QP_OPTPAR_RWE
|
1943 MLX5_QP_OPTPAR_PKEY_INDEX
,
1944 [MLX5_QP_ST_UD
] = MLX5_QP_OPTPAR_PKEY_INDEX
|
1945 MLX5_QP_OPTPAR_Q_KEY
,
1946 [MLX5_QP_ST_MLX
] = MLX5_QP_OPTPAR_PKEY_INDEX
|
1947 MLX5_QP_OPTPAR_Q_KEY
,
1948 [MLX5_QP_ST_XRC
] = MLX5_QP_OPTPAR_ALT_ADDR_PATH
|
1949 MLX5_QP_OPTPAR_RRE
|
1950 MLX5_QP_OPTPAR_RAE
|
1951 MLX5_QP_OPTPAR_RWE
|
1952 MLX5_QP_OPTPAR_PKEY_INDEX
,
1955 [MLX5_QP_STATE_RTR
] = {
1956 [MLX5_QP_STATE_RTS
] = {
1957 [MLX5_QP_ST_RC
] = MLX5_QP_OPTPAR_ALT_ADDR_PATH
|
1958 MLX5_QP_OPTPAR_RRE
|
1959 MLX5_QP_OPTPAR_RAE
|
1960 MLX5_QP_OPTPAR_RWE
|
1961 MLX5_QP_OPTPAR_PM_STATE
|
1962 MLX5_QP_OPTPAR_RNR_TIMEOUT
,
1963 [MLX5_QP_ST_UC
] = MLX5_QP_OPTPAR_ALT_ADDR_PATH
|
1964 MLX5_QP_OPTPAR_RWE
|
1965 MLX5_QP_OPTPAR_PM_STATE
,
1966 [MLX5_QP_ST_UD
] = MLX5_QP_OPTPAR_Q_KEY
,
1969 [MLX5_QP_STATE_RTS
] = {
1970 [MLX5_QP_STATE_RTS
] = {
1971 [MLX5_QP_ST_RC
] = MLX5_QP_OPTPAR_RRE
|
1972 MLX5_QP_OPTPAR_RAE
|
1973 MLX5_QP_OPTPAR_RWE
|
1974 MLX5_QP_OPTPAR_RNR_TIMEOUT
|
1975 MLX5_QP_OPTPAR_PM_STATE
|
1976 MLX5_QP_OPTPAR_ALT_ADDR_PATH
,
1977 [MLX5_QP_ST_UC
] = MLX5_QP_OPTPAR_RWE
|
1978 MLX5_QP_OPTPAR_PM_STATE
|
1979 MLX5_QP_OPTPAR_ALT_ADDR_PATH
,
1980 [MLX5_QP_ST_UD
] = MLX5_QP_OPTPAR_Q_KEY
|
1981 MLX5_QP_OPTPAR_SRQN
|
1982 MLX5_QP_OPTPAR_CQN_RCV
,
1985 [MLX5_QP_STATE_SQER
] = {
1986 [MLX5_QP_STATE_RTS
] = {
1987 [MLX5_QP_ST_UD
] = MLX5_QP_OPTPAR_Q_KEY
,
1988 [MLX5_QP_ST_MLX
] = MLX5_QP_OPTPAR_Q_KEY
,
1989 [MLX5_QP_ST_UC
] = MLX5_QP_OPTPAR_RWE
,
1990 [MLX5_QP_ST_RC
] = MLX5_QP_OPTPAR_RNR_TIMEOUT
|
1991 MLX5_QP_OPTPAR_RWE
|
1992 MLX5_QP_OPTPAR_RAE
|
1998 static int ib_nr_to_mlx5_nr(int ib_mask
)
2003 case IB_QP_CUR_STATE
:
2005 case IB_QP_EN_SQD_ASYNC_NOTIFY
:
2007 case IB_QP_ACCESS_FLAGS
:
2008 return MLX5_QP_OPTPAR_RWE
| MLX5_QP_OPTPAR_RRE
|
2010 case IB_QP_PKEY_INDEX
:
2011 return MLX5_QP_OPTPAR_PKEY_INDEX
;
2013 return MLX5_QP_OPTPAR_PRI_PORT
;
2015 return MLX5_QP_OPTPAR_Q_KEY
;
2017 return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH
|
2018 MLX5_QP_OPTPAR_PRI_PORT
;
2019 case IB_QP_PATH_MTU
:
2022 return MLX5_QP_OPTPAR_ACK_TIMEOUT
;
2023 case IB_QP_RETRY_CNT
:
2024 return MLX5_QP_OPTPAR_RETRY_COUNT
;
2025 case IB_QP_RNR_RETRY
:
2026 return MLX5_QP_OPTPAR_RNR_RETRY
;
2029 case IB_QP_MAX_QP_RD_ATOMIC
:
2030 return MLX5_QP_OPTPAR_SRA_MAX
;
2031 case IB_QP_ALT_PATH
:
2032 return MLX5_QP_OPTPAR_ALT_ADDR_PATH
;
2033 case IB_QP_MIN_RNR_TIMER
:
2034 return MLX5_QP_OPTPAR_RNR_TIMEOUT
;
2037 case IB_QP_MAX_DEST_RD_ATOMIC
:
2038 return MLX5_QP_OPTPAR_RRA_MAX
| MLX5_QP_OPTPAR_RWE
|
2039 MLX5_QP_OPTPAR_RRE
| MLX5_QP_OPTPAR_RAE
;
2040 case IB_QP_PATH_MIG_STATE
:
2041 return MLX5_QP_OPTPAR_PM_STATE
;
2044 case IB_QP_DEST_QPN
:
2050 static int ib_mask_to_mlx5_opt(int ib_mask
)
2055 for (i
= 0; i
< 8 * sizeof(int); i
++) {
2056 if ((1 << i
) & ib_mask
)
2057 result
|= ib_nr_to_mlx5_nr(1 << i
);
2063 static int modify_raw_packet_qp_rq(struct mlx5_core_dev
*dev
,
2064 struct mlx5_ib_rq
*rq
, int new_state
)
2071 inlen
= MLX5_ST_SZ_BYTES(modify_rq_in
);
2072 in
= mlx5_vzalloc(inlen
);
2076 MLX5_SET(modify_rq_in
, in
, rq_state
, rq
->state
);
2078 rqc
= MLX5_ADDR_OF(modify_rq_in
, in
, ctx
);
2079 MLX5_SET(rqc
, rqc
, state
, new_state
);
2081 err
= mlx5_core_modify_rq(dev
, rq
->base
.mqp
.qpn
, in
, inlen
);
2085 rq
->state
= new_state
;
2092 static int modify_raw_packet_qp_sq(struct mlx5_core_dev
*dev
,
2093 struct mlx5_ib_sq
*sq
, int new_state
)
2100 inlen
= MLX5_ST_SZ_BYTES(modify_sq_in
);
2101 in
= mlx5_vzalloc(inlen
);
2105 MLX5_SET(modify_sq_in
, in
, sq_state
, sq
->state
);
2107 sqc
= MLX5_ADDR_OF(modify_sq_in
, in
, ctx
);
2108 MLX5_SET(sqc
, sqc
, state
, new_state
);
2110 err
= mlx5_core_modify_sq(dev
, sq
->base
.mqp
.qpn
, in
, inlen
);
2114 sq
->state
= new_state
;
2121 static int modify_raw_packet_qp(struct mlx5_ib_dev
*dev
, struct mlx5_ib_qp
*qp
,
2124 struct mlx5_ib_raw_packet_qp
*raw_packet_qp
= &qp
->raw_packet_qp
;
2125 struct mlx5_ib_rq
*rq
= &raw_packet_qp
->rq
;
2126 struct mlx5_ib_sq
*sq
= &raw_packet_qp
->sq
;
2131 switch (operation
) {
2132 case MLX5_CMD_OP_RST2INIT_QP
:
2133 rq_state
= MLX5_RQC_STATE_RDY
;
2134 sq_state
= MLX5_SQC_STATE_RDY
;
2136 case MLX5_CMD_OP_2ERR_QP
:
2137 rq_state
= MLX5_RQC_STATE_ERR
;
2138 sq_state
= MLX5_SQC_STATE_ERR
;
2140 case MLX5_CMD_OP_2RST_QP
:
2141 rq_state
= MLX5_RQC_STATE_RST
;
2142 sq_state
= MLX5_SQC_STATE_RST
;
2144 case MLX5_CMD_OP_INIT2INIT_QP
:
2145 case MLX5_CMD_OP_INIT2RTR_QP
:
2146 case MLX5_CMD_OP_RTR2RTS_QP
:
2147 case MLX5_CMD_OP_RTS2RTS_QP
:
2148 /* Nothing to do here... */
2155 if (qp
->rq
.wqe_cnt
) {
2156 err
= modify_raw_packet_qp_rq(dev
->mdev
, rq
, rq_state
);
2162 return modify_raw_packet_qp_sq(dev
->mdev
, sq
, sq_state
);
2167 static int __mlx5_ib_modify_qp(struct ib_qp
*ibqp
,
2168 const struct ib_qp_attr
*attr
, int attr_mask
,
2169 enum ib_qp_state cur_state
, enum ib_qp_state new_state
)
2171 static const u16 optab
[MLX5_QP_NUM_STATE
][MLX5_QP_NUM_STATE
] = {
2172 [MLX5_QP_STATE_RST
] = {
2173 [MLX5_QP_STATE_RST
] = MLX5_CMD_OP_2RST_QP
,
2174 [MLX5_QP_STATE_ERR
] = MLX5_CMD_OP_2ERR_QP
,
2175 [MLX5_QP_STATE_INIT
] = MLX5_CMD_OP_RST2INIT_QP
,
2177 [MLX5_QP_STATE_INIT
] = {
2178 [MLX5_QP_STATE_RST
] = MLX5_CMD_OP_2RST_QP
,
2179 [MLX5_QP_STATE_ERR
] = MLX5_CMD_OP_2ERR_QP
,
2180 [MLX5_QP_STATE_INIT
] = MLX5_CMD_OP_INIT2INIT_QP
,
2181 [MLX5_QP_STATE_RTR
] = MLX5_CMD_OP_INIT2RTR_QP
,
2183 [MLX5_QP_STATE_RTR
] = {
2184 [MLX5_QP_STATE_RST
] = MLX5_CMD_OP_2RST_QP
,
2185 [MLX5_QP_STATE_ERR
] = MLX5_CMD_OP_2ERR_QP
,
2186 [MLX5_QP_STATE_RTS
] = MLX5_CMD_OP_RTR2RTS_QP
,
2188 [MLX5_QP_STATE_RTS
] = {
2189 [MLX5_QP_STATE_RST
] = MLX5_CMD_OP_2RST_QP
,
2190 [MLX5_QP_STATE_ERR
] = MLX5_CMD_OP_2ERR_QP
,
2191 [MLX5_QP_STATE_RTS
] = MLX5_CMD_OP_RTS2RTS_QP
,
2193 [MLX5_QP_STATE_SQD
] = {
2194 [MLX5_QP_STATE_RST
] = MLX5_CMD_OP_2RST_QP
,
2195 [MLX5_QP_STATE_ERR
] = MLX5_CMD_OP_2ERR_QP
,
2197 [MLX5_QP_STATE_SQER
] = {
2198 [MLX5_QP_STATE_RST
] = MLX5_CMD_OP_2RST_QP
,
2199 [MLX5_QP_STATE_ERR
] = MLX5_CMD_OP_2ERR_QP
,
2200 [MLX5_QP_STATE_RTS
] = MLX5_CMD_OP_SQERR2RTS_QP
,
2202 [MLX5_QP_STATE_ERR
] = {
2203 [MLX5_QP_STATE_RST
] = MLX5_CMD_OP_2RST_QP
,
2204 [MLX5_QP_STATE_ERR
] = MLX5_CMD_OP_2ERR_QP
,
2208 struct mlx5_ib_dev
*dev
= to_mdev(ibqp
->device
);
2209 struct mlx5_ib_qp
*qp
= to_mqp(ibqp
);
2210 struct mlx5_ib_qp_base
*base
= &qp
->trans_qp
.base
;
2211 struct mlx5_ib_cq
*send_cq
, *recv_cq
;
2212 struct mlx5_qp_context
*context
;
2213 struct mlx5_modify_qp_mbox_in
*in
;
2214 struct mlx5_ib_pd
*pd
;
2215 enum mlx5_qp_state mlx5_cur
, mlx5_new
;
2216 enum mlx5_qp_optpar optpar
;
2222 in
= kzalloc(sizeof(*in
), GFP_KERNEL
);
2227 err
= to_mlx5_st(ibqp
->qp_type
);
2229 mlx5_ib_dbg(dev
, "unsupported qp type %d\n", ibqp
->qp_type
);
2233 context
->flags
= cpu_to_be32(err
<< 16);
2235 if (!(attr_mask
& IB_QP_PATH_MIG_STATE
)) {
2236 context
->flags
|= cpu_to_be32(MLX5_QP_PM_MIGRATED
<< 11);
2238 switch (attr
->path_mig_state
) {
2239 case IB_MIG_MIGRATED
:
2240 context
->flags
|= cpu_to_be32(MLX5_QP_PM_MIGRATED
<< 11);
2243 context
->flags
|= cpu_to_be32(MLX5_QP_PM_REARM
<< 11);
2246 context
->flags
|= cpu_to_be32(MLX5_QP_PM_ARMED
<< 11);
2251 if (is_sqp(ibqp
->qp_type
)) {
2252 context
->mtu_msgmax
= (IB_MTU_256
<< 5) | 8;
2253 } else if (ibqp
->qp_type
== IB_QPT_UD
||
2254 ibqp
->qp_type
== MLX5_IB_QPT_REG_UMR
) {
2255 context
->mtu_msgmax
= (IB_MTU_4096
<< 5) | 12;
2256 } else if (attr_mask
& IB_QP_PATH_MTU
) {
2257 if (attr
->path_mtu
< IB_MTU_256
||
2258 attr
->path_mtu
> IB_MTU_4096
) {
2259 mlx5_ib_warn(dev
, "invalid mtu %d\n", attr
->path_mtu
);
2263 context
->mtu_msgmax
= (attr
->path_mtu
<< 5) |
2264 (u8
)MLX5_CAP_GEN(dev
->mdev
, log_max_msg
);
2267 if (attr_mask
& IB_QP_DEST_QPN
)
2268 context
->log_pg_sz_remote_qpn
= cpu_to_be32(attr
->dest_qp_num
);
2270 if (attr_mask
& IB_QP_PKEY_INDEX
)
2271 context
->pri_path
.pkey_index
= cpu_to_be16(attr
->pkey_index
);
2273 /* todo implement counter_index functionality */
2275 if (is_sqp(ibqp
->qp_type
))
2276 context
->pri_path
.port
= qp
->port
;
2278 if (attr_mask
& IB_QP_PORT
)
2279 context
->pri_path
.port
= attr
->port_num
;
2281 if (attr_mask
& IB_QP_AV
) {
2282 err
= mlx5_set_path(dev
, qp
, &attr
->ah_attr
, &context
->pri_path
,
2283 attr_mask
& IB_QP_PORT
? attr
->port_num
: qp
->port
,
2284 attr_mask
, 0, attr
, false);
2289 if (attr_mask
& IB_QP_TIMEOUT
)
2290 context
->pri_path
.ackto_lt
|= attr
->timeout
<< 3;
2292 if (attr_mask
& IB_QP_ALT_PATH
) {
2293 err
= mlx5_set_path(dev
, qp
, &attr
->alt_ah_attr
,
2296 attr_mask
| IB_QP_PKEY_INDEX
| IB_QP_TIMEOUT
,
2303 get_cqs(qp
, &send_cq
, &recv_cq
);
2305 context
->flags_pd
= cpu_to_be32(pd
? pd
->pdn
: to_mpd(dev
->devr
.p0
)->pdn
);
2306 context
->cqn_send
= send_cq
? cpu_to_be32(send_cq
->mcq
.cqn
) : 0;
2307 context
->cqn_recv
= recv_cq
? cpu_to_be32(recv_cq
->mcq
.cqn
) : 0;
2308 context
->params1
= cpu_to_be32(MLX5_IB_ACK_REQ_FREQ
<< 28);
2310 if (attr_mask
& IB_QP_RNR_RETRY
)
2311 context
->params1
|= cpu_to_be32(attr
->rnr_retry
<< 13);
2313 if (attr_mask
& IB_QP_RETRY_CNT
)
2314 context
->params1
|= cpu_to_be32(attr
->retry_cnt
<< 16);
2316 if (attr_mask
& IB_QP_MAX_QP_RD_ATOMIC
) {
2317 if (attr
->max_rd_atomic
)
2319 cpu_to_be32(fls(attr
->max_rd_atomic
- 1) << 21);
2322 if (attr_mask
& IB_QP_SQ_PSN
)
2323 context
->next_send_psn
= cpu_to_be32(attr
->sq_psn
);
2325 if (attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
) {
2326 if (attr
->max_dest_rd_atomic
)
2328 cpu_to_be32(fls(attr
->max_dest_rd_atomic
- 1) << 21);
2331 if (attr_mask
& (IB_QP_ACCESS_FLAGS
| IB_QP_MAX_DEST_RD_ATOMIC
))
2332 context
->params2
|= to_mlx5_access_flags(qp
, attr
, attr_mask
);
2334 if (attr_mask
& IB_QP_MIN_RNR_TIMER
)
2335 context
->rnr_nextrecvpsn
|= cpu_to_be32(attr
->min_rnr_timer
<< 24);
2337 if (attr_mask
& IB_QP_RQ_PSN
)
2338 context
->rnr_nextrecvpsn
|= cpu_to_be32(attr
->rq_psn
);
2340 if (attr_mask
& IB_QP_QKEY
)
2341 context
->qkey
= cpu_to_be32(attr
->qkey
);
2343 if (qp
->rq
.wqe_cnt
&& cur_state
== IB_QPS_RESET
&& new_state
== IB_QPS_INIT
)
2344 context
->db_rec_addr
= cpu_to_be64(qp
->db
.dma
);
2346 if (cur_state
== IB_QPS_RTS
&& new_state
== IB_QPS_SQD
&&
2347 attr_mask
& IB_QP_EN_SQD_ASYNC_NOTIFY
&& attr
->en_sqd_async_notify
)
2352 if (!ibqp
->uobject
&& cur_state
== IB_QPS_RESET
&& new_state
== IB_QPS_INIT
)
2353 context
->sq_crq_size
|= cpu_to_be16(1 << 4);
2355 if (qp
->flags
& MLX5_IB_QP_SQPN_QP1
)
2356 context
->deth_sqpn
= cpu_to_be32(1);
2358 mlx5_cur
= to_mlx5_state(cur_state
);
2359 mlx5_new
= to_mlx5_state(new_state
);
2360 mlx5_st
= to_mlx5_st(ibqp
->qp_type
);
2364 /* If moving to a reset or error state, we must disable page faults on
2365 * this QP and flush all current page faults. Otherwise a stale page
2366 * fault may attempt to work on this QP after it is reset and moved
2367 * again to RTS, and may cause the driver and the device to get out of
2369 if (cur_state
!= IB_QPS_RESET
&& cur_state
!= IB_QPS_ERR
&&
2370 (new_state
== IB_QPS_RESET
|| new_state
== IB_QPS_ERR
) &&
2371 (qp
->ibqp
.qp_type
!= IB_QPT_RAW_PACKET
))
2372 mlx5_ib_qp_disable_pagefaults(qp
);
2374 if (mlx5_cur
>= MLX5_QP_NUM_STATE
|| mlx5_new
>= MLX5_QP_NUM_STATE
||
2375 !optab
[mlx5_cur
][mlx5_new
])
2378 op
= optab
[mlx5_cur
][mlx5_new
];
2379 optpar
= ib_mask_to_mlx5_opt(attr_mask
);
2380 optpar
&= opt_mask
[mlx5_cur
][mlx5_new
][mlx5_st
];
2381 in
->optparam
= cpu_to_be32(optpar
);
2383 if (qp
->ibqp
.qp_type
== IB_QPT_RAW_PACKET
)
2384 err
= modify_raw_packet_qp(dev
, qp
, op
);
2386 err
= mlx5_core_qp_modify(dev
->mdev
, op
, in
, sqd_event
,
2391 if (cur_state
== IB_QPS_RESET
&& new_state
== IB_QPS_INIT
&&
2392 (qp
->ibqp
.qp_type
!= IB_QPT_RAW_PACKET
))
2393 mlx5_ib_qp_enable_pagefaults(qp
);
2395 qp
->state
= new_state
;
2397 if (attr_mask
& IB_QP_ACCESS_FLAGS
)
2398 qp
->trans_qp
.atomic_rd_en
= attr
->qp_access_flags
;
2399 if (attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
)
2400 qp
->trans_qp
.resp_depth
= attr
->max_dest_rd_atomic
;
2401 if (attr_mask
& IB_QP_PORT
)
2402 qp
->port
= attr
->port_num
;
2403 if (attr_mask
& IB_QP_ALT_PATH
)
2404 qp
->trans_qp
.alt_port
= attr
->alt_port_num
;
2407 * If we moved a kernel QP to RESET, clean up all old CQ
2408 * entries and reinitialize the QP.
2410 if (new_state
== IB_QPS_RESET
&& !ibqp
->uobject
) {
2411 mlx5_ib_cq_clean(recv_cq
, base
->mqp
.qpn
,
2412 ibqp
->srq
? to_msrq(ibqp
->srq
) : NULL
);
2413 if (send_cq
!= recv_cq
)
2414 mlx5_ib_cq_clean(send_cq
, base
->mqp
.qpn
, NULL
);
2420 qp
->sq
.cur_post
= 0;
2421 qp
->sq
.last_poll
= 0;
2422 qp
->db
.db
[MLX5_RCV_DBR
] = 0;
2423 qp
->db
.db
[MLX5_SND_DBR
] = 0;
2431 int mlx5_ib_modify_qp(struct ib_qp
*ibqp
, struct ib_qp_attr
*attr
,
2432 int attr_mask
, struct ib_udata
*udata
)
2434 struct mlx5_ib_dev
*dev
= to_mdev(ibqp
->device
);
2435 struct mlx5_ib_qp
*qp
= to_mqp(ibqp
);
2436 enum ib_qp_type qp_type
;
2437 enum ib_qp_state cur_state
, new_state
;
2440 enum rdma_link_layer ll
= IB_LINK_LAYER_UNSPECIFIED
;
2442 if (unlikely(ibqp
->qp_type
== IB_QPT_GSI
))
2443 return mlx5_ib_gsi_modify_qp(ibqp
, attr
, attr_mask
);
2445 qp_type
= (unlikely(ibqp
->qp_type
== MLX5_IB_QPT_HW_GSI
)) ?
2446 IB_QPT_GSI
: ibqp
->qp_type
;
2448 mutex_lock(&qp
->mutex
);
2450 cur_state
= attr_mask
& IB_QP_CUR_STATE
? attr
->cur_qp_state
: qp
->state
;
2451 new_state
= attr_mask
& IB_QP_STATE
? attr
->qp_state
: cur_state
;
2453 if (!(cur_state
== new_state
&& cur_state
== IB_QPS_RESET
)) {
2454 port
= attr_mask
& IB_QP_PORT
? attr
->port_num
: qp
->port
;
2455 ll
= dev
->ib_dev
.get_link_layer(&dev
->ib_dev
, port
);
2458 if (qp_type
!= MLX5_IB_QPT_REG_UMR
&&
2459 !ib_modify_qp_is_ok(cur_state
, new_state
, qp_type
, attr_mask
, ll
)) {
2460 mlx5_ib_dbg(dev
, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
2461 cur_state
, new_state
, ibqp
->qp_type
, attr_mask
);
2465 if ((attr_mask
& IB_QP_PORT
) &&
2466 (attr
->port_num
== 0 ||
2467 attr
->port_num
> MLX5_CAP_GEN(dev
->mdev
, num_ports
))) {
2468 mlx5_ib_dbg(dev
, "invalid port number %d. number of ports is %d\n",
2469 attr
->port_num
, dev
->num_ports
);
2473 if (attr_mask
& IB_QP_PKEY_INDEX
) {
2474 port
= attr_mask
& IB_QP_PORT
? attr
->port_num
: qp
->port
;
2475 if (attr
->pkey_index
>=
2476 dev
->mdev
->port_caps
[port
- 1].pkey_table_len
) {
2477 mlx5_ib_dbg(dev
, "invalid pkey index %d\n",
2483 if (attr_mask
& IB_QP_MAX_QP_RD_ATOMIC
&&
2484 attr
->max_rd_atomic
>
2485 (1 << MLX5_CAP_GEN(dev
->mdev
, log_max_ra_res_qp
))) {
2486 mlx5_ib_dbg(dev
, "invalid max_rd_atomic value %d\n",
2487 attr
->max_rd_atomic
);
2491 if (attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
&&
2492 attr
->max_dest_rd_atomic
>
2493 (1 << MLX5_CAP_GEN(dev
->mdev
, log_max_ra_req_qp
))) {
2494 mlx5_ib_dbg(dev
, "invalid max_dest_rd_atomic value %d\n",
2495 attr
->max_dest_rd_atomic
);
2499 if (cur_state
== new_state
&& cur_state
== IB_QPS_RESET
) {
2504 err
= __mlx5_ib_modify_qp(ibqp
, attr
, attr_mask
, cur_state
, new_state
);
2507 mutex_unlock(&qp
->mutex
);
2511 static int mlx5_wq_overflow(struct mlx5_ib_wq
*wq
, int nreq
, struct ib_cq
*ib_cq
)
2513 struct mlx5_ib_cq
*cq
;
2516 cur
= wq
->head
- wq
->tail
;
2517 if (likely(cur
+ nreq
< wq
->max_post
))
2521 spin_lock(&cq
->lock
);
2522 cur
= wq
->head
- wq
->tail
;
2523 spin_unlock(&cq
->lock
);
2525 return cur
+ nreq
>= wq
->max_post
;
2528 static __always_inline
void set_raddr_seg(struct mlx5_wqe_raddr_seg
*rseg
,
2529 u64 remote_addr
, u32 rkey
)
2531 rseg
->raddr
= cpu_to_be64(remote_addr
);
2532 rseg
->rkey
= cpu_to_be32(rkey
);
2536 static void *set_eth_seg(struct mlx5_wqe_eth_seg
*eseg
,
2537 struct ib_send_wr
*wr
, void *qend
,
2538 struct mlx5_ib_qp
*qp
, int *size
)
2542 memset(eseg
, 0, sizeof(struct mlx5_wqe_eth_seg
));
2544 if (wr
->send_flags
& IB_SEND_IP_CSUM
)
2545 eseg
->cs_flags
= MLX5_ETH_WQE_L3_CSUM
|
2546 MLX5_ETH_WQE_L4_CSUM
;
2548 seg
+= sizeof(struct mlx5_wqe_eth_seg
);
2549 *size
+= sizeof(struct mlx5_wqe_eth_seg
) / 16;
2551 if (wr
->opcode
== IB_WR_LSO
) {
2552 struct ib_ud_wr
*ud_wr
= container_of(wr
, struct ib_ud_wr
, wr
);
2553 int size_of_inl_hdr_start
= sizeof(eseg
->inline_hdr_start
);
2554 u64 left
, leftlen
, copysz
;
2555 void *pdata
= ud_wr
->header
;
2558 eseg
->mss
= cpu_to_be16(ud_wr
->mss
);
2559 eseg
->inline_hdr_sz
= cpu_to_be16(left
);
2562 * check if there is space till the end of queue, if yes,
2563 * copy all in one shot, otherwise copy till the end of queue,
2564 * rollback and than the copy the left
2566 leftlen
= qend
- (void *)eseg
->inline_hdr_start
;
2567 copysz
= min_t(u64
, leftlen
, left
);
2569 memcpy(seg
- size_of_inl_hdr_start
, pdata
, copysz
);
2571 if (likely(copysz
> size_of_inl_hdr_start
)) {
2572 seg
+= ALIGN(copysz
- size_of_inl_hdr_start
, 16);
2573 *size
+= ALIGN(copysz
- size_of_inl_hdr_start
, 16) / 16;
2576 if (unlikely(copysz
< left
)) { /* the last wqe in the queue */
2577 seg
= mlx5_get_send_wqe(qp
, 0);
2580 memcpy(seg
, pdata
, left
);
2581 seg
+= ALIGN(left
, 16);
2582 *size
+= ALIGN(left
, 16) / 16;
2589 static void set_datagram_seg(struct mlx5_wqe_datagram_seg
*dseg
,
2590 struct ib_send_wr
*wr
)
2592 memcpy(&dseg
->av
, &to_mah(ud_wr(wr
)->ah
)->av
, sizeof(struct mlx5_av
));
2593 dseg
->av
.dqp_dct
= cpu_to_be32(ud_wr(wr
)->remote_qpn
| MLX5_EXTENDED_UD_AV
);
2594 dseg
->av
.key
.qkey
.qkey
= cpu_to_be32(ud_wr(wr
)->remote_qkey
);
2597 static void set_data_ptr_seg(struct mlx5_wqe_data_seg
*dseg
, struct ib_sge
*sg
)
2599 dseg
->byte_count
= cpu_to_be32(sg
->length
);
2600 dseg
->lkey
= cpu_to_be32(sg
->lkey
);
2601 dseg
->addr
= cpu_to_be64(sg
->addr
);
2604 static __be16
get_klm_octo(int npages
)
2606 return cpu_to_be16(ALIGN(npages
, 8) / 2);
2609 static __be64
frwr_mkey_mask(void)
2613 result
= MLX5_MKEY_MASK_LEN
|
2614 MLX5_MKEY_MASK_PAGE_SIZE
|
2615 MLX5_MKEY_MASK_START_ADDR
|
2616 MLX5_MKEY_MASK_EN_RINVAL
|
2617 MLX5_MKEY_MASK_KEY
|
2623 MLX5_MKEY_MASK_SMALL_FENCE
|
2624 MLX5_MKEY_MASK_FREE
;
2626 return cpu_to_be64(result
);
2629 static __be64
sig_mkey_mask(void)
2633 result
= MLX5_MKEY_MASK_LEN
|
2634 MLX5_MKEY_MASK_PAGE_SIZE
|
2635 MLX5_MKEY_MASK_START_ADDR
|
2636 MLX5_MKEY_MASK_EN_SIGERR
|
2637 MLX5_MKEY_MASK_EN_RINVAL
|
2638 MLX5_MKEY_MASK_KEY
|
2643 MLX5_MKEY_MASK_SMALL_FENCE
|
2644 MLX5_MKEY_MASK_FREE
|
2645 MLX5_MKEY_MASK_BSF_EN
;
2647 return cpu_to_be64(result
);
2650 static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg
*umr
,
2651 struct mlx5_ib_mr
*mr
)
2653 int ndescs
= mr
->ndescs
;
2655 memset(umr
, 0, sizeof(*umr
));
2657 if (mr
->access_mode
== MLX5_ACCESS_MODE_KLM
)
2658 /* KLMs take twice the size of MTTs */
2661 umr
->flags
= MLX5_UMR_CHECK_NOT_FREE
;
2662 umr
->klm_octowords
= get_klm_octo(ndescs
);
2663 umr
->mkey_mask
= frwr_mkey_mask();
2666 static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg
*umr
)
2668 memset(umr
, 0, sizeof(*umr
));
2669 umr
->mkey_mask
= cpu_to_be64(MLX5_MKEY_MASK_FREE
);
2670 umr
->flags
= 1 << 7;
2673 static __be64
get_umr_reg_mr_mask(void)
2677 result
= MLX5_MKEY_MASK_LEN
|
2678 MLX5_MKEY_MASK_PAGE_SIZE
|
2679 MLX5_MKEY_MASK_START_ADDR
|
2683 MLX5_MKEY_MASK_KEY
|
2687 MLX5_MKEY_MASK_FREE
;
2689 return cpu_to_be64(result
);
2692 static __be64
get_umr_unreg_mr_mask(void)
2696 result
= MLX5_MKEY_MASK_FREE
;
2698 return cpu_to_be64(result
);
2701 static __be64
get_umr_update_mtt_mask(void)
2705 result
= MLX5_MKEY_MASK_FREE
;
2707 return cpu_to_be64(result
);
2710 static __be64
get_umr_update_translation_mask(void)
2714 result
= MLX5_MKEY_MASK_LEN
|
2715 MLX5_MKEY_MASK_PAGE_SIZE
|
2716 MLX5_MKEY_MASK_START_ADDR
|
2717 MLX5_MKEY_MASK_KEY
|
2718 MLX5_MKEY_MASK_FREE
;
2720 return cpu_to_be64(result
);
2723 static __be64
get_umr_update_access_mask(void)
2727 result
= MLX5_MKEY_MASK_LW
|
2731 MLX5_MKEY_MASK_KEY
|
2732 MLX5_MKEY_MASK_FREE
;
2734 return cpu_to_be64(result
);
2737 static __be64
get_umr_update_pd_mask(void)
2741 result
= MLX5_MKEY_MASK_PD
|
2742 MLX5_MKEY_MASK_KEY
|
2743 MLX5_MKEY_MASK_FREE
;
2745 return cpu_to_be64(result
);
2748 static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg
*umr
,
2749 struct ib_send_wr
*wr
)
2751 struct mlx5_umr_wr
*umrwr
= umr_wr(wr
);
2753 memset(umr
, 0, sizeof(*umr
));
2755 if (wr
->send_flags
& MLX5_IB_SEND_UMR_FAIL_IF_FREE
)
2756 umr
->flags
= MLX5_UMR_CHECK_FREE
; /* fail if free */
2758 umr
->flags
= MLX5_UMR_CHECK_NOT_FREE
; /* fail if not free */
2760 if (!(wr
->send_flags
& MLX5_IB_SEND_UMR_UNREG
)) {
2761 umr
->klm_octowords
= get_klm_octo(umrwr
->npages
);
2762 if (wr
->send_flags
& MLX5_IB_SEND_UMR_UPDATE_MTT
) {
2763 umr
->mkey_mask
= get_umr_update_mtt_mask();
2764 umr
->bsf_octowords
= get_klm_octo(umrwr
->target
.offset
);
2765 umr
->flags
|= MLX5_UMR_TRANSLATION_OFFSET_EN
;
2767 if (wr
->send_flags
& MLX5_IB_SEND_UMR_UPDATE_TRANSLATION
)
2768 umr
->mkey_mask
|= get_umr_update_translation_mask();
2769 if (wr
->send_flags
& MLX5_IB_SEND_UMR_UPDATE_ACCESS
)
2770 umr
->mkey_mask
|= get_umr_update_access_mask();
2771 if (wr
->send_flags
& MLX5_IB_SEND_UMR_UPDATE_PD
)
2772 umr
->mkey_mask
|= get_umr_update_pd_mask();
2773 if (!umr
->mkey_mask
)
2774 umr
->mkey_mask
= get_umr_reg_mr_mask();
2776 umr
->mkey_mask
= get_umr_unreg_mr_mask();
2780 umr
->flags
|= MLX5_UMR_INLINE
;
2783 static u8
get_umr_flags(int acc
)
2785 return (acc
& IB_ACCESS_REMOTE_ATOMIC
? MLX5_PERM_ATOMIC
: 0) |
2786 (acc
& IB_ACCESS_REMOTE_WRITE
? MLX5_PERM_REMOTE_WRITE
: 0) |
2787 (acc
& IB_ACCESS_REMOTE_READ
? MLX5_PERM_REMOTE_READ
: 0) |
2788 (acc
& IB_ACCESS_LOCAL_WRITE
? MLX5_PERM_LOCAL_WRITE
: 0) |
2789 MLX5_PERM_LOCAL_READ
| MLX5_PERM_UMR_EN
;
2792 static void set_reg_mkey_seg(struct mlx5_mkey_seg
*seg
,
2793 struct mlx5_ib_mr
*mr
,
2794 u32 key
, int access
)
2796 int ndescs
= ALIGN(mr
->ndescs
, 8) >> 1;
2798 memset(seg
, 0, sizeof(*seg
));
2800 if (mr
->access_mode
== MLX5_ACCESS_MODE_MTT
)
2801 seg
->log2_page_size
= ilog2(mr
->ibmr
.page_size
);
2802 else if (mr
->access_mode
== MLX5_ACCESS_MODE_KLM
)
2803 /* KLMs take twice the size of MTTs */
2806 seg
->flags
= get_umr_flags(access
) | mr
->access_mode
;
2807 seg
->qpn_mkey7_0
= cpu_to_be32((key
& 0xff) | 0xffffff00);
2808 seg
->flags_pd
= cpu_to_be32(MLX5_MKEY_REMOTE_INVAL
);
2809 seg
->start_addr
= cpu_to_be64(mr
->ibmr
.iova
);
2810 seg
->len
= cpu_to_be64(mr
->ibmr
.length
);
2811 seg
->xlt_oct_size
= cpu_to_be32(ndescs
);
2814 static void set_linv_mkey_seg(struct mlx5_mkey_seg
*seg
)
2816 memset(seg
, 0, sizeof(*seg
));
2817 seg
->status
= MLX5_MKEY_STATUS_FREE
;
2820 static void set_reg_mkey_segment(struct mlx5_mkey_seg
*seg
, struct ib_send_wr
*wr
)
2822 struct mlx5_umr_wr
*umrwr
= umr_wr(wr
);
2824 memset(seg
, 0, sizeof(*seg
));
2825 if (wr
->send_flags
& MLX5_IB_SEND_UMR_UNREG
) {
2826 seg
->status
= MLX5_MKEY_STATUS_FREE
;
2830 seg
->flags
= convert_access(umrwr
->access_flags
);
2831 if (!(wr
->send_flags
& MLX5_IB_SEND_UMR_UPDATE_MTT
)) {
2833 seg
->flags_pd
= cpu_to_be32(to_mpd(umrwr
->pd
)->pdn
);
2834 seg
->start_addr
= cpu_to_be64(umrwr
->target
.virt_addr
);
2836 seg
->len
= cpu_to_be64(umrwr
->length
);
2837 seg
->log2_page_size
= umrwr
->page_shift
;
2838 seg
->qpn_mkey7_0
= cpu_to_be32(0xffffff00 |
2839 mlx5_mkey_variant(umrwr
->mkey
));
2842 static void set_reg_data_seg(struct mlx5_wqe_data_seg
*dseg
,
2843 struct mlx5_ib_mr
*mr
,
2844 struct mlx5_ib_pd
*pd
)
2846 int bcount
= mr
->desc_size
* mr
->ndescs
;
2848 dseg
->addr
= cpu_to_be64(mr
->desc_map
);
2849 dseg
->byte_count
= cpu_to_be32(ALIGN(bcount
, 64));
2850 dseg
->lkey
= cpu_to_be32(pd
->ibpd
.local_dma_lkey
);
2853 static __be32
send_ieth(struct ib_send_wr
*wr
)
2855 switch (wr
->opcode
) {
2856 case IB_WR_SEND_WITH_IMM
:
2857 case IB_WR_RDMA_WRITE_WITH_IMM
:
2858 return wr
->ex
.imm_data
;
2860 case IB_WR_SEND_WITH_INV
:
2861 return cpu_to_be32(wr
->ex
.invalidate_rkey
);
2868 static u8
calc_sig(void *wqe
, int size
)
2874 for (i
= 0; i
< size
; i
++)
2880 static u8
wq_sig(void *wqe
)
2882 return calc_sig(wqe
, (*((u8
*)wqe
+ 8) & 0x3f) << 4);
2885 static int set_data_inl_seg(struct mlx5_ib_qp
*qp
, struct ib_send_wr
*wr
,
2888 struct mlx5_wqe_inline_seg
*seg
;
2889 void *qend
= qp
->sq
.qend
;
2897 wqe
+= sizeof(*seg
);
2898 for (i
= 0; i
< wr
->num_sge
; i
++) {
2899 addr
= (void *)(unsigned long)(wr
->sg_list
[i
].addr
);
2900 len
= wr
->sg_list
[i
].length
;
2903 if (unlikely(inl
> qp
->max_inline_data
))
2906 if (unlikely(wqe
+ len
> qend
)) {
2908 memcpy(wqe
, addr
, copy
);
2911 wqe
= mlx5_get_send_wqe(qp
, 0);
2913 memcpy(wqe
, addr
, len
);
2917 seg
->byte_count
= cpu_to_be32(inl
| MLX5_INLINE_SEG
);
2919 *sz
= ALIGN(inl
+ sizeof(seg
->byte_count
), 16) / 16;
2924 static u16
prot_field_size(enum ib_signature_type type
)
2927 case IB_SIG_TYPE_T10_DIF
:
2928 return MLX5_DIF_SIZE
;
2934 static u8
bs_selector(int block_size
)
2936 switch (block_size
) {
2937 case 512: return 0x1;
2938 case 520: return 0x2;
2939 case 4096: return 0x3;
2940 case 4160: return 0x4;
2941 case 1073741824: return 0x5;
2946 static void mlx5_fill_inl_bsf(struct ib_sig_domain
*domain
,
2947 struct mlx5_bsf_inl
*inl
)
2949 /* Valid inline section and allow BSF refresh */
2950 inl
->vld_refresh
= cpu_to_be16(MLX5_BSF_INL_VALID
|
2951 MLX5_BSF_REFRESH_DIF
);
2952 inl
->dif_apptag
= cpu_to_be16(domain
->sig
.dif
.app_tag
);
2953 inl
->dif_reftag
= cpu_to_be32(domain
->sig
.dif
.ref_tag
);
2954 /* repeating block */
2955 inl
->rp_inv_seed
= MLX5_BSF_REPEAT_BLOCK
;
2956 inl
->sig_type
= domain
->sig
.dif
.bg_type
== IB_T10DIF_CRC
?
2957 MLX5_DIF_CRC
: MLX5_DIF_IPCS
;
2959 if (domain
->sig
.dif
.ref_remap
)
2960 inl
->dif_inc_ref_guard_check
|= MLX5_BSF_INC_REFTAG
;
2962 if (domain
->sig
.dif
.app_escape
) {
2963 if (domain
->sig
.dif
.ref_escape
)
2964 inl
->dif_inc_ref_guard_check
|= MLX5_BSF_APPREF_ESCAPE
;
2966 inl
->dif_inc_ref_guard_check
|= MLX5_BSF_APPTAG_ESCAPE
;
2969 inl
->dif_app_bitmask_check
=
2970 cpu_to_be16(domain
->sig
.dif
.apptag_check_mask
);
2973 static int mlx5_set_bsf(struct ib_mr
*sig_mr
,
2974 struct ib_sig_attrs
*sig_attrs
,
2975 struct mlx5_bsf
*bsf
, u32 data_size
)
2977 struct mlx5_core_sig_ctx
*msig
= to_mmr(sig_mr
)->sig
;
2978 struct mlx5_bsf_basic
*basic
= &bsf
->basic
;
2979 struct ib_sig_domain
*mem
= &sig_attrs
->mem
;
2980 struct ib_sig_domain
*wire
= &sig_attrs
->wire
;
2982 memset(bsf
, 0, sizeof(*bsf
));
2984 /* Basic + Extended + Inline */
2985 basic
->bsf_size_sbs
= 1 << 7;
2986 /* Input domain check byte mask */
2987 basic
->check_byte_mask
= sig_attrs
->check_mask
;
2988 basic
->raw_data_size
= cpu_to_be32(data_size
);
2991 switch (sig_attrs
->mem
.sig_type
) {
2992 case IB_SIG_TYPE_NONE
:
2994 case IB_SIG_TYPE_T10_DIF
:
2995 basic
->mem
.bs_selector
= bs_selector(mem
->sig
.dif
.pi_interval
);
2996 basic
->m_bfs_psv
= cpu_to_be32(msig
->psv_memory
.psv_idx
);
2997 mlx5_fill_inl_bsf(mem
, &bsf
->m_inl
);
3004 switch (sig_attrs
->wire
.sig_type
) {
3005 case IB_SIG_TYPE_NONE
:
3007 case IB_SIG_TYPE_T10_DIF
:
3008 if (mem
->sig
.dif
.pi_interval
== wire
->sig
.dif
.pi_interval
&&
3009 mem
->sig_type
== wire
->sig_type
) {
3010 /* Same block structure */
3011 basic
->bsf_size_sbs
|= 1 << 4;
3012 if (mem
->sig
.dif
.bg_type
== wire
->sig
.dif
.bg_type
)
3013 basic
->wire
.copy_byte_mask
|= MLX5_CPY_GRD_MASK
;
3014 if (mem
->sig
.dif
.app_tag
== wire
->sig
.dif
.app_tag
)
3015 basic
->wire
.copy_byte_mask
|= MLX5_CPY_APP_MASK
;
3016 if (mem
->sig
.dif
.ref_tag
== wire
->sig
.dif
.ref_tag
)
3017 basic
->wire
.copy_byte_mask
|= MLX5_CPY_REF_MASK
;
3019 basic
->wire
.bs_selector
= bs_selector(wire
->sig
.dif
.pi_interval
);
3021 basic
->w_bfs_psv
= cpu_to_be32(msig
->psv_wire
.psv_idx
);
3022 mlx5_fill_inl_bsf(wire
, &bsf
->w_inl
);
3031 static int set_sig_data_segment(struct ib_sig_handover_wr
*wr
,
3032 struct mlx5_ib_qp
*qp
, void **seg
, int *size
)
3034 struct ib_sig_attrs
*sig_attrs
= wr
->sig_attrs
;
3035 struct ib_mr
*sig_mr
= wr
->sig_mr
;
3036 struct mlx5_bsf
*bsf
;
3037 u32 data_len
= wr
->wr
.sg_list
->length
;
3038 u32 data_key
= wr
->wr
.sg_list
->lkey
;
3039 u64 data_va
= wr
->wr
.sg_list
->addr
;
3044 (data_key
== wr
->prot
->lkey
&&
3045 data_va
== wr
->prot
->addr
&&
3046 data_len
== wr
->prot
->length
)) {
3048 * Source domain doesn't contain signature information
3049 * or data and protection are interleaved in memory.
3050 * So need construct:
3051 * ------------------
3053 * ------------------
3055 * ------------------
3057 struct mlx5_klm
*data_klm
= *seg
;
3059 data_klm
->bcount
= cpu_to_be32(data_len
);
3060 data_klm
->key
= cpu_to_be32(data_key
);
3061 data_klm
->va
= cpu_to_be64(data_va
);
3062 wqe_size
= ALIGN(sizeof(*data_klm
), 64);
3065 * Source domain contains signature information
3066 * So need construct a strided block format:
3067 * ---------------------------
3068 * | stride_block_ctrl |
3069 * ---------------------------
3071 * ---------------------------
3073 * ---------------------------
3075 * ---------------------------
3077 struct mlx5_stride_block_ctrl_seg
*sblock_ctrl
;
3078 struct mlx5_stride_block_entry
*data_sentry
;
3079 struct mlx5_stride_block_entry
*prot_sentry
;
3080 u32 prot_key
= wr
->prot
->lkey
;
3081 u64 prot_va
= wr
->prot
->addr
;
3082 u16 block_size
= sig_attrs
->mem
.sig
.dif
.pi_interval
;
3086 data_sentry
= (void *)sblock_ctrl
+ sizeof(*sblock_ctrl
);
3087 prot_sentry
= (void *)data_sentry
+ sizeof(*data_sentry
);
3089 prot_size
= prot_field_size(sig_attrs
->mem
.sig_type
);
3091 pr_err("Bad block size given: %u\n", block_size
);
3094 sblock_ctrl
->bcount_per_cycle
= cpu_to_be32(block_size
+
3096 sblock_ctrl
->op
= cpu_to_be32(MLX5_STRIDE_BLOCK_OP
);
3097 sblock_ctrl
->repeat_count
= cpu_to_be32(data_len
/ block_size
);
3098 sblock_ctrl
->num_entries
= cpu_to_be16(2);
3100 data_sentry
->bcount
= cpu_to_be16(block_size
);
3101 data_sentry
->key
= cpu_to_be32(data_key
);
3102 data_sentry
->va
= cpu_to_be64(data_va
);
3103 data_sentry
->stride
= cpu_to_be16(block_size
);
3105 prot_sentry
->bcount
= cpu_to_be16(prot_size
);
3106 prot_sentry
->key
= cpu_to_be32(prot_key
);
3107 prot_sentry
->va
= cpu_to_be64(prot_va
);
3108 prot_sentry
->stride
= cpu_to_be16(prot_size
);
3110 wqe_size
= ALIGN(sizeof(*sblock_ctrl
) + sizeof(*data_sentry
) +
3111 sizeof(*prot_sentry
), 64);
3115 *size
+= wqe_size
/ 16;
3116 if (unlikely((*seg
== qp
->sq
.qend
)))
3117 *seg
= mlx5_get_send_wqe(qp
, 0);
3120 ret
= mlx5_set_bsf(sig_mr
, sig_attrs
, bsf
, data_len
);
3124 *seg
+= sizeof(*bsf
);
3125 *size
+= sizeof(*bsf
) / 16;
3126 if (unlikely((*seg
== qp
->sq
.qend
)))
3127 *seg
= mlx5_get_send_wqe(qp
, 0);
3132 static void set_sig_mkey_segment(struct mlx5_mkey_seg
*seg
,
3133 struct ib_sig_handover_wr
*wr
, u32 nelements
,
3134 u32 length
, u32 pdn
)
3136 struct ib_mr
*sig_mr
= wr
->sig_mr
;
3137 u32 sig_key
= sig_mr
->rkey
;
3138 u8 sigerr
= to_mmr(sig_mr
)->sig
->sigerr_count
& 1;
3140 memset(seg
, 0, sizeof(*seg
));
3142 seg
->flags
= get_umr_flags(wr
->access_flags
) |
3143 MLX5_ACCESS_MODE_KLM
;
3144 seg
->qpn_mkey7_0
= cpu_to_be32((sig_key
& 0xff) | 0xffffff00);
3145 seg
->flags_pd
= cpu_to_be32(MLX5_MKEY_REMOTE_INVAL
| sigerr
<< 26 |
3146 MLX5_MKEY_BSF_EN
| pdn
);
3147 seg
->len
= cpu_to_be64(length
);
3148 seg
->xlt_oct_size
= cpu_to_be32(be16_to_cpu(get_klm_octo(nelements
)));
3149 seg
->bsfs_octo_size
= cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE
);
3152 static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg
*umr
,
3155 memset(umr
, 0, sizeof(*umr
));
3157 umr
->flags
= MLX5_FLAGS_INLINE
| MLX5_FLAGS_CHECK_FREE
;
3158 umr
->klm_octowords
= get_klm_octo(nelements
);
3159 umr
->bsf_octowords
= cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE
);
3160 umr
->mkey_mask
= sig_mkey_mask();
3164 static int set_sig_umr_wr(struct ib_send_wr
*send_wr
, struct mlx5_ib_qp
*qp
,
3165 void **seg
, int *size
)
3167 struct ib_sig_handover_wr
*wr
= sig_handover_wr(send_wr
);
3168 struct mlx5_ib_mr
*sig_mr
= to_mmr(wr
->sig_mr
);
3169 u32 pdn
= get_pd(qp
)->pdn
;
3171 int region_len
, ret
;
3173 if (unlikely(wr
->wr
.num_sge
!= 1) ||
3174 unlikely(wr
->access_flags
& IB_ACCESS_REMOTE_ATOMIC
) ||
3175 unlikely(!sig_mr
->sig
) || unlikely(!qp
->signature_en
) ||
3176 unlikely(!sig_mr
->sig
->sig_status_checked
))
3179 /* length of the protected region, data + protection */
3180 region_len
= wr
->wr
.sg_list
->length
;
3182 (wr
->prot
->lkey
!= wr
->wr
.sg_list
->lkey
||
3183 wr
->prot
->addr
!= wr
->wr
.sg_list
->addr
||
3184 wr
->prot
->length
!= wr
->wr
.sg_list
->length
))
3185 region_len
+= wr
->prot
->length
;
3188 * KLM octoword size - if protection was provided
3189 * then we use strided block format (3 octowords),
3190 * else we use single KLM (1 octoword)
3192 klm_oct_size
= wr
->prot
? 3 : 1;
3194 set_sig_umr_segment(*seg
, klm_oct_size
);
3195 *seg
+= sizeof(struct mlx5_wqe_umr_ctrl_seg
);
3196 *size
+= sizeof(struct mlx5_wqe_umr_ctrl_seg
) / 16;
3197 if (unlikely((*seg
== qp
->sq
.qend
)))
3198 *seg
= mlx5_get_send_wqe(qp
, 0);
3200 set_sig_mkey_segment(*seg
, wr
, klm_oct_size
, region_len
, pdn
);
3201 *seg
+= sizeof(struct mlx5_mkey_seg
);
3202 *size
+= sizeof(struct mlx5_mkey_seg
) / 16;
3203 if (unlikely((*seg
== qp
->sq
.qend
)))
3204 *seg
= mlx5_get_send_wqe(qp
, 0);
3206 ret
= set_sig_data_segment(wr
, qp
, seg
, size
);
3210 sig_mr
->sig
->sig_status_checked
= false;
3214 static int set_psv_wr(struct ib_sig_domain
*domain
,
3215 u32 psv_idx
, void **seg
, int *size
)
3217 struct mlx5_seg_set_psv
*psv_seg
= *seg
;
3219 memset(psv_seg
, 0, sizeof(*psv_seg
));
3220 psv_seg
->psv_num
= cpu_to_be32(psv_idx
);
3221 switch (domain
->sig_type
) {
3222 case IB_SIG_TYPE_NONE
:
3224 case IB_SIG_TYPE_T10_DIF
:
3225 psv_seg
->transient_sig
= cpu_to_be32(domain
->sig
.dif
.bg
<< 16 |
3226 domain
->sig
.dif
.app_tag
);
3227 psv_seg
->ref_tag
= cpu_to_be32(domain
->sig
.dif
.ref_tag
);
3230 pr_err("Bad signature type given.\n");
3234 *seg
+= sizeof(*psv_seg
);
3235 *size
+= sizeof(*psv_seg
) / 16;
3240 static int set_reg_wr(struct mlx5_ib_qp
*qp
,
3241 struct ib_reg_wr
*wr
,
3242 void **seg
, int *size
)
3244 struct mlx5_ib_mr
*mr
= to_mmr(wr
->mr
);
3245 struct mlx5_ib_pd
*pd
= to_mpd(qp
->ibqp
.pd
);
3247 if (unlikely(wr
->wr
.send_flags
& IB_SEND_INLINE
)) {
3248 mlx5_ib_warn(to_mdev(qp
->ibqp
.device
),
3249 "Invalid IB_SEND_INLINE send flag\n");
3253 set_reg_umr_seg(*seg
, mr
);
3254 *seg
+= sizeof(struct mlx5_wqe_umr_ctrl_seg
);
3255 *size
+= sizeof(struct mlx5_wqe_umr_ctrl_seg
) / 16;
3256 if (unlikely((*seg
== qp
->sq
.qend
)))
3257 *seg
= mlx5_get_send_wqe(qp
, 0);
3259 set_reg_mkey_seg(*seg
, mr
, wr
->key
, wr
->access
);
3260 *seg
+= sizeof(struct mlx5_mkey_seg
);
3261 *size
+= sizeof(struct mlx5_mkey_seg
) / 16;
3262 if (unlikely((*seg
== qp
->sq
.qend
)))
3263 *seg
= mlx5_get_send_wqe(qp
, 0);
3265 set_reg_data_seg(*seg
, mr
, pd
);
3266 *seg
+= sizeof(struct mlx5_wqe_data_seg
);
3267 *size
+= (sizeof(struct mlx5_wqe_data_seg
) / 16);
3272 static void set_linv_wr(struct mlx5_ib_qp
*qp
, void **seg
, int *size
)
3274 set_linv_umr_seg(*seg
);
3275 *seg
+= sizeof(struct mlx5_wqe_umr_ctrl_seg
);
3276 *size
+= sizeof(struct mlx5_wqe_umr_ctrl_seg
) / 16;
3277 if (unlikely((*seg
== qp
->sq
.qend
)))
3278 *seg
= mlx5_get_send_wqe(qp
, 0);
3279 set_linv_mkey_seg(*seg
);
3280 *seg
+= sizeof(struct mlx5_mkey_seg
);
3281 *size
+= sizeof(struct mlx5_mkey_seg
) / 16;
3282 if (unlikely((*seg
== qp
->sq
.qend
)))
3283 *seg
= mlx5_get_send_wqe(qp
, 0);
3286 static void dump_wqe(struct mlx5_ib_qp
*qp
, int idx
, int size_16
)
3292 pr_debug("dump wqe at %p\n", mlx5_get_send_wqe(qp
, tidx
));
3293 for (i
= 0, j
= 0; i
< size_16
* 4; i
+= 4, j
+= 4) {
3294 if ((i
& 0xf) == 0) {
3295 void *buf
= mlx5_get_send_wqe(qp
, tidx
);
3296 tidx
= (tidx
+ 1) & (qp
->sq
.wqe_cnt
- 1);
3300 pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p
[j
]),
3301 be32_to_cpu(p
[j
+ 1]), be32_to_cpu(p
[j
+ 2]),
3302 be32_to_cpu(p
[j
+ 3]));
3306 static void mlx5_bf_copy(u64 __iomem
*dst
, u64
*src
,
3307 unsigned bytecnt
, struct mlx5_ib_qp
*qp
)
3309 while (bytecnt
> 0) {
3310 __iowrite64_copy(dst
++, src
++, 8);
3311 __iowrite64_copy(dst
++, src
++, 8);
3312 __iowrite64_copy(dst
++, src
++, 8);
3313 __iowrite64_copy(dst
++, src
++, 8);
3314 __iowrite64_copy(dst
++, src
++, 8);
3315 __iowrite64_copy(dst
++, src
++, 8);
3316 __iowrite64_copy(dst
++, src
++, 8);
3317 __iowrite64_copy(dst
++, src
++, 8);
3319 if (unlikely(src
== qp
->sq
.qend
))
3320 src
= mlx5_get_send_wqe(qp
, 0);
3324 static u8
get_fence(u8 fence
, struct ib_send_wr
*wr
)
3326 if (unlikely(wr
->opcode
== IB_WR_LOCAL_INV
&&
3327 wr
->send_flags
& IB_SEND_FENCE
))
3328 return MLX5_FENCE_MODE_STRONG_ORDERING
;
3330 if (unlikely(fence
)) {
3331 if (wr
->send_flags
& IB_SEND_FENCE
)
3332 return MLX5_FENCE_MODE_SMALL_AND_FENCE
;
3335 } else if (unlikely(wr
->send_flags
& IB_SEND_FENCE
)) {
3336 return MLX5_FENCE_MODE_FENCE
;
3342 static int begin_wqe(struct mlx5_ib_qp
*qp
, void **seg
,
3343 struct mlx5_wqe_ctrl_seg
**ctrl
,
3344 struct ib_send_wr
*wr
, unsigned *idx
,
3345 int *size
, int nreq
)
3349 if (unlikely(mlx5_wq_overflow(&qp
->sq
, nreq
, qp
->ibqp
.send_cq
))) {
3354 *idx
= qp
->sq
.cur_post
& (qp
->sq
.wqe_cnt
- 1);
3355 *seg
= mlx5_get_send_wqe(qp
, *idx
);
3357 *(uint32_t *)(*seg
+ 8) = 0;
3358 (*ctrl
)->imm
= send_ieth(wr
);
3359 (*ctrl
)->fm_ce_se
= qp
->sq_signal_bits
|
3360 (wr
->send_flags
& IB_SEND_SIGNALED
?
3361 MLX5_WQE_CTRL_CQ_UPDATE
: 0) |
3362 (wr
->send_flags
& IB_SEND_SOLICITED
?
3363 MLX5_WQE_CTRL_SOLICITED
: 0);
3365 *seg
+= sizeof(**ctrl
);
3366 *size
= sizeof(**ctrl
) / 16;
3371 static void finish_wqe(struct mlx5_ib_qp
*qp
,
3372 struct mlx5_wqe_ctrl_seg
*ctrl
,
3373 u8 size
, unsigned idx
, u64 wr_id
,
3374 int nreq
, u8 fence
, u8 next_fence
,
3379 ctrl
->opmod_idx_opcode
= cpu_to_be32(((u32
)(qp
->sq
.cur_post
) << 8) |
3380 mlx5_opcode
| ((u32
)opmod
<< 24));
3381 ctrl
->qpn_ds
= cpu_to_be32(size
| (qp
->trans_qp
.base
.mqp
.qpn
<< 8));
3382 ctrl
->fm_ce_se
|= fence
;
3383 qp
->fm_cache
= next_fence
;
3384 if (unlikely(qp
->wq_sig
))
3385 ctrl
->signature
= wq_sig(ctrl
);
3387 qp
->sq
.wrid
[idx
] = wr_id
;
3388 qp
->sq
.w_list
[idx
].opcode
= mlx5_opcode
;
3389 qp
->sq
.wqe_head
[idx
] = qp
->sq
.head
+ nreq
;
3390 qp
->sq
.cur_post
+= DIV_ROUND_UP(size
* 16, MLX5_SEND_WQE_BB
);
3391 qp
->sq
.w_list
[idx
].next
= qp
->sq
.cur_post
;
3395 int mlx5_ib_post_send(struct ib_qp
*ibqp
, struct ib_send_wr
*wr
,
3396 struct ib_send_wr
**bad_wr
)
3398 struct mlx5_wqe_ctrl_seg
*ctrl
= NULL
; /* compiler warning */
3399 struct mlx5_ib_dev
*dev
= to_mdev(ibqp
->device
);
3400 struct mlx5_ib_qp
*qp
;
3401 struct mlx5_ib_mr
*mr
;
3402 struct mlx5_wqe_data_seg
*dpseg
;
3403 struct mlx5_wqe_xrc_seg
*xrc
;
3405 int uninitialized_var(size
);
3407 unsigned long flags
;
3418 if (unlikely(ibqp
->qp_type
== IB_QPT_GSI
))
3419 return mlx5_ib_gsi_post_send(ibqp
, wr
, bad_wr
);
3425 spin_lock_irqsave(&qp
->sq
.lock
, flags
);
3427 for (nreq
= 0; wr
; nreq
++, wr
= wr
->next
) {
3428 if (unlikely(wr
->opcode
>= ARRAY_SIZE(mlx5_ib_opcode
))) {
3429 mlx5_ib_warn(dev
, "\n");
3435 fence
= qp
->fm_cache
;
3436 num_sge
= wr
->num_sge
;
3437 if (unlikely(num_sge
> qp
->sq
.max_gs
)) {
3438 mlx5_ib_warn(dev
, "\n");
3444 err
= begin_wqe(qp
, &seg
, &ctrl
, wr
, &idx
, &size
, nreq
);
3446 mlx5_ib_warn(dev
, "\n");
3452 switch (ibqp
->qp_type
) {
3453 case IB_QPT_XRC_INI
:
3455 seg
+= sizeof(*xrc
);
3456 size
+= sizeof(*xrc
) / 16;
3459 switch (wr
->opcode
) {
3460 case IB_WR_RDMA_READ
:
3461 case IB_WR_RDMA_WRITE
:
3462 case IB_WR_RDMA_WRITE_WITH_IMM
:
3463 set_raddr_seg(seg
, rdma_wr(wr
)->remote_addr
,
3465 seg
+= sizeof(struct mlx5_wqe_raddr_seg
);
3466 size
+= sizeof(struct mlx5_wqe_raddr_seg
) / 16;
3469 case IB_WR_ATOMIC_CMP_AND_SWP
:
3470 case IB_WR_ATOMIC_FETCH_AND_ADD
:
3471 case IB_WR_MASKED_ATOMIC_CMP_AND_SWP
:
3472 mlx5_ib_warn(dev
, "Atomic operations are not supported yet\n");
3477 case IB_WR_LOCAL_INV
:
3478 next_fence
= MLX5_FENCE_MODE_INITIATOR_SMALL
;
3479 qp
->sq
.wr_data
[idx
] = IB_WR_LOCAL_INV
;
3480 ctrl
->imm
= cpu_to_be32(wr
->ex
.invalidate_rkey
);
3481 set_linv_wr(qp
, &seg
, &size
);
3486 next_fence
= MLX5_FENCE_MODE_INITIATOR_SMALL
;
3487 qp
->sq
.wr_data
[idx
] = IB_WR_REG_MR
;
3488 ctrl
->imm
= cpu_to_be32(reg_wr(wr
)->key
);
3489 err
= set_reg_wr(qp
, reg_wr(wr
), &seg
, &size
);
3497 case IB_WR_REG_SIG_MR
:
3498 qp
->sq
.wr_data
[idx
] = IB_WR_REG_SIG_MR
;
3499 mr
= to_mmr(sig_handover_wr(wr
)->sig_mr
);
3501 ctrl
->imm
= cpu_to_be32(mr
->ibmr
.rkey
);
3502 err
= set_sig_umr_wr(wr
, qp
, &seg
, &size
);
3504 mlx5_ib_warn(dev
, "\n");
3509 finish_wqe(qp
, ctrl
, size
, idx
, wr
->wr_id
,
3510 nreq
, get_fence(fence
, wr
),
3511 next_fence
, MLX5_OPCODE_UMR
);
3513 * SET_PSV WQEs are not signaled and solicited
3516 wr
->send_flags
&= ~IB_SEND_SIGNALED
;
3517 wr
->send_flags
|= IB_SEND_SOLICITED
;
3518 err
= begin_wqe(qp
, &seg
, &ctrl
, wr
,
3521 mlx5_ib_warn(dev
, "\n");
3527 err
= set_psv_wr(&sig_handover_wr(wr
)->sig_attrs
->mem
,
3528 mr
->sig
->psv_memory
.psv_idx
, &seg
,
3531 mlx5_ib_warn(dev
, "\n");
3536 finish_wqe(qp
, ctrl
, size
, idx
, wr
->wr_id
,
3537 nreq
, get_fence(fence
, wr
),
3538 next_fence
, MLX5_OPCODE_SET_PSV
);
3539 err
= begin_wqe(qp
, &seg
, &ctrl
, wr
,
3542 mlx5_ib_warn(dev
, "\n");
3548 next_fence
= MLX5_FENCE_MODE_INITIATOR_SMALL
;
3549 err
= set_psv_wr(&sig_handover_wr(wr
)->sig_attrs
->wire
,
3550 mr
->sig
->psv_wire
.psv_idx
, &seg
,
3553 mlx5_ib_warn(dev
, "\n");
3558 finish_wqe(qp
, ctrl
, size
, idx
, wr
->wr_id
,
3559 nreq
, get_fence(fence
, wr
),
3560 next_fence
, MLX5_OPCODE_SET_PSV
);
3570 switch (wr
->opcode
) {
3571 case IB_WR_RDMA_WRITE
:
3572 case IB_WR_RDMA_WRITE_WITH_IMM
:
3573 set_raddr_seg(seg
, rdma_wr(wr
)->remote_addr
,
3575 seg
+= sizeof(struct mlx5_wqe_raddr_seg
);
3576 size
+= sizeof(struct mlx5_wqe_raddr_seg
) / 16;
3585 case MLX5_IB_QPT_HW_GSI
:
3586 set_datagram_seg(seg
, wr
);
3587 seg
+= sizeof(struct mlx5_wqe_datagram_seg
);
3588 size
+= sizeof(struct mlx5_wqe_datagram_seg
) / 16;
3589 if (unlikely((seg
== qend
)))
3590 seg
= mlx5_get_send_wqe(qp
, 0);
3593 set_datagram_seg(seg
, wr
);
3594 seg
+= sizeof(struct mlx5_wqe_datagram_seg
);
3595 size
+= sizeof(struct mlx5_wqe_datagram_seg
) / 16;
3597 if (unlikely((seg
== qend
)))
3598 seg
= mlx5_get_send_wqe(qp
, 0);
3600 /* handle qp that supports ud offload */
3601 if (qp
->flags
& IB_QP_CREATE_IPOIB_UD_LSO
) {
3602 struct mlx5_wqe_eth_pad
*pad
;
3605 memset(pad
, 0, sizeof(struct mlx5_wqe_eth_pad
));
3606 seg
+= sizeof(struct mlx5_wqe_eth_pad
);
3607 size
+= sizeof(struct mlx5_wqe_eth_pad
) / 16;
3609 seg
= set_eth_seg(seg
, wr
, qend
, qp
, &size
);
3611 if (unlikely((seg
== qend
)))
3612 seg
= mlx5_get_send_wqe(qp
, 0);
3615 case MLX5_IB_QPT_REG_UMR
:
3616 if (wr
->opcode
!= MLX5_IB_WR_UMR
) {
3618 mlx5_ib_warn(dev
, "bad opcode\n");
3621 qp
->sq
.wr_data
[idx
] = MLX5_IB_WR_UMR
;
3622 ctrl
->imm
= cpu_to_be32(umr_wr(wr
)->mkey
);
3623 set_reg_umr_segment(seg
, wr
);
3624 seg
+= sizeof(struct mlx5_wqe_umr_ctrl_seg
);
3625 size
+= sizeof(struct mlx5_wqe_umr_ctrl_seg
) / 16;
3626 if (unlikely((seg
== qend
)))
3627 seg
= mlx5_get_send_wqe(qp
, 0);
3628 set_reg_mkey_segment(seg
, wr
);
3629 seg
+= sizeof(struct mlx5_mkey_seg
);
3630 size
+= sizeof(struct mlx5_mkey_seg
) / 16;
3631 if (unlikely((seg
== qend
)))
3632 seg
= mlx5_get_send_wqe(qp
, 0);
3639 if (wr
->send_flags
& IB_SEND_INLINE
&& num_sge
) {
3640 int uninitialized_var(sz
);
3642 err
= set_data_inl_seg(qp
, wr
, seg
, &sz
);
3643 if (unlikely(err
)) {
3644 mlx5_ib_warn(dev
, "\n");
3652 for (i
= 0; i
< num_sge
; i
++) {
3653 if (unlikely(dpseg
== qend
)) {
3654 seg
= mlx5_get_send_wqe(qp
, 0);
3657 if (likely(wr
->sg_list
[i
].length
)) {
3658 set_data_ptr_seg(dpseg
, wr
->sg_list
+ i
);
3659 size
+= sizeof(struct mlx5_wqe_data_seg
) / 16;
3665 finish_wqe(qp
, ctrl
, size
, idx
, wr
->wr_id
, nreq
,
3666 get_fence(fence
, wr
), next_fence
,
3667 mlx5_ib_opcode
[wr
->opcode
]);
3670 dump_wqe(qp
, idx
, size
);
3675 qp
->sq
.head
+= nreq
;
3677 /* Make sure that descriptors are written before
3678 * updating doorbell record and ringing the doorbell
3682 qp
->db
.db
[MLX5_SND_DBR
] = cpu_to_be32(qp
->sq
.cur_post
);
3684 /* Make sure doorbell record is visible to the HCA before
3685 * we hit doorbell */
3689 spin_lock(&bf
->lock
);
3691 __acquire(&bf
->lock
);
3694 if (0 && nreq
== 1 && bf
->uuarn
&& inl
&& size
> 1 && size
<= bf
->buf_size
/ 16) {
3695 mlx5_bf_copy(bf
->reg
+ bf
->offset
, (u64
*)ctrl
, ALIGN(size
* 16, 64), qp
);
3698 mlx5_write64((__be32
*)ctrl
, bf
->regreg
+ bf
->offset
,
3699 MLX5_GET_DOORBELL_LOCK(&bf
->lock32
));
3700 /* Make sure doorbells don't leak out of SQ spinlock
3701 * and reach the HCA out of order.
3705 bf
->offset
^= bf
->buf_size
;
3707 spin_unlock(&bf
->lock
);
3709 __release(&bf
->lock
);
3712 spin_unlock_irqrestore(&qp
->sq
.lock
, flags
);
3717 static void set_sig_seg(struct mlx5_rwqe_sig
*sig
, int size
)
3719 sig
->signature
= calc_sig(sig
, size
);
3722 int mlx5_ib_post_recv(struct ib_qp
*ibqp
, struct ib_recv_wr
*wr
,
3723 struct ib_recv_wr
**bad_wr
)
3725 struct mlx5_ib_qp
*qp
= to_mqp(ibqp
);
3726 struct mlx5_wqe_data_seg
*scat
;
3727 struct mlx5_rwqe_sig
*sig
;
3728 unsigned long flags
;
3734 if (unlikely(ibqp
->qp_type
== IB_QPT_GSI
))
3735 return mlx5_ib_gsi_post_recv(ibqp
, wr
, bad_wr
);
3737 spin_lock_irqsave(&qp
->rq
.lock
, flags
);
3739 ind
= qp
->rq
.head
& (qp
->rq
.wqe_cnt
- 1);
3741 for (nreq
= 0; wr
; nreq
++, wr
= wr
->next
) {
3742 if (mlx5_wq_overflow(&qp
->rq
, nreq
, qp
->ibqp
.recv_cq
)) {
3748 if (unlikely(wr
->num_sge
> qp
->rq
.max_gs
)) {
3754 scat
= get_recv_wqe(qp
, ind
);
3758 for (i
= 0; i
< wr
->num_sge
; i
++)
3759 set_data_ptr_seg(scat
+ i
, wr
->sg_list
+ i
);
3761 if (i
< qp
->rq
.max_gs
) {
3762 scat
[i
].byte_count
= 0;
3763 scat
[i
].lkey
= cpu_to_be32(MLX5_INVALID_LKEY
);
3768 sig
= (struct mlx5_rwqe_sig
*)scat
;
3769 set_sig_seg(sig
, (qp
->rq
.max_gs
+ 1) << 2);
3772 qp
->rq
.wrid
[ind
] = wr
->wr_id
;
3774 ind
= (ind
+ 1) & (qp
->rq
.wqe_cnt
- 1);
3779 qp
->rq
.head
+= nreq
;
3781 /* Make sure that descriptors are written before
3786 *qp
->db
.db
= cpu_to_be32(qp
->rq
.head
& 0xffff);
3789 spin_unlock_irqrestore(&qp
->rq
.lock
, flags
);
3794 static inline enum ib_qp_state
to_ib_qp_state(enum mlx5_qp_state mlx5_state
)
3796 switch (mlx5_state
) {
3797 case MLX5_QP_STATE_RST
: return IB_QPS_RESET
;
3798 case MLX5_QP_STATE_INIT
: return IB_QPS_INIT
;
3799 case MLX5_QP_STATE_RTR
: return IB_QPS_RTR
;
3800 case MLX5_QP_STATE_RTS
: return IB_QPS_RTS
;
3801 case MLX5_QP_STATE_SQ_DRAINING
:
3802 case MLX5_QP_STATE_SQD
: return IB_QPS_SQD
;
3803 case MLX5_QP_STATE_SQER
: return IB_QPS_SQE
;
3804 case MLX5_QP_STATE_ERR
: return IB_QPS_ERR
;
3809 static inline enum ib_mig_state
to_ib_mig_state(int mlx5_mig_state
)
3811 switch (mlx5_mig_state
) {
3812 case MLX5_QP_PM_ARMED
: return IB_MIG_ARMED
;
3813 case MLX5_QP_PM_REARM
: return IB_MIG_REARM
;
3814 case MLX5_QP_PM_MIGRATED
: return IB_MIG_MIGRATED
;
3819 static int to_ib_qp_access_flags(int mlx5_flags
)
3823 if (mlx5_flags
& MLX5_QP_BIT_RRE
)
3824 ib_flags
|= IB_ACCESS_REMOTE_READ
;
3825 if (mlx5_flags
& MLX5_QP_BIT_RWE
)
3826 ib_flags
|= IB_ACCESS_REMOTE_WRITE
;
3827 if (mlx5_flags
& MLX5_QP_BIT_RAE
)
3828 ib_flags
|= IB_ACCESS_REMOTE_ATOMIC
;
3833 static void to_ib_ah_attr(struct mlx5_ib_dev
*ibdev
, struct ib_ah_attr
*ib_ah_attr
,
3834 struct mlx5_qp_path
*path
)
3836 struct mlx5_core_dev
*dev
= ibdev
->mdev
;
3838 memset(ib_ah_attr
, 0, sizeof(*ib_ah_attr
));
3839 ib_ah_attr
->port_num
= path
->port
;
3841 if (ib_ah_attr
->port_num
== 0 ||
3842 ib_ah_attr
->port_num
> MLX5_CAP_GEN(dev
, num_ports
))
3845 ib_ah_attr
->sl
= path
->dci_cfi_prio_sl
& 0xf;
3847 ib_ah_attr
->dlid
= be16_to_cpu(path
->rlid
);
3848 ib_ah_attr
->src_path_bits
= path
->grh_mlid
& 0x7f;
3849 ib_ah_attr
->static_rate
= path
->static_rate
? path
->static_rate
- 5 : 0;
3850 ib_ah_attr
->ah_flags
= (path
->grh_mlid
& (1 << 7)) ? IB_AH_GRH
: 0;
3851 if (ib_ah_attr
->ah_flags
) {
3852 ib_ah_attr
->grh
.sgid_index
= path
->mgid_index
;
3853 ib_ah_attr
->grh
.hop_limit
= path
->hop_limit
;
3854 ib_ah_attr
->grh
.traffic_class
=
3855 (be32_to_cpu(path
->tclass_flowlabel
) >> 20) & 0xff;
3856 ib_ah_attr
->grh
.flow_label
=
3857 be32_to_cpu(path
->tclass_flowlabel
) & 0xfffff;
3858 memcpy(ib_ah_attr
->grh
.dgid
.raw
,
3859 path
->rgid
, sizeof(ib_ah_attr
->grh
.dgid
.raw
));
3863 static int query_raw_packet_qp_sq_state(struct mlx5_ib_dev
*dev
,
3864 struct mlx5_ib_sq
*sq
,
3872 inlen
= MLX5_ST_SZ_BYTES(query_sq_out
);
3873 out
= mlx5_vzalloc(inlen
);
3877 err
= mlx5_core_query_sq(dev
->mdev
, sq
->base
.mqp
.qpn
, out
);
3881 sqc
= MLX5_ADDR_OF(query_sq_out
, out
, sq_context
);
3882 *sq_state
= MLX5_GET(sqc
, sqc
, state
);
3883 sq
->state
= *sq_state
;
3890 static int query_raw_packet_qp_rq_state(struct mlx5_ib_dev
*dev
,
3891 struct mlx5_ib_rq
*rq
,
3899 inlen
= MLX5_ST_SZ_BYTES(query_rq_out
);
3900 out
= mlx5_vzalloc(inlen
);
3904 err
= mlx5_core_query_rq(dev
->mdev
, rq
->base
.mqp
.qpn
, out
);
3908 rqc
= MLX5_ADDR_OF(query_rq_out
, out
, rq_context
);
3909 *rq_state
= MLX5_GET(rqc
, rqc
, state
);
3910 rq
->state
= *rq_state
;
3917 static int sqrq_state_to_qp_state(u8 sq_state
, u8 rq_state
,
3918 struct mlx5_ib_qp
*qp
, u8
*qp_state
)
3920 static const u8 sqrq_trans
[MLX5_RQ_NUM_STATE
][MLX5_SQ_NUM_STATE
] = {
3921 [MLX5_RQC_STATE_RST
] = {
3922 [MLX5_SQC_STATE_RST
] = IB_QPS_RESET
,
3923 [MLX5_SQC_STATE_RDY
] = MLX5_QP_STATE_BAD
,
3924 [MLX5_SQC_STATE_ERR
] = MLX5_QP_STATE_BAD
,
3925 [MLX5_SQ_STATE_NA
] = IB_QPS_RESET
,
3927 [MLX5_RQC_STATE_RDY
] = {
3928 [MLX5_SQC_STATE_RST
] = MLX5_QP_STATE_BAD
,
3929 [MLX5_SQC_STATE_RDY
] = MLX5_QP_STATE
,
3930 [MLX5_SQC_STATE_ERR
] = IB_QPS_SQE
,
3931 [MLX5_SQ_STATE_NA
] = MLX5_QP_STATE
,
3933 [MLX5_RQC_STATE_ERR
] = {
3934 [MLX5_SQC_STATE_RST
] = MLX5_QP_STATE_BAD
,
3935 [MLX5_SQC_STATE_RDY
] = MLX5_QP_STATE_BAD
,
3936 [MLX5_SQC_STATE_ERR
] = IB_QPS_ERR
,
3937 [MLX5_SQ_STATE_NA
] = IB_QPS_ERR
,
3939 [MLX5_RQ_STATE_NA
] = {
3940 [MLX5_SQC_STATE_RST
] = IB_QPS_RESET
,
3941 [MLX5_SQC_STATE_RDY
] = MLX5_QP_STATE
,
3942 [MLX5_SQC_STATE_ERR
] = MLX5_QP_STATE
,
3943 [MLX5_SQ_STATE_NA
] = MLX5_QP_STATE_BAD
,
3947 *qp_state
= sqrq_trans
[rq_state
][sq_state
];
3949 if (*qp_state
== MLX5_QP_STATE_BAD
) {
3950 WARN(1, "Buggy Raw Packet QP state, SQ 0x%x state: 0x%x, RQ 0x%x state: 0x%x",
3951 qp
->raw_packet_qp
.sq
.base
.mqp
.qpn
, sq_state
,
3952 qp
->raw_packet_qp
.rq
.base
.mqp
.qpn
, rq_state
);
3956 if (*qp_state
== MLX5_QP_STATE
)
3957 *qp_state
= qp
->state
;
3962 static int query_raw_packet_qp_state(struct mlx5_ib_dev
*dev
,
3963 struct mlx5_ib_qp
*qp
,
3964 u8
*raw_packet_qp_state
)
3966 struct mlx5_ib_raw_packet_qp
*raw_packet_qp
= &qp
->raw_packet_qp
;
3967 struct mlx5_ib_sq
*sq
= &raw_packet_qp
->sq
;
3968 struct mlx5_ib_rq
*rq
= &raw_packet_qp
->rq
;
3970 u8 sq_state
= MLX5_SQ_STATE_NA
;
3971 u8 rq_state
= MLX5_RQ_STATE_NA
;
3973 if (qp
->sq
.wqe_cnt
) {
3974 err
= query_raw_packet_qp_sq_state(dev
, sq
, &sq_state
);
3979 if (qp
->rq
.wqe_cnt
) {
3980 err
= query_raw_packet_qp_rq_state(dev
, rq
, &rq_state
);
3985 return sqrq_state_to_qp_state(sq_state
, rq_state
, qp
,
3986 raw_packet_qp_state
);
3989 static int query_qp_attr(struct mlx5_ib_dev
*dev
, struct mlx5_ib_qp
*qp
,
3990 struct ib_qp_attr
*qp_attr
)
3992 struct mlx5_query_qp_mbox_out
*outb
;
3993 struct mlx5_qp_context
*context
;
3997 outb
= kzalloc(sizeof(*outb
), GFP_KERNEL
);
4001 context
= &outb
->ctx
;
4002 err
= mlx5_core_qp_query(dev
->mdev
, &qp
->trans_qp
.base
.mqp
, outb
,
4007 mlx5_state
= be32_to_cpu(context
->flags
) >> 28;
4009 qp
->state
= to_ib_qp_state(mlx5_state
);
4010 qp_attr
->path_mtu
= context
->mtu_msgmax
>> 5;
4011 qp_attr
->path_mig_state
=
4012 to_ib_mig_state((be32_to_cpu(context
->flags
) >> 11) & 0x3);
4013 qp_attr
->qkey
= be32_to_cpu(context
->qkey
);
4014 qp_attr
->rq_psn
= be32_to_cpu(context
->rnr_nextrecvpsn
) & 0xffffff;
4015 qp_attr
->sq_psn
= be32_to_cpu(context
->next_send_psn
) & 0xffffff;
4016 qp_attr
->dest_qp_num
= be32_to_cpu(context
->log_pg_sz_remote_qpn
) & 0xffffff;
4017 qp_attr
->qp_access_flags
=
4018 to_ib_qp_access_flags(be32_to_cpu(context
->params2
));
4020 if (qp
->ibqp
.qp_type
== IB_QPT_RC
|| qp
->ibqp
.qp_type
== IB_QPT_UC
) {
4021 to_ib_ah_attr(dev
, &qp_attr
->ah_attr
, &context
->pri_path
);
4022 to_ib_ah_attr(dev
, &qp_attr
->alt_ah_attr
, &context
->alt_path
);
4023 qp_attr
->alt_pkey_index
=
4024 be16_to_cpu(context
->alt_path
.pkey_index
);
4025 qp_attr
->alt_port_num
= qp_attr
->alt_ah_attr
.port_num
;
4028 qp_attr
->pkey_index
= be16_to_cpu(context
->pri_path
.pkey_index
);
4029 qp_attr
->port_num
= context
->pri_path
.port
;
4031 /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
4032 qp_attr
->sq_draining
= mlx5_state
== MLX5_QP_STATE_SQ_DRAINING
;
4034 qp_attr
->max_rd_atomic
= 1 << ((be32_to_cpu(context
->params1
) >> 21) & 0x7);
4036 qp_attr
->max_dest_rd_atomic
=
4037 1 << ((be32_to_cpu(context
->params2
) >> 21) & 0x7);
4038 qp_attr
->min_rnr_timer
=
4039 (be32_to_cpu(context
->rnr_nextrecvpsn
) >> 24) & 0x1f;
4040 qp_attr
->timeout
= context
->pri_path
.ackto_lt
>> 3;
4041 qp_attr
->retry_cnt
= (be32_to_cpu(context
->params1
) >> 16) & 0x7;
4042 qp_attr
->rnr_retry
= (be32_to_cpu(context
->params1
) >> 13) & 0x7;
4043 qp_attr
->alt_timeout
= context
->alt_path
.ackto_lt
>> 3;
4050 int mlx5_ib_query_qp(struct ib_qp
*ibqp
, struct ib_qp_attr
*qp_attr
,
4051 int qp_attr_mask
, struct ib_qp_init_attr
*qp_init_attr
)
4053 struct mlx5_ib_dev
*dev
= to_mdev(ibqp
->device
);
4054 struct mlx5_ib_qp
*qp
= to_mqp(ibqp
);
4056 u8 raw_packet_qp_state
;
4058 if (unlikely(ibqp
->qp_type
== IB_QPT_GSI
))
4059 return mlx5_ib_gsi_query_qp(ibqp
, qp_attr
, qp_attr_mask
,
4062 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
4064 * Wait for any outstanding page faults, in case the user frees memory
4065 * based upon this query's result.
4067 flush_workqueue(mlx5_ib_page_fault_wq
);
4070 mutex_lock(&qp
->mutex
);
4072 if (qp
->ibqp
.qp_type
== IB_QPT_RAW_PACKET
) {
4073 err
= query_raw_packet_qp_state(dev
, qp
, &raw_packet_qp_state
);
4076 qp
->state
= raw_packet_qp_state
;
4077 qp_attr
->port_num
= 1;
4079 err
= query_qp_attr(dev
, qp
, qp_attr
);
4084 qp_attr
->qp_state
= qp
->state
;
4085 qp_attr
->cur_qp_state
= qp_attr
->qp_state
;
4086 qp_attr
->cap
.max_recv_wr
= qp
->rq
.wqe_cnt
;
4087 qp_attr
->cap
.max_recv_sge
= qp
->rq
.max_gs
;
4089 if (!ibqp
->uobject
) {
4090 qp_attr
->cap
.max_send_wr
= qp
->sq
.max_post
;
4091 qp_attr
->cap
.max_send_sge
= qp
->sq
.max_gs
;
4092 qp_init_attr
->qp_context
= ibqp
->qp_context
;
4094 qp_attr
->cap
.max_send_wr
= 0;
4095 qp_attr
->cap
.max_send_sge
= 0;
4098 qp_init_attr
->qp_type
= ibqp
->qp_type
;
4099 qp_init_attr
->recv_cq
= ibqp
->recv_cq
;
4100 qp_init_attr
->send_cq
= ibqp
->send_cq
;
4101 qp_init_attr
->srq
= ibqp
->srq
;
4102 qp_attr
->cap
.max_inline_data
= qp
->max_inline_data
;
4104 qp_init_attr
->cap
= qp_attr
->cap
;
4106 qp_init_attr
->create_flags
= 0;
4107 if (qp
->flags
& MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK
)
4108 qp_init_attr
->create_flags
|= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK
;
4110 if (qp
->flags
& MLX5_IB_QP_CROSS_CHANNEL
)
4111 qp_init_attr
->create_flags
|= IB_QP_CREATE_CROSS_CHANNEL
;
4112 if (qp
->flags
& MLX5_IB_QP_MANAGED_SEND
)
4113 qp_init_attr
->create_flags
|= IB_QP_CREATE_MANAGED_SEND
;
4114 if (qp
->flags
& MLX5_IB_QP_MANAGED_RECV
)
4115 qp_init_attr
->create_flags
|= IB_QP_CREATE_MANAGED_RECV
;
4116 if (qp
->flags
& MLX5_IB_QP_SQPN_QP1
)
4117 qp_init_attr
->create_flags
|= mlx5_ib_create_qp_sqpn_qp1();
4119 qp_init_attr
->sq_sig_type
= qp
->sq_signal_bits
& MLX5_WQE_CTRL_CQ_UPDATE
?
4120 IB_SIGNAL_ALL_WR
: IB_SIGNAL_REQ_WR
;
4123 mutex_unlock(&qp
->mutex
);
4127 struct ib_xrcd
*mlx5_ib_alloc_xrcd(struct ib_device
*ibdev
,
4128 struct ib_ucontext
*context
,
4129 struct ib_udata
*udata
)
4131 struct mlx5_ib_dev
*dev
= to_mdev(ibdev
);
4132 struct mlx5_ib_xrcd
*xrcd
;
4135 if (!MLX5_CAP_GEN(dev
->mdev
, xrc
))
4136 return ERR_PTR(-ENOSYS
);
4138 xrcd
= kmalloc(sizeof(*xrcd
), GFP_KERNEL
);
4140 return ERR_PTR(-ENOMEM
);
4142 err
= mlx5_core_xrcd_alloc(dev
->mdev
, &xrcd
->xrcdn
);
4145 return ERR_PTR(-ENOMEM
);
4148 return &xrcd
->ibxrcd
;
4151 int mlx5_ib_dealloc_xrcd(struct ib_xrcd
*xrcd
)
4153 struct mlx5_ib_dev
*dev
= to_mdev(xrcd
->device
);
4154 u32 xrcdn
= to_mxrcd(xrcd
)->xrcdn
;
4157 err
= mlx5_core_xrcd_dealloc(dev
->mdev
, xrcdn
);
4159 mlx5_ib_warn(dev
, "failed to dealloc xrcdn 0x%x\n", xrcdn
);