/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/hash.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>
#include "qp.h"
#include "vt.h"
#include "trace.h"
/*
 * Note that it is OK to post send work requests in the SQE and ERR
 * states; rvt_do_send() will process them and generate error
 * completions as per IB 1.2 C10-96.
 */
const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = 0,
	[IB_QPS_INIT] = RVT_POST_RECV_OK,
	[IB_QPS_RTR] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK,
	[IB_QPS_RTS] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK |
	    RVT_PROCESS_NEXT_SEND_OK,
	[IB_QPS_SQD] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK,
	[IB_QPS_SQE] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_FLUSH_SEND,
	[IB_QPS_ERR] = RVT_POST_RECV_OK | RVT_FLUSH_RECV |
	    RVT_POST_SEND_OK | RVT_FLUSH_SEND,
};
EXPORT_SYMBOL(ib_rvt_state_ops);
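
/*
 * Illustrative sketch (not part of the original source): consumers of
 * ib_rvt_state_ops mask the entry for the current QP state against the
 * operation they are about to perform, e.g.
 *
 *	if (!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))
 *		return -EINVAL;		(sends not allowed in this state)
 *
 * rvt_post_send() and rvt_post_recv() below do exactly this kind of check.
 */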
static void get_map_page(struct rvt_qpn_table *qpt,
			 struct rvt_qpn_map *map,
			 gfp_t gfp)
{
	unsigned long page = get_zeroed_page(gfp);

	/*
	 * Free the page if someone raced with us installing it.
	 */
	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}
/**
 * init_qpn_table - initialize the QP number table for a device
 * @rdi: rvt dev struct
 * @qpt: the QPN table
 */
static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
{
	u32 offset, i;
	struct rvt_qpn_map *map;
	int ret = 0;

	if (!(rdi->dparms.qpn_res_end >= rdi->dparms.qpn_res_start))
		return -EINVAL;

	spin_lock_init(&qpt->lock);

	qpt->last = rdi->dparms.qpn_start;
	qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift;

	/*
	 * Drivers may want some QPs beyond what we need for verbs; let them
	 * use our qpn table. No need for two. Go ahead and mark the bitmaps
	 * for those. The reserved range must be *after* the range which verbs
	 * will pick from.
	 */

	/* Figure out number of bit maps needed before reserved range */
	qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE;

	/* This should always be zero */
	offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK;

	/* Starting with the first reserved bit map */
	map = &qpt->map[qpt->nmaps];

	rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
		    rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
	for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
		if (!map->page) {
			get_map_page(qpt, map, GFP_KERNEL);
			if (!map->page) {
				ret = -ENOMEM;
				break;
			}
		}
		set_bit(offset, map->page);
		offset++;
		if (offset == RVT_BITS_PER_PAGE) {
			/* next page */
			qpt->nmaps++;
			map++;
			offset = 0;
		}
	}
	return ret;
}
/**
 * free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
static void free_qpn_table(struct rvt_qpn_table *qpt)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
		free_page((unsigned long)qpt->map[i].page);
}
/**
 * rvt_driver_qp_init - Init driver qp resources
 * @rdi: rvt dev structure
 *
 * Return: 0 on success
 */
int rvt_driver_qp_init(struct rvt_dev_info *rdi)
{
	int i;
	int ret = -ENOMEM;

	if (!rdi->dparms.qp_table_size)
		return -EINVAL;

	/*
	 * If driver is not doing any QP allocation then make sure it is
	 * providing the necessary QP functions.
	 */
	if (!rdi->driver_f.free_all_qps ||
	    !rdi->driver_f.qp_priv_alloc ||
	    !rdi->driver_f.qp_priv_free ||
	    !rdi->driver_f.notify_qp_reset)
		return -EINVAL;

	/* allocate parent object */
	rdi->qp_dev = kzalloc_node(sizeof(*rdi->qp_dev), GFP_KERNEL,
				   rdi->dparms.node);
	if (!rdi->qp_dev)
		return -ENOMEM;

	/* allocate hash table */
	rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
	rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
	rdi->qp_dev->qp_table =
		kmalloc_node(rdi->qp_dev->qp_table_size *
			     sizeof(*rdi->qp_dev->qp_table),
			     GFP_KERNEL, rdi->dparms.node);
	if (!rdi->qp_dev->qp_table)
		goto no_qp_table;

	for (i = 0; i < rdi->qp_dev->qp_table_size; i++)
		RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL);

	spin_lock_init(&rdi->qp_dev->qpt_lock);

	/* initialize qpn map */
	if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table))
		goto fail_table;

	spin_lock_init(&rdi->n_qps_lock);

	return 0;

fail_table:
	kfree(rdi->qp_dev->qp_table);
	free_qpn_table(&rdi->qp_dev->qpn_table);

no_qp_table:
	kfree(rdi->qp_dev);

	return ret;
}
/**
 * rvt_free_all_qps - check for QPs still in use
 * @rdi: rvt dev info structure
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 */
static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
{
	unsigned long flags;
	struct rvt_qp *qp;
	unsigned n, qp_inuse = 0;
	spinlock_t *ql; /* work around too long line below */

	if (rdi->driver_f.free_all_qps)
		qp_inuse = rdi->driver_f.free_all_qps(rdi);

	qp_inuse += rvt_mcast_tree_empty(rdi);

	if (!rdi->qp_dev)
		return qp_inuse;

	ql = &rdi->qp_dev->qpt_lock;
	spin_lock_irqsave(ql, flags);
	for (n = 0; n < rdi->qp_dev->qp_table_size; n++) {
		qp = rcu_dereference_protected(rdi->qp_dev->qp_table[n],
					       lockdep_is_held(ql));
		RCU_INIT_POINTER(rdi->qp_dev->qp_table[n], NULL);

		for (; qp; qp = rcu_dereference_protected(qp->next,
							  lockdep_is_held(ql)))
			qp_inuse++;
	}
	spin_unlock_irqrestore(ql, flags);
	synchronize_rcu();
	return qp_inuse;
}
/**
 * rvt_qp_exit - clean up qps on device exit
 * @rdi: rvt dev structure
 *
 * Check for qp leaks and free resources.
 */
void rvt_qp_exit(struct rvt_dev_info *rdi)
{
	u32 qps_inuse = rvt_free_all_qps(rdi);

	if (qps_inuse)
		rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
			   qps_inuse);
	if (!rdi->qp_dev)
		return;

	kfree(rdi->qp_dev->qp_table);
	free_qpn_table(&rdi->qp_dev->qpn_table);
	kfree(rdi->qp_dev);
}
static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
{
	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}
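
/*
 * Worked example (illustrative only): bit 'off' of bitmap page 2 maps to
 *
 *	qpn = 2 * RVT_BITS_PER_PAGE + off
 *
 * so mk_qpn() is the inverse of the split used by alloc_qpn() and
 * free_qpn() below:
 *
 *	map    = &qpt->map[qpn / RVT_BITS_PER_PAGE];
 *	offset = qpn & RVT_BITS_PER_PAGE_MASK;
 */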
/**
 * alloc_qpn - Allocate the next available qpn or zero/one for QP type
 *	       IB_QPT_SMI/IB_QPT_GSI
 * @rdi: rvt device info structure
 * @qpt: queue pair number table pointer
 * @type: the QP type
 * @port_num: IB port number, 1 based, comes from core
 * @gfp: allocation flags
 *
 * Return: The queue pair number
 */
static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
		     enum ib_qp_type type, u8 port_num, gfp_t gfp)
{
	u32 i, offset, max_scan, qpn;
	struct rvt_qpn_map *map;
	u32 ret;

	if (rdi->driver_f.alloc_qpn)
		return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num, gfp);

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port_num - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

	qpn = qpt->last + qpt->incr;
	if (qpn >= RVT_QPN_MAX)
		qpn = qpt->incr | ((qpt->last & 1) ^ 1);
	/* offset carries bit 0 */
	offset = qpn & RVT_BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map, gfp);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset += qpt->incr;
			/*
			 * This qpn might be bogus if offset >= BITS_PER_PAGE.
			 * That is OK.  It gets re-assigned below.
			 */
			qpn = mk_qpn(qpt, map, offset);
		} while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			/* start at incr with current bit 0 */
			offset = qpt->incr | (offset & 1);
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			/* start at incr with current bit 0 */
			offset = qpt->incr | (offset & 1);
		} else {
			map = &qpt->map[0];
			/* wrap to first map page, invert bit 0 */
			offset = qpt->incr | ((offset & 1) ^ 1);
		}
		/* there can be no set bits in low-order QoS bits */
		WARN_ON(offset & (BIT(rdi->dparms.qos_shift) - 1));
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}
static void free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
	struct rvt_qpn_map *map;

	map = qpt->map + qpn / RVT_BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
}
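
/*
 * Illustrative pairing (a sketch, not from the original source): a QPN
 * handed out by alloc_qpn() is returned to the bitmap with free_qpn()
 * when the QP is torn down, e.g.
 *
 *	int qpn = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
 *			    IB_QPT_RC, 1, GFP_KERNEL);
 *	...
 *	free_qpn(&rdi->qp_dev->qpn_table, qpn);
 *
 * rvt_create_qp() below allocates and, on its error path, frees QPNs
 * exactly this way.
 */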
/**
 * rvt_clear_mr_refs - Drop held mr refs
 * @qp: rvt qp data structure
 * @clr_sends: Whether to clear the send side or not
 */
static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
{
	unsigned n;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
		rvt_put_ss(&qp->s_rdma_read_sge);

	rvt_put_ss(&qp->r_sge);

	if (clr_sends) {
		while (qp->s_last != qp->s_head) {
			struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);
			unsigned i;

			for (i = 0; i < wqe->wr.num_sge; i++) {
				struct rvt_sge *sge = &wqe->sg_list[i];

				rvt_put_mr(sge->mr);
			}
			if (qp->ibqp.qp_type == IB_QPT_UD ||
			    qp->ibqp.qp_type == IB_QPT_SMI ||
			    qp->ibqp.qp_type == IB_QPT_GSI)
				atomic_dec(&ibah_to_rvtah(
						wqe->ud_wr.ah)->refcount);
			if (++qp->s_last >= qp->s_size)
				qp->s_last = 0;
			smp_wmb(); /* see qp_set_savail */
		}
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
	}

	if (qp->ibqp.qp_type != IB_QPT_RC)
		return;

	for (n = 0; n < rvt_max_atomic(rdi); n++) {
		struct rvt_ack_entry *e = &qp->s_ack_queue[n];

		if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
		    e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
	}
}
/**
 * rvt_remove_qp - remove qp from table
 * @rdi: rvt dev struct
 * @qp: qp to remove
 *
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive routine.
 */
static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
	u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
	unsigned long flags;
	int removed = 1;

	spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

	if (rcu_dereference_protected(rvp->qp[0],
			lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(rvp->qp[0], NULL);
	} else if (rcu_dereference_protected(rvp->qp[1],
			lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(rvp->qp[1], NULL);
	} else {
		struct rvt_qp *q;
		struct rvt_qp __rcu **qpp;

		removed = 0;
		qpp = &rdi->qp_dev->qp_table[n];
		for (; (q = rcu_dereference_protected(*qpp,
			lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL;
			qpp = &q->next) {
			if (q == qp) {
				RCU_INIT_POINTER(*qpp,
				     rcu_dereference_protected(qp->next,
				     lockdep_is_held(&rdi->qp_dev->qpt_lock)));
				removed = 1;
				trace_rvt_qpremove(qp, n);
				break;
			}
		}
	}

	spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
	if (removed) {
		synchronize_rcu();
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}
/**
 * rvt_reset_qp - initialize the QP state to the reset state
 * @rdi: rvt dev structure
 * @qp: the QP to reset
 * @type: the QP type
 *
 * r and s lock are required to be held by the caller
 */
static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			 enum ib_qp_type type)
	__releases(&qp->s_lock)
	__releases(&qp->s_hlock)
	__releases(&qp->r_lock)
	__acquires(&qp->r_lock)
	__acquires(&qp->s_hlock)
	__acquires(&qp->s_lock)
{
	if (qp->state != IB_QPS_RESET) {
		qp->state = IB_QPS_RESET;

		/* Let drivers flush their waitlist */
		rdi->driver_f.flush_qp_waiters(qp);
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
		spin_unlock(&qp->s_lock);
		spin_unlock(&qp->s_hlock);
		spin_unlock_irq(&qp->r_lock);

		/* Stop the send queue and the retry timer */
		rdi->driver_f.stop_send_queue(qp);

		/* Wait for things to stop */
		rdi->driver_f.quiesce_qp(qp);

		/* take qp out of the hash and wait for it to be unused */
		rvt_remove_qp(rdi, qp);
		wait_event(qp->wait, !atomic_read(&qp->refcount));

		/* grab the lock b/c it was locked at call time */
		spin_lock_irq(&qp->r_lock);
		spin_lock(&qp->s_hlock);
		spin_lock(&qp->s_lock);

		rvt_clear_mr_refs(qp, 1);
	}

	/*
	 * Let the driver do any tear down it needs to for a qp
	 * that has been reset
	 */
	rdi->driver_f.notify_qp_reset(qp);

	qp->qp_access_flags = 0;
	qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
	qp->s_sending_psn = 0;
	qp->s_sending_hpsn = 0;
	if (type == IB_QPT_RC) {
		qp->s_state = IB_OPCODE_RC_SEND_LAST;
		qp->r_state = IB_OPCODE_RC_SEND_LAST;
	} else {
		qp->s_state = IB_OPCODE_UC_SEND_LAST;
		qp->r_state = IB_OPCODE_UC_SEND_LAST;
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->s_mig_state = IB_MIG_MIGRATED;
	qp->r_head_ack_queue = 0;
	qp->s_tail_ack_queue = 0;
	qp->s_num_rd_atomic = 0;
	if (qp->r_rq.wq) {
		qp->r_rq.wq->head = 0;
		qp->r_rq.wq->tail = 0;
	}
	qp->r_sge.num_sge = 0;
}
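
/*
 * Illustrative sketch (not from the original source): callers of
 * rvt_reset_qp() hold the three QP locks in this order, mirroring what
 * rvt_destroy_qp() and rvt_modify_qp() below do:
 *
 *	spin_lock_irq(&qp->r_lock);
 *	spin_lock(&qp->s_hlock);
 *	spin_lock(&qp->s_lock);
 *	rvt_reset_qp(rdi, qp, ibqp->qp_type);
 *	spin_unlock(&qp->s_lock);
 *	spin_unlock(&qp->s_hlock);
 *	spin_unlock_irq(&qp->r_lock);
 */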
/**
 * rvt_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Queue pair creation is mostly an rvt issue. However, drivers have their own
 * unique idea of what queue pair numbers mean. For instance there is a
 * reserved range for PSM.
 *
 * Return: the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata)
{
	struct rvt_qp *qp;
	int err;
	struct rvt_swqe *swq = NULL;
	size_t sz;
	size_t sg_list_sz;
	struct ib_qp *ret = ERR_PTR(-ENOMEM);
	struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
	void *priv = NULL;
	gfp_t gfp;

	if (!rdi)
		return ERR_PTR(-EINVAL);

	if (init_attr->cap.max_send_sge > rdi->dparms.props.max_sge ||
	    init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
	    init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
		return ERR_PTR(-EINVAL);

	/* GFP_NOIO is applicable to RC QP's only */
	if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
	    init_attr->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);

	gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
						GFP_NOIO : GFP_KERNEL;

	/* Check receive queue parameters if no SRQ is specified. */
	if (!init_attr->srq) {
		if (init_attr->cap.max_recv_sge > rdi->dparms.props.max_sge ||
		    init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
			return ERR_PTR(-EINVAL);

		if (init_attr->cap.max_send_sge +
		    init_attr->cap.max_send_wr +
		    init_attr->cap.max_recv_sge +
		    init_attr->cap.max_recv_wr == 0)
			return ERR_PTR(-EINVAL);
	}

	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (init_attr->port_num == 0 ||
		    init_attr->port_num > ibpd->device->phys_port_cnt)
			return ERR_PTR(-EINVAL);
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
		sz = sizeof(struct rvt_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct rvt_swqe);
		if (gfp == GFP_NOIO)
			swq = __vmalloc(
				(init_attr->cap.max_send_wr + 1) * sz,
				gfp | __GFP_ZERO, PAGE_KERNEL);
		else
			swq = vzalloc_node(
				(init_attr->cap.max_send_wr + 1) * sz,
				rdi->dparms.node);
		if (!swq)
			return ERR_PTR(-ENOMEM);

		sz = sizeof(*qp);
		sg_list_sz = 0;
		if (init_attr->srq) {
			struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);

			if (srq->rq.max_sge > 1)
				sg_list_sz = sizeof(*qp->r_sg_list) *
					(srq->rq.max_sge - 1);
		} else if (init_attr->cap.max_recv_sge > 1)
			sg_list_sz = sizeof(*qp->r_sg_list) *
				(init_attr->cap.max_recv_sge - 1);
		qp = kzalloc_node(sz + sg_list_sz, gfp, rdi->dparms.node);
		if (!qp)
			goto bail_swq;

		RCU_INIT_POINTER(qp->next, NULL);
		if (init_attr->qp_type == IB_QPT_RC) {
			qp->s_ack_queue =
				kzalloc_node(
					sizeof(*qp->s_ack_queue) *
					 rvt_max_atomic(rdi),
					gfp,
					rdi->dparms.node);
			if (!qp->s_ack_queue)
				goto bail_qp;
		}

		/*
		 * Driver needs to set up its private QP structure and do any
		 * initialization that is needed.
		 */
		priv = rdi->driver_f.qp_priv_alloc(rdi, qp, gfp);
		if (!priv)
			goto bail_qp;
		qp->priv = priv;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
					 1000UL);
		if (init_attr->srq) {
			sz = 0;
		} else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct rvt_rwqe);
			if (udata)
				qp->r_rq.wq = vmalloc_user(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz);
			else if (gfp == GFP_NOIO)
				qp->r_rq.wq = __vmalloc(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz,
						gfp | __GFP_ZERO, PAGE_KERNEL);
			else
				qp->r_rq.wq = vzalloc_node(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz,
						rdi->dparms.node);
			if (!qp->r_rq.wq)
				goto bail_driver_priv;
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->r_lock);
		spin_lock_init(&qp->s_hlock);
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		init_waitqueue_head(&qp->wait);
		init_timer(&qp->s_timer);
		qp->s_timer.data = (unsigned long)qp;
		INIT_LIST_HEAD(&qp->rspwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = init_attr->cap.max_send_wr + 1;
		qp->s_avail = init_attr->cap.max_send_wr;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = RVT_S_SIGNAL_REQ_WR;

		err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
				init_attr->qp_type,
				init_attr->port_num, gfp);
		if (err < 0) {
			ret = ERR_PTR(err);
			goto bail_rq_wq;
		}
		qp->ibqp.qp_num = err;
		qp->port_num = init_attr->port_num;
		rvt_reset_qp(rdi, qp, init_attr->qp_type);
		break;

	default:
		/* Don't support raw QPs */
		return ERR_PTR(-EINVAL);
	}

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See rvt_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_qpn;
			}
		} else {
			u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;

			qp->ip = rvt_create_mmap_info(rdi, s,
						      ibpd->uobject->context,
						      qp->r_rq.wq);
			if (!qp->ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qpn;
			}

			err = ib_copy_to_udata(udata, &qp->ip->offset,
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
		qp->pid = current->pid;
	}

	spin_lock(&rdi->n_qps_lock);
	if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
		spin_unlock(&rdi->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	rdi->n_qps_allocated++;
	/*
	 * Maintain a busy_jiffies variable that will be added to the timeout
	 * period in mod_retry_timer and add_retry_timer. This busy jiffies
	 * is scaled by the number of rc qps created for the device to reduce
	 * the number of timeouts occurring when there is a large number of
	 * qps. busy_jiffies is incremented every rc qp scaling interval.
	 * The scaling interval is selected based on extensive performance
	 * evaluation of targeted workloads.
	 */
	if (init_attr->qp_type == IB_QPT_RC) {
		rdi->n_rc_qps++;
		rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
	}
	spin_unlock(&rdi->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&rdi->pending_lock);
		list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps);
		spin_unlock_irq(&rdi->pending_lock);
	}

	ret = &qp->ibqp;

	/*
	 * We have our QP and it's good, now keep track of what types of opcodes
	 * can be processed on this QP. We do this by keeping track of what the
	 * 3 high order bits of the opcode are.
	 */
	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_UD:
		qp->allowed_ops = IB_OPCODE_UD;
		break;
	case IB_QPT_RC:
		qp->allowed_ops = IB_OPCODE_RC;
		break;
	case IB_QPT_UC:
		qp->allowed_ops = IB_OPCODE_UC;
		break;
	default:
		ret = ERR_PTR(-EINVAL);
		goto bail_ip;
	}

	return ret;

bail_ip:
	kref_put(&qp->ip->ref, rvt_release_mmap_info);

bail_qpn:
	free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

bail_rq_wq:
	vfree(qp->r_rq.wq);

bail_driver_priv:
	rdi->driver_f.qp_priv_free(rdi, qp);

bail_qp:
	kfree(qp->s_ack_queue);
	kfree(qp);

bail_swq:
	vfree(swq);

	return ret;
}
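
/*
 * Illustrative caller-side sketch (hypothetical consumer, not part of this
 * file): rvt_create_qp() is reached through the core ib_create_qp() verb,
 * roughly as follows:
 *
 *	struct ib_qp_init_attr attr = {
 *		.qp_type = IB_QPT_RC,
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.cap = { .max_send_wr = 16, .max_recv_wr = 16,
 *			 .max_send_sge = 1, .max_recv_sge = 1 },
 *		.send_cq = cq, .recv_cq = cq,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &attr);
 */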
/**
 * rvt_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 *
 * Return: true if last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
{
	int ret = 0;
	struct ib_wc wc;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto bail;

	qp->state = IB_QPS_ERR;

	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
		del_timer(&qp->s_timer);
	}

	if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
		qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;

	rdi->driver_f.notify_error_qp(qp);

	/* Schedule the sending tasklet to drain the send work queue. */
	if (ACCESS_ONCE(qp->s_last) != qp->s_head)
		rdi->driver_f.schedule_send(qp);

	rvt_clear_mr_refs(qp, 0);

	memset(&wc, 0, sizeof(wc));
	wc.qp = &qp->ibqp;
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	if (qp->r_rq.wq) {
		struct rvt_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		while (tail != head) {
			wc.wr_id = rvt_get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler) {
		ret = 1;
	}

bail:
	return ret;
}
EXPORT_SYMBOL(rvt_error_qp);
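
/*
 * Illustrative use (a sketch, not taken from a specific driver): a caller
 * that detects a fatal QP error while holding r_lock and s_lock can move
 * the QP to the error state and, if requested, raise the last-WQE event:
 *
 *	if (rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR)) {
 *		ev.device = qp->ibqp.device;
 *		ev.element.qp = &qp->ibqp;
 *		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
 *		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
 *	}
 *
 * rvt_modify_qp() below follows exactly this pattern.
 */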
/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
	unsigned long flags;

	atomic_inc(&qp->refcount);
	spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

	if (qp->ibqp.qp_num <= 1) {
		rcu_assign_pointer(rvp->qp[qp->ibqp.qp_num], qp);
	} else {
		u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);

		qp->next = rdi->qp_dev->qp_table[n];
		rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp);
		trace_rvt_qpinsert(qp, n);
	}

	spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
}
/**
 * rvt_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Return: 0 on success, otherwise returns an errno.
 */
int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata)
{
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct ib_event ev;
	int lastwqe = 0;
	int mig = 0;
	int pmtu = 0; /* for gcc warning only */
	enum rdma_link_layer link;

	link = rdma_port_get_link_layer(ibqp->device, qp->port_num);

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_hlock);
	spin_lock(&qp->s_lock);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask, link))
		goto inval;

	if (rdi->driver_f.check_modify_qp &&
	    rdi->driver_f.check_modify_qp(qp, attr, attr_mask, udata))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (attr->ah_attr.dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
			goto inval;
		if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
			goto inval;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_ah_attr.dlid >=
		    be16_to_cpu(IB_MULTICAST_LID_BASE))
			goto inval;
		if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
			goto inval;
		if (attr->alt_pkey_index >= rvt_get_npkeys(rdi))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= rvt_get_npkeys(rdi))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (qp->ibqp.qp_type == IB_QPT_SMI ||
		    qp->ibqp.qp_type == IB_QPT_GSI ||
		    attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	if (attr_mask & IB_QP_DEST_QPN)
		if (attr->dest_qp_num > RVT_QPN_MASK)
			goto inval;

	if (attr_mask & IB_QP_RETRY_CNT)
		if (attr->retry_cnt > 7)
			goto inval;

	if (attr_mask & IB_QP_RNR_RETRY)
		if (attr->rnr_retry > 7)
			goto inval;

	/*
	 * Don't allow invalid path_mtu values. OK to set greater
	 * than the active mtu (or even the max_cap, if we have tuned
	 * that to a small mtu). We'll set qp->path_mtu
	 * to the lesser of requested attribute mtu and active,
	 * for packetizing messages.
	 * Note that the QP port has to be set in INIT and MTU in RTR.
	 */
	if (attr_mask & IB_QP_PATH_MTU) {
		pmtu = rdi->driver_f.get_pmtu_from_attr(rdi, qp, attr);
		if (pmtu < 0)
			goto inval;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		if (attr->path_mig_state == IB_MIG_REARM) {
			if (qp->s_mig_state == IB_MIG_ARMED)
				goto inval;
			if (new_state != IB_QPS_RTS)
				goto inval;
		} else if (attr->path_mig_state == IB_MIG_MIGRATED) {
			if (qp->s_mig_state == IB_MIG_REARM)
				goto inval;
			if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
				goto inval;
			if (qp->s_mig_state == IB_MIG_ARMED)
				mig = 1;
		} else {
			goto inval;
		}
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > rdi->dparms.max_rdma_atomic)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		if (qp->state != IB_QPS_RESET)
			rvt_reset_qp(rdi, qp, ibqp->qp_type);
		break;

	case IB_QPS_RTR:
		/* Allow event to re-trigger if QP set to RTR more than once */
		qp->r_flags &= ~RVT_R_COMM_EST;
		qp->state = new_state;
		break;

	case IB_QPS_SQD:
		qp->s_draining = qp->s_last != qp->s_cur;
		qp->state = new_state;
		break;

	case IB_QPS_SQE:
		if (qp->ibqp.qp_type == IB_QPT_RC)
			goto inval;
		qp->state = new_state;
		break;

	case IB_QPS_ERR:
		lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		qp->state = new_state;
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_PORT)
		qp->port_num = attr->port_num;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_next_psn = attr->sq_psn & rdi->dparms.psn_modify_mask;
		qp->s_psn = qp->s_next_psn;
		qp->s_sending_psn = qp->s_next_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
		qp->s_sending_hpsn = qp->s_last_psn;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn & rdi->dparms.psn_modify_mask;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV) {
		qp->remote_ah_attr = attr->ah_attr;
		qp->s_srate = attr->ah_attr.static_rate;
		qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		qp->alt_ah_attr = attr->alt_ah_attr;
		qp->s_alt_pkey_index = attr->alt_pkey_index;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		qp->s_mig_state = attr->path_mig_state;
		if (mig) {
			qp->remote_ah_attr = qp->alt_ah_attr;
			qp->port_num = qp->alt_ah_attr.port_num;
			qp->s_pkey_index = qp->s_alt_pkey_index;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		qp->pmtu = rdi->driver_f.mtu_from_qp(rdi, qp, pmtu);
		qp->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu);
		qp->log_pmtu = ilog2(qp->pmtu);
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		qp->s_retry_cnt = attr->retry_cnt;
		qp->s_retry = attr->retry_cnt;
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry_cnt = attr->rnr_retry;
		qp->s_rnr_retry = attr->rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT) {
		qp->timeout = attr->timeout;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
					 1000UL);
	}

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	if (rdi->driver_f.modify_qp)
		rdi->driver_f.modify_qp(qp, attr, attr_mask, udata);

	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		rvt_insert_qp(rdi, qp);

	if (lastwqe) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	if (mig) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_PATH_MIG;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	return 0;

inval:
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);
	return -EINVAL;
}
/**
 * rvt_free_qpn - Free a qpn from the bit map
 * @qpt: QP table
 * @qpn: queue pair number to free
 */
static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
	struct rvt_qpn_map *map;

	map = qpt->map + qpn / RVT_BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
}
/**
 * rvt_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 *
 * Return: 0 on success.
 */
int rvt_destroy_qp(struct ib_qp *ibqp)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_hlock);
	spin_lock(&qp->s_lock);
	rvt_reset_qp(rdi, qp, ibqp->qp_type);
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);

	/* qpn is now available for use again */
	rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

	spin_lock(&rdi->n_qps_lock);
	rdi->n_qps_allocated--;
	if (qp->ibqp.qp_type == IB_QPT_RC) {
		rdi->n_rc_qps--;
		rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
	}
	spin_unlock(&rdi->n_qps_lock);

	if (qp->ip)
		kref_put(&qp->ip->ref, rvt_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	vfree(qp->s_wq);
	rdi->driver_f.qp_priv_free(rdi, qp);
	kfree(qp->s_ack_queue);
	kfree(qp);
	return 0;
}
/**
 * rvt_query_qp - query an ibqp
 * @ibqp: IB qp to query
 * @attr: attr struct to fill in
 * @attr_mask: attr mask ignored
 * @init_attr: struct to fill in
 *
 * Return: always 0
 */
int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		 int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	attr->qp_state = qp->state;
	attr->cur_qp_state = attr->qp_state;
	attr->path_mtu = qp->path_mtu;
	attr->path_mig_state = qp->s_mig_state;
	attr->qkey = qp->qkey;
	attr->rq_psn = qp->r_psn & rdi->dparms.psn_mask;
	attr->sq_psn = qp->s_next_psn & rdi->dparms.psn_mask;
	attr->dest_qp_num = qp->remote_qpn;
	attr->qp_access_flags = qp->qp_access_flags;
	attr->cap.max_send_wr = qp->s_size - 1;
	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
	attr->cap.max_send_sge = qp->s_max_sge;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	attr->ah_attr = qp->remote_ah_attr;
	attr->alt_ah_attr = qp->alt_ah_attr;
	attr->pkey_index = qp->s_pkey_index;
	attr->alt_pkey_index = qp->s_alt_pkey_index;
	attr->en_sqd_async_notify = 0;
	attr->sq_draining = qp->s_draining;
	attr->max_rd_atomic = qp->s_max_rd_atomic;
	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
	attr->min_rnr_timer = qp->r_min_rnr_timer;
	attr->port_num = qp->port_num;
	attr->timeout = qp->timeout;
	attr->retry_cnt = qp->s_retry_cnt;
	attr->rnr_retry = qp->s_rnr_retry_cnt;
	attr->alt_port_num = qp->alt_ah_attr.port_num;
	attr->alt_timeout = qp->alt_timeout;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	if (qp->s_flags & RVT_S_SIGNAL_REQ_WR)
		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	else
		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->port_num = qp->port_num;
	return 0;
}
/**
 * rvt_post_recv - post a receive on a QP
 * @ibqp: the QP to post the receive on
 * @wr: the WR to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success otherwise errno
 */
int rvt_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		  struct ib_recv_wr **bad_wr)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_rwq *wq = qp->r_rq.wq;
	unsigned long flags;
	int qp_err_flush = (ib_rvt_state_ops[qp->state] & RVT_FLUSH_RECV) &&
				!qp->ibqp.srq;

	/* Check that state is OK to post receive. */
	if (!(ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) || !wq) {
		*bad_wr = wr;
		return -EINVAL;
	}

	for (; wr; wr = wr->next) {
		struct rvt_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned)wr->num_sge > qp->r_rq.max_sge) {
			*bad_wr = wr;
			return -EINVAL;
		}

		spin_lock_irqsave(&qp->r_rq.lock, flags);
		next = wq->head + 1;
		if (next >= qp->r_rq.size)
			next = 0;
		if (next == wq->tail) {
			spin_unlock_irqrestore(&qp->r_rq.lock, flags);
			*bad_wr = wr;
			return -ENOMEM;
		}
		if (unlikely(qp_err_flush)) {
			struct ib_wc wc;

			memset(&wc, 0, sizeof(wc));
			wc.qp = &qp->ibqp;
			wc.opcode = IB_WC_RECV;
			wc.wr_id = wr->wr_id;
			wc.status = IB_WC_WR_FLUSH_ERR;
			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
		} else {
			wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head);
			wqe->wr_id = wr->wr_id;
			wqe->num_sge = wr->num_sge;
			for (i = 0; i < wr->num_sge; i++)
				wqe->sg_list[i] = wr->sg_list[i];
			/*
			 * Make sure queue entry is written
			 * before the head index.
			 */
			smp_wmb();
			wq->head = next;
		}
		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
	}
	return 0;
}
/**
 * qp_get_savail - return number of avail send entries
 * @qp: the qp
 *
 * This assumes the s_hlock is held but the s_last
 * qp variable is uncontrolled.
 */
static inline u32 qp_get_savail(struct rvt_qp *qp)
{
	u32 slast;
	u32 ret;

	smp_read_barrier_depends(); /* see rc.c */
	slast = ACCESS_ONCE(qp->s_last);
	if (qp->s_head >= slast)
		ret = qp->s_size - (qp->s_head - slast);
	else
		ret = slast - qp->s_head;
	return ret - 1;
}
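
/*
 * Worked example (illustrative, given the definition above): with
 * s_size = 8, s_head = 6 and s_last = 2, the ring holds 6 - 2 = 4
 * posted entries, so
 *
 *	qp_get_savail() = (8 - (6 - 2)) - 1 = 3
 *
 * One slot is always kept free so that head == tail unambiguously
 * means "empty".
 */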
/**
 * rvt_post_one_wr - post one RC, UC, or UD send work request
 * @qp: the QP to post on
 * @wr: the work request to send
 */
static int rvt_post_one_wr(struct rvt_qp *qp,
			   struct ib_send_wr *wr,
			   int *call_send)
{
	struct rvt_swqe *wqe;
	u32 next;
	int i;
	int j;
	int acc;
	struct rvt_lkey_table *rkt;
	struct rvt_pd *pd;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
	u8 log_pmtu;
	int ret;

	/* IB spec says that num_sge == 0 is OK. */
	if (unlikely(wr->num_sge > qp->s_max_sge))
		return -EINVAL;

	/*
	 * Don't allow RDMA reads or atomic operations on UC or
	 * undefined operations.
	 * Make sure buffer is large enough to hold the result for atomics.
	 */
	if (qp->ibqp.qp_type == IB_QPT_UC) {
		if ((unsigned)wr->opcode >= IB_WR_RDMA_READ)
			return -EINVAL;
	} else if (qp->ibqp.qp_type != IB_QPT_RC) {
		/* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */
		if (wr->opcode != IB_WR_SEND &&
		    wr->opcode != IB_WR_SEND_WITH_IMM)
			return -EINVAL;
		/* Check UD destination address PD */
		if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
			return -EINVAL;
	} else if ((unsigned)wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) {
		return -EINVAL;
	} else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
		   (wr->num_sge == 0 ||
		    wr->sg_list[0].length < sizeof(u64) ||
		    wr->sg_list[0].addr & (sizeof(u64) - 1))) {
		return -EINVAL;
	} else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic) {
		return -EINVAL;
	}

	/* check for avail */
	if (unlikely(!qp->s_avail)) {
		qp->s_avail = qp_get_savail(qp);
		if (WARN_ON(qp->s_avail > (qp->s_size - 1)))
			rvt_pr_err(rdi,
				   "More avail entries than QP RB size.\nQP: %u, size: %u, avail: %u\nhead: %u, tail: %u, cur: %u, acked: %u, last: %u",
				   qp->ibqp.qp_num, qp->s_size, qp->s_avail,
				   qp->s_head, qp->s_tail, qp->s_cur,
				   qp->s_acked, qp->s_last);
		if (!qp->s_avail)
			return -ENOMEM;
	}
	next = qp->s_head + 1;
	if (next >= qp->s_size)
		next = 0;

	rkt = &rdi->lkey_table;
	pd = ibpd_to_rvtpd(qp->ibqp.pd);
	wqe = rvt_get_swqe_ptr(qp, qp->s_head);

	if (qp->ibqp.qp_type != IB_QPT_UC &&
	    qp->ibqp.qp_type != IB_QPT_RC)
		memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr));
	else if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
		 wr->opcode == IB_WR_RDMA_WRITE ||
		 wr->opcode == IB_WR_RDMA_READ)
		memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr));
	else if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		 wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
		memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr));
	else
		memcpy(&wqe->wr, wr, sizeof(wqe->wr));

	wqe->length = 0;
	j = 0;
	if (wr->num_sge) {
		acc = wr->opcode >= IB_WR_RDMA_READ ?
			IB_ACCESS_LOCAL_WRITE : 0;
		for (i = 0; i < wr->num_sge; i++) {
			u32 length = wr->sg_list[i].length;
			int ok;

			if (length == 0)
				continue;
			ok = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j],
					 &wr->sg_list[i], acc);
			if (!ok) {
				ret = -EINVAL;
				goto bail_inval_free;
			}
			wqe->length += length;
			j++;
		}
		wqe->wr.num_sge = j;
	}

	/* general part of wqe valid - allow for driver checks */
	if (rdi->driver_f.check_send_wqe) {
		ret = rdi->driver_f.check_send_wqe(qp, wqe);
		if (ret < 0)
			goto bail_inval_free;
		if (ret)
			*call_send = ret;
	}

	log_pmtu = qp->log_pmtu;
	if (qp->ibqp.qp_type != IB_QPT_UC &&
	    qp->ibqp.qp_type != IB_QPT_RC) {
		struct rvt_ah *ah = ibah_to_rvtah(wqe->ud_wr.ah);

		log_pmtu = ah->log_pmtu;
		atomic_inc(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount);
	}

	wqe->ssn = qp->s_ssn++;
	wqe->psn = qp->s_next_psn;
	wqe->lpsn = wqe->psn +
			(wqe->length ? ((wqe->length - 1) >> log_pmtu) : 0);
	qp->s_next_psn = wqe->lpsn + 1;
	trace_rvt_post_one_wr(qp, wqe);
	smp_wmb(); /* see request builders */
	qp->s_avail--;
	qp->s_head = next;

	return 0;

bail_inval_free:
	/* release mr holds */
	while (j) {
		struct rvt_sge *sge = &wqe->sg_list[--j];

		rvt_put_mr(sge->mr);
	}
	return ret;
}
/**
 * rvt_post_send - post a send on a QP
 * @ibqp: the QP to post the send on
 * @wr: the list of work requests to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success else errno
 */
int rvt_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		  struct ib_send_wr **bad_wr)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	unsigned long flags = 0;
	int call_send;
	unsigned nreq = 0;
	int err = 0;

	spin_lock_irqsave(&qp->s_hlock, flags);

	/*
	 * Ensure QP state is such that we can send. If not, bail out early;
	 * there is no need to do this every time we post a send.
	 */
	if (unlikely(!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))) {
		spin_unlock_irqrestore(&qp->s_hlock, flags);
		return -EINVAL;
	}

	/*
	 * If the send queue is empty and we only have a single WR then just go
	 * ahead and kick the send engine into gear. Otherwise we will always
	 * just schedule the send to happen later.
	 */
	call_send = qp->s_head == ACCESS_ONCE(qp->s_last) && !wr->next;

	for (; wr; wr = wr->next) {
		err = rvt_post_one_wr(qp, wr, &call_send);
		if (unlikely(err)) {
			*bad_wr = wr;
			break;
		}
		nreq++;
	}

	spin_unlock_irqrestore(&qp->s_hlock, flags);
	if (nreq) {
		if (call_send)
			rdi->driver_f.do_send(qp);
		else
			rdi->driver_f.schedule_send_no_lock(qp);
	}
	return err;
}
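
/*
 * Illustrative caller-side sketch (hypothetical consumer, not part of this
 * file): an ULP posts to an rvt-backed QP through the normal verbs entry
 * point, which reaches rvt_post_send() via the driver's ib_device ops:
 *
 *	struct ib_send_wr wr = { .opcode = IB_WR_SEND, .num_sge = 1,
 *				 .sg_list = &sge,
 *				 .send_flags = IB_SEND_SIGNALED };
 *	struct ib_send_wr *bad_wr;
 *	int err = ib_post_send(qp, &wr, &bad_wr);
 */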
/**
 * rvt_post_srq_recv - post a receive on a shared receive queue
 * @ibsrq: the SRQ to post the receive on
 * @wr: the list of work requests to post
 * @bad_wr: A pointer to the first WR to cause a problem is put here
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success else errno
 */
int rvt_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
	struct rvt_rwq *wq;
	unsigned long flags;

	for (; wr; wr = wr->next) {
		struct rvt_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned)wr->num_sge > srq->rq.max_sge) {
			*bad_wr = wr;
			return -EINVAL;
		}

		spin_lock_irqsave(&srq->rq.lock, flags);
		wq = srq->rq.wq;
		next = wq->head + 1;
		if (next >= srq->rq.size)
			next = 0;
		if (next == wq->tail) {
			spin_unlock_irqrestore(&srq->rq.lock, flags);
			*bad_wr = wr;
			return -ENOMEM;
		}

		wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head);
		wqe->wr_id = wr->wr_id;
		wqe->num_sge = wr->num_sge;
		for (i = 0; i < wr->num_sge; i++)
			wqe->sg_list[i] = wr->sg_list[i];
		/* Make sure queue entry is written before the head index. */
		smp_wmb();
		wq->head = next;
		spin_unlock_irqrestore(&srq->rq.lock, flags);
	}
	return 0;
}