/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/hash.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>
#include "qp.h"
#include "vt.h"
#include "trace.h"
/*
 * Note that it is OK to post send work requests in the SQE and ERR
 * states; rvt_do_send() will process them and generate error
 * completions as per IB 1.2 C10-96.
 */
const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
	[IB_QPS_INIT] = RVT_POST_RECV_OK,
	[IB_QPS_RTR] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK,
	[IB_QPS_RTS] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK |
	    RVT_PROCESS_NEXT_SEND_OK,
	[IB_QPS_SQD] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK,
	[IB_QPS_SQE] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_FLUSH_SEND,
	[IB_QPS_ERR] = RVT_POST_RECV_OK | RVT_FLUSH_RECV |
	    RVT_POST_SEND_OK | RVT_FLUSH_SEND,
};
EXPORT_SYMBOL(ib_rvt_state_ops);
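
/*
 * Illustrative use of the table above (drawn from callers later in this
 * file, e.g. rvt_post_send() and rvt_post_recv()): a state's capabilities
 * are tested with a bitwise AND, such as
 *
 *	if (!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))
 *		return -EINVAL;
 */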
static void get_map_page(struct rvt_qpn_table *qpt,
			 struct rvt_qpn_map *map,
			 gfp_t gfp)
{
	unsigned long page = get_zeroed_page(gfp);

	/*
	 * Free the page if someone raced with us installing it.
	 */
	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}
/**
 * init_qpn_table - initialize the QP number table for a device
 * @qpt: the QPN table
 */
static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
{
	u32 offset, i;
	struct rvt_qpn_map *map;
	int ret = 0;

	if (!(rdi->dparms.qpn_res_end >= rdi->dparms.qpn_res_start))
		return -EINVAL;

	spin_lock_init(&qpt->lock);

	qpt->last = rdi->dparms.qpn_start;
	qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift;

	/*
	 * Drivers may want some QPs beyond what we need for verbs; let them
	 * use our QPN table rather than keeping one of their own. Go ahead
	 * and mark the bitmaps for those. The reserved range must be *after*
	 * the range which verbs allocates from.
	 */

	/* Figure out number of bit maps needed before reserved range */
	qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE;

	/* This should always be zero */
	offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK;

	/* Starting with the first reserved bit map */
	map = &qpt->map[qpt->nmaps];

	rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
		    rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
	for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
		if (!map->page) {
			get_map_page(qpt, map, GFP_KERNEL);
			if (!map->page) {
				ret = -ENOMEM;
				break;
			}
		}
		set_bit(offset, map->page);
		offset++;
		if (offset == RVT_BITS_PER_PAGE) {
			/* next page */
			qpt->nmaps++;
			map++;
			offset = 0;
		}
	}
	return ret;
}
/**
 * free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
static void free_qpn_table(struct rvt_qpn_table *qpt)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
		free_page((unsigned long)qpt->map[i].page);
}
/**
 * rvt_driver_qp_init - Init driver qp resources
 * @rdi: rvt dev structure
 *
 * Return: 0 on success
 */
int rvt_driver_qp_init(struct rvt_dev_info *rdi)
{
	int i;
	int ret = -ENOMEM;

	if (!rdi->dparms.qp_table_size)
		return -EINVAL;

	/*
	 * If driver is not doing any QP allocation then make sure it is
	 * providing the necessary QP functions.
	 */
	if (!rdi->driver_f.free_all_qps ||
	    !rdi->driver_f.qp_priv_alloc ||
	    !rdi->driver_f.qp_priv_free ||
	    !rdi->driver_f.notify_qp_reset)
		return -EINVAL;

	/* allocate parent object */
	rdi->qp_dev = kzalloc_node(sizeof(*rdi->qp_dev), GFP_KERNEL,
				   rdi->dparms.node);
	if (!rdi->qp_dev)
		return -ENOMEM;

	/* allocate hash table */
	rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
	rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
	rdi->qp_dev->qp_table =
		kmalloc_node(rdi->qp_dev->qp_table_size *
			     sizeof(*rdi->qp_dev->qp_table),
			     GFP_KERNEL, rdi->dparms.node);
	if (!rdi->qp_dev->qp_table)
		goto no_qp_table;

	for (i = 0; i < rdi->qp_dev->qp_table_size; i++)
		RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL);

	spin_lock_init(&rdi->qp_dev->qpt_lock);

	/* initialize qpn map */
	if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table))
		goto fail_table;

	spin_lock_init(&rdi->n_qps_lock);

	return 0;

fail_table:
	kfree(rdi->qp_dev->qp_table);
	free_qpn_table(&rdi->qp_dev->qpn_table);

no_qp_table:
	kfree(rdi->qp_dev);

	return ret;
}
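
/*
 * Note: qp_table_bits is ilog2(qp_table_size); it is the width handed to
 * hash_32() by rvt_insert_qp() and rvt_remove_qp() below to pick a hash
 * bucket for a QP number.
 */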
/**
 * rvt_free_all_qps - check for QPs still in use
 * @rdi: rvt device containing the QP table to empty
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 */
static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
{
	struct rvt_qp *qp;
	unsigned long flags;
	unsigned n, qp_inuse = 0;
	spinlock_t *ql; /* work around too long line below */

	if (rdi->driver_f.free_all_qps)
		qp_inuse = rdi->driver_f.free_all_qps(rdi);

	qp_inuse += rvt_mcast_tree_empty(rdi);

	ql = &rdi->qp_dev->qpt_lock;
	spin_lock_irqsave(ql, flags);
	for (n = 0; n < rdi->qp_dev->qp_table_size; n++) {
		qp = rcu_dereference_protected(rdi->qp_dev->qp_table[n],
					       lockdep_is_held(ql));
		RCU_INIT_POINTER(rdi->qp_dev->qp_table[n], NULL);

		for (; qp; qp = rcu_dereference_protected(qp->next,
							  lockdep_is_held(ql)))
			qp_inuse++;
	}
	spin_unlock_irqrestore(ql, flags);
	return qp_inuse;
}
/**
 * rvt_qp_exit - clean up qps on device exit
 * @rdi: rvt dev structure
 *
 * Check for qp leaks and free resources.
 */
void rvt_qp_exit(struct rvt_dev_info *rdi)
{
	u32 qps_inuse = rvt_free_all_qps(rdi);

	if (qps_inuse)
		rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
			   qps_inuse);

	kfree(rdi->qp_dev->qp_table);
	free_qpn_table(&rdi->qp_dev->qpn_table);
	kfree(rdi->qp_dev);
}
static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
{
	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}
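
/*
 * Worked example: off == 5 in the third bitmap page (map - qpt->map == 2)
 * yields QPN 2 * RVT_BITS_PER_PAGE + 5.
 */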
/**
 * alloc_qpn - Allocate the next available qpn or zero/one for QP type
 *	       IB_QPT_SMI/IB_QPT_GSI
 * @rdi: rvt device info structure
 * @qpt: queue pair number table pointer
 * @port_num: IB port number, 1 based, comes from core
 *
 * Return: The queue pair number
 */
static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
		     enum ib_qp_type type, u8 port_num, gfp_t gfp)
{
	u32 i, offset, max_scan, qpn;
	struct rvt_qpn_map *map;
	u32 ret;

	if (rdi->driver_f.alloc_qpn)
		return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num, gfp);

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port_num - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

	qpn = qpt->last + qpt->incr;
	if (qpn >= RVT_QPN_MAX)
		qpn = qpt->incr | ((qpt->last & 1) ^ 1);
	/* offset carries bit 0 */
	offset = qpn & RVT_BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map, gfp);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset += qpt->incr;
			/*
			 * This qpn might be bogus if offset >= BITS_PER_PAGE.
			 * That is OK.  It gets re-assigned below.
			 */
			qpn = mk_qpn(qpt, map, offset);
		} while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			/* start at incr with current bit 0 */
			offset = qpt->incr | (offset & 1);
		} else if (map < &qpt->map[qpt->nmaps]) {
			map++;
			/* start at incr with current bit 0 */
			offset = qpt->incr | (offset & 1);
		} else {
			map = &qpt->map[0];
			/* wrap to first map page, invert bit 0 */
			offset = qpt->incr | ((offset & 1) ^ 1);
		}
		/* there can be no set bits below the QoS shift */
		WARN_ON(offset & (BIT(rdi->dparms.qos_shift) - 1));
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}
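
/*
 * Note: qpt->incr was set in init_qpn_table() to qpn_inc << qos_shift, so
 * the scan above advances in strides that never set the low-order QoS
 * bits (hence the WARN_ON just before mk_qpn()).
 */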
static void free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
	struct rvt_qpn_map *map;

	map = qpt->map + qpn / RVT_BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
}
/**
 * rvt_clear_mr_refs - Drop held mr refs
 * @qp: rvt qp data structure
 * @clr_sends: If we should also clear the send side
 */
static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
{
	unsigned n;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
		rvt_put_ss(&qp->s_rdma_read_sge);
	else
		rvt_put_ss(&qp->r_sge);

	if (clr_sends) {
		while (qp->s_last != qp->s_head) {
			struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);
			unsigned i;

			for (i = 0; i < wqe->wr.num_sge; i++) {
				struct rvt_sge *sge = &wqe->sg_list[i];

				rvt_put_mr(sge->mr);
			}
			if (qp->ibqp.qp_type == IB_QPT_UD ||
			    qp->ibqp.qp_type == IB_QPT_SMI ||
			    qp->ibqp.qp_type == IB_QPT_GSI)
				atomic_dec(&ibah_to_rvtah(
						wqe->ud_wr.ah)->refcount);
			if (++qp->s_last >= qp->s_size)
				qp->s_last = 0;
			smp_wmb(); /* see qp_set_savail */
		}
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
	}

	if (qp->ibqp.qp_type != IB_QPT_RC)
		return;

	for (n = 0; n < rvt_max_atomic(rdi); n++) {
		struct rvt_ack_entry *e = &qp->s_ack_queue[n];

		if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
		    e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
	}
}
/**
 * rvt_remove_qp - remove qp from table
 * @rdi: rvt dev struct
 * @qp: qp to remove
 *
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive routine.
 */
static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
	u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
	unsigned long flags;
	int removed = 1;

	spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

	if (rcu_dereference_protected(rvp->qp[0],
			lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(rvp->qp[0], NULL);
	} else if (rcu_dereference_protected(rvp->qp[1],
			lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(rvp->qp[1], NULL);
	} else {
		struct rvt_qp *q;
		struct rvt_qp __rcu **qpp;

		removed = 0;
		qpp = &rdi->qp_dev->qp_table[n];
		for (; (q = rcu_dereference_protected(*qpp,
			lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL;
			qpp = &q->next) {
			if (q == qp) {
				RCU_INIT_POINTER(*qpp,
				     rcu_dereference_protected(qp->next,
				     lockdep_is_held(&rdi->qp_dev->qpt_lock)));
				removed = 1;
				trace_rvt_qpremove(qp, n);
				break;
			}
		}
	}

	spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
	if (removed) {
		synchronize_rcu();
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}
/**
 * rvt_reset_qp - initialize the QP state to the reset state
 * @qp: the QP to reset
 * @type: the QP type
 *
 * r_lock, s_hlock, and s_lock are required to be held by the caller
 */
static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			 enum ib_qp_type type)
	__releases(&qp->s_lock)
	__releases(&qp->s_hlock)
	__releases(&qp->r_lock)
	__acquires(&qp->r_lock)
	__acquires(&qp->s_hlock)
	__acquires(&qp->s_lock)
{
	if (qp->state != IB_QPS_RESET) {
		qp->state = IB_QPS_RESET;

		/* Let drivers flush their waitlist */
		rdi->driver_f.flush_qp_waiters(qp);
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
		spin_unlock(&qp->s_lock);
		spin_unlock(&qp->s_hlock);
		spin_unlock_irq(&qp->r_lock);

		/* Stop the send queue and the retry timer */
		rdi->driver_f.stop_send_queue(qp);

		/* Wait for things to stop */
		rdi->driver_f.quiesce_qp(qp);

		/* take qp out the hash and wait for it to be unused */
		rvt_remove_qp(rdi, qp);
		wait_event(qp->wait, !atomic_read(&qp->refcount));

		/* grab the lock b/c it was locked at call time */
		spin_lock_irq(&qp->r_lock);
		spin_lock(&qp->s_hlock);
		spin_lock(&qp->s_lock);

		rvt_clear_mr_refs(qp, 1);
	}

	/*
	 * Let the driver do any tear down it needs to for a qp
	 * that has been reset
	 */
	rdi->driver_f.notify_qp_reset(qp);

	qp->qp_access_flags = 0;
	qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
	qp->s_sending_psn = 0;
	qp->s_sending_hpsn = 0;
	if (type == IB_QPT_RC) {
		qp->s_state = IB_OPCODE_RC_SEND_LAST;
		qp->r_state = IB_OPCODE_RC_SEND_LAST;
	} else {
		qp->s_state = IB_OPCODE_UC_SEND_LAST;
		qp->r_state = IB_OPCODE_UC_SEND_LAST;
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->s_mig_state = IB_MIG_MIGRATED;
	if (qp->s_ack_queue)
		memset(qp->s_ack_queue, 0,
		       rvt_max_atomic(rdi) *
		       sizeof(*qp->s_ack_queue));
	qp->r_head_ack_queue = 0;
	qp->s_tail_ack_queue = 0;
	qp->s_num_rd_atomic = 0;
	if (qp->r_rq.wq) {
		qp->r_rq.wq->head = 0;
		qp->r_rq.wq->tail = 0;
	}
	qp->r_sge.num_sge = 0;
}
/**
 * rvt_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Queue pair creation is mostly an rvt issue. However, drivers have their own
 * unique idea of what queue pair numbers mean. For instance there is a
 * reserved range.
 *
 * Return: the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata)
{
	struct rvt_qp *qp;
	int err;
	struct rvt_swqe *swq = NULL;
	size_t sz;
	size_t sg_list_sz;
	struct ib_qp *ret = ERR_PTR(-ENOMEM);
	struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
	void *priv = NULL;
	gfp_t gfp;

	if (!rdi)
		return ERR_PTR(-EINVAL);

	if (init_attr->cap.max_send_sge > rdi->dparms.props.max_sge ||
	    init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
	    init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
		return ERR_PTR(-EINVAL);

	/* GFP_NOIO is applicable to RC QP's only */

	if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
	    init_attr->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);

	gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
						GFP_NOIO : GFP_KERNEL;

	/* Check receive queue parameters if no SRQ is specified. */
	if (!init_attr->srq) {
		if (init_attr->cap.max_recv_sge > rdi->dparms.props.max_sge ||
		    init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
			return ERR_PTR(-EINVAL);

		if (init_attr->cap.max_send_sge +
		    init_attr->cap.max_send_wr +
		    init_attr->cap.max_recv_sge +
		    init_attr->cap.max_recv_wr == 0)
			return ERR_PTR(-EINVAL);
	}

	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (init_attr->port_num == 0 ||
		    init_attr->port_num > ibpd->device->phys_port_cnt)
			return ERR_PTR(-EINVAL);
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
		sz = sizeof(struct rvt_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct rvt_swqe);
		if (gfp == GFP_NOIO)
			swq = __vmalloc(
				(init_attr->cap.max_send_wr + 1) * sz,
				gfp | __GFP_ZERO, PAGE_KERNEL);
		else
			swq = vzalloc_node(
				(init_attr->cap.max_send_wr + 1) * sz,
				rdi->dparms.node);
		if (!swq)
			return ERR_PTR(-ENOMEM);

		sz = sizeof(*qp);
		sg_list_sz = 0;
		if (init_attr->srq) {
			struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);

			if (srq->rq.max_sge > 1)
				sg_list_sz = sizeof(*qp->r_sg_list) *
					(srq->rq.max_sge - 1);
		} else if (init_attr->cap.max_recv_sge > 1)
			sg_list_sz = sizeof(*qp->r_sg_list) *
				(init_attr->cap.max_recv_sge - 1);
		qp = kzalloc_node(sz + sg_list_sz, gfp, rdi->dparms.node);
		if (!qp)
			goto bail_swq;

		RCU_INIT_POINTER(qp->next, NULL);
		if (init_attr->qp_type == IB_QPT_RC) {
			qp->s_ack_queue =
				kzalloc_node(
					sizeof(*qp->s_ack_queue) *
					rvt_max_atomic(rdi),
					gfp, rdi->dparms.node);
			if (!qp->s_ack_queue)
				goto bail_qp;
		}

		/*
		 * Driver needs to set up its private QP structure and do any
		 * initialization that is needed.
		 */
		priv = rdi->driver_f.qp_priv_alloc(rdi, qp, gfp);
		if (!priv)
			goto bail_qp;
		qp->priv = priv;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
				1000UL);
		if (init_attr->srq) {
			sz = 0;
		} else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct rvt_rwqe);
			if (udata)
				qp->r_rq.wq = vmalloc_user(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz);
			else if (gfp == GFP_NOIO)
				qp->r_rq.wq = __vmalloc(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz,
						gfp | __GFP_ZERO, PAGE_KERNEL);
			else
				qp->r_rq.wq = vzalloc_node(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz,
						rdi->dparms.node);
			if (!qp->r_rq.wq)
				goto bail_driver_priv;
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->r_lock);
		spin_lock_init(&qp->s_hlock);
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		init_waitqueue_head(&qp->wait);
		init_timer(&qp->s_timer);
		qp->s_timer.data = (unsigned long)qp;
		INIT_LIST_HEAD(&qp->rspwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = init_attr->cap.max_send_wr + 1;
		qp->s_avail = init_attr->cap.max_send_wr;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = RVT_S_SIGNAL_REQ_WR;

		err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
				init_attr->qp_type,
				init_attr->port_num, gfp);
		if (err < 0) {
			ret = ERR_PTR(err);
			goto bail_rq_wq;
		}
		qp->ibqp.qp_num = err;
		qp->port_num = init_attr->port_num;
		rvt_reset_qp(rdi, qp, init_attr->qp_type);
		break;

	default:
		/* Don't support raw QPs */
		return ERR_PTR(-EINVAL);
	}

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See rvt_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_qpn;
			}
		} else {
			u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;

			qp->ip = rvt_create_mmap_info(rdi, s,
						      ibpd->uobject->context,
						      qp->r_rq.wq);
			if (!qp->ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qpn;
			}

			err = ib_copy_to_udata(udata, &qp->ip->offset,
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
		qp->pid = current->pid;
	}

	spin_lock(&rdi->n_qps_lock);
	if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
		spin_unlock(&rdi->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	rdi->n_qps_allocated++;
	/*
	 * Maintain a busy_jiffies variable that will be added to the timeout
	 * period in mod_retry_timer and add_retry_timer. This busy jiffies
	 * is scaled by the number of rc qps created for the device to reduce
	 * the number of timeouts occurring when there is a large number of
	 * qps. busy_jiffies is incremented every rc qp scaling interval.
	 * The scaling interval is selected based on extensive performance
	 * evaluation of targeted workloads.
	 */
	if (init_attr->qp_type == IB_QPT_RC) {
		rdi->n_rc_qps++;
		rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
	}
	spin_unlock(&rdi->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&rdi->pending_lock);
		list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps);
		spin_unlock_irq(&rdi->pending_lock);
	}

	ret = &qp->ibqp;

	/*
	 * We have our QP and it's good, now keep track of what types of
	 * opcodes can be processed on this QP. We do this by keeping track of
	 * what the 3 high order bits of the opcode are.
	 */
	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_UD:
		qp->allowed_ops = IB_OPCODE_UD;
		break;
	case IB_QPT_RC:
		qp->allowed_ops = IB_OPCODE_RC;
		break;
	case IB_QPT_UC:
		qp->allowed_ops = IB_OPCODE_UC;
		break;
	default:
		ret = ERR_PTR(-EINVAL);
		goto bail_ip;
	}

	return ret;

bail_ip:
	kref_put(&qp->ip->ref, rvt_release_mmap_info);

bail_qpn:
	free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

bail_rq_wq:
	vfree(qp->r_rq.wq);

bail_driver_priv:
	rdi->driver_f.qp_priv_free(rdi, qp);

bail_qp:
	kfree(qp->s_ack_queue);
	kfree(qp);

bail_swq:
	vfree(swq);

	return ret;
}
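
/*
 * Illustrative caller usage (an assumption, not taken from this file): an RC
 * QP whose allocation must not recurse into I/O can request
 *
 *	init_attr.create_flags |= IB_QP_CREATE_USE_GFP_NOIO;
 *
 * before calling ib_create_qp(); rvt_create_qp() then performs the queue
 * allocations above with GFP_NOIO instead of GFP_KERNEL.
 */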
/**
 * rvt_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 *
 * Return: true if last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
{
	int ret = 0;
	struct ib_wc wc;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto bail;

	qp->state = IB_QPS_ERR;

	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
		del_timer(&qp->s_timer);
	}

	if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
		qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;

	rdi->driver_f.notify_error_qp(qp);

	/* Schedule the sending tasklet to drain the send work queue. */
	if (ACCESS_ONCE(qp->s_last) != qp->s_head)
		rdi->driver_f.schedule_send(qp);

	rvt_clear_mr_refs(qp, 0);

	memset(&wc, 0, sizeof(wc));
	wc.qp = &qp->ibqp;
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	if (qp->r_rq.wq) {
		struct rvt_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		while (tail != head) {
			wc.wr_id = rvt_get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler) {
		ret = 1;
	}

bail:
	return ret;
}
EXPORT_SYMBOL(rvt_error_qp);
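
/*
 * A nonzero return from rvt_error_qp() tells the caller to generate the
 * IB_EVENT_QP_LAST_WQE_REACHED event, as rvt_modify_qp() does below when
 * moving a QP into IB_QPS_ERR.
 */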
/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
	unsigned long flags;

	atomic_inc(&qp->refcount);
	spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

	if (qp->ibqp.qp_num <= 1) {
		rcu_assign_pointer(rvp->qp[qp->ibqp.qp_num], qp);
	} else {
		u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);

		qp->next = rdi->qp_dev->qp_table[n];
		rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp);
		trace_rvt_qpinsert(qp, n);
	}

	spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
}
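
/*
 * QP lookups on the receive path are done under RCU, which is why insertion
 * publishes with rcu_assign_pointer() above and why rvt_remove_qp() waits
 * with synchronize_rcu() before dropping the reference taken here.
 */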
/**
 * rvt_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Return: 0 on success, otherwise returns an errno.
 */
int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata)
{
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct ib_event ev;
	int lastwqe = 0;
	int mig = 0;
	int pmtu = 0; /* for gcc warning only */
	enum rdma_link_layer link;

	link = rdma_port_get_link_layer(ibqp->device, qp->port_num);

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_hlock);
	spin_lock(&qp->s_lock);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask, link))
		goto inval;

	if (rdi->driver_f.check_modify_qp &&
	    rdi->driver_f.check_modify_qp(qp, attr, attr_mask, udata))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (attr->ah_attr.dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
			goto inval;
		if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
			goto inval;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_ah_attr.dlid >=
		    be16_to_cpu(IB_MULTICAST_LID_BASE))
			goto inval;
		if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
			goto inval;
		if (attr->alt_pkey_index >= rvt_get_npkeys(rdi))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= rvt_get_npkeys(rdi))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (qp->ibqp.qp_type == IB_QPT_SMI ||
		    qp->ibqp.qp_type == IB_QPT_GSI ||
		    attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	if (attr_mask & IB_QP_DEST_QPN)
		if (attr->dest_qp_num > RVT_QPN_MASK)
			goto inval;

	if (attr_mask & IB_QP_RETRY_CNT)
		if (attr->retry_cnt > 7)
			goto inval;

	if (attr_mask & IB_QP_RNR_RETRY)
		if (attr->rnr_retry > 7)
			goto inval;

	/*
	 * Don't allow invalid path_mtu values. It is OK to set it greater
	 * than the active mtu (or even the max_cap, if we have tuned that
	 * to a small mtu). We'll set qp->path_mtu to the lesser of the
	 * requested attribute mtu and the active mtu, for packetizing
	 * messages.
	 * Note that the QP port has to be set in INIT and MTU in RTR.
	 */
	if (attr_mask & IB_QP_PATH_MTU) {
		pmtu = rdi->driver_f.get_pmtu_from_attr(rdi, qp, attr);
		if (pmtu < 0)
			goto inval;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		if (attr->path_mig_state == IB_MIG_REARM) {
			if (qp->s_mig_state == IB_MIG_ARMED)
				goto inval;
			if (new_state != IB_QPS_RTS)
				goto inval;
		} else if (attr->path_mig_state == IB_MIG_MIGRATED) {
			if (qp->s_mig_state == IB_MIG_REARM)
				goto inval;
			if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
				goto inval;
			if (qp->s_mig_state == IB_MIG_ARMED)
				mig = 1;
		} else {
			goto inval;
		}
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > rdi->dparms.max_rdma_atomic)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		if (qp->state != IB_QPS_RESET)
			rvt_reset_qp(rdi, qp, ibqp->qp_type);
		break;

	case IB_QPS_RTR:
		/* Allow event to re-trigger if QP set to RTR more than once */
		qp->r_flags &= ~RVT_R_COMM_EST;
		qp->state = new_state;
		break;

	case IB_QPS_SQD:
		qp->s_draining = qp->s_last != qp->s_cur;
		qp->state = new_state;
		break;

	case IB_QPS_SQE:
		if (qp->ibqp.qp_type == IB_QPT_RC)
			goto inval;
		qp->state = new_state;
		break;

	case IB_QPS_ERR:
		lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		qp->state = new_state;
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_PORT)
		qp->port_num = attr->port_num;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_next_psn = attr->sq_psn & rdi->dparms.psn_modify_mask;
		qp->s_psn = qp->s_next_psn;
		qp->s_sending_psn = qp->s_next_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
		qp->s_sending_hpsn = qp->s_last_psn;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn & rdi->dparms.psn_modify_mask;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV) {
		qp->remote_ah_attr = attr->ah_attr;
		qp->s_srate = attr->ah_attr.static_rate;
		qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		qp->alt_ah_attr = attr->alt_ah_attr;
		qp->s_alt_pkey_index = attr->alt_pkey_index;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		qp->s_mig_state = attr->path_mig_state;
		if (mig) {
			qp->remote_ah_attr = qp->alt_ah_attr;
			qp->port_num = qp->alt_ah_attr.port_num;
			qp->s_pkey_index = qp->s_alt_pkey_index;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		qp->pmtu = rdi->driver_f.mtu_from_qp(rdi, qp, pmtu);
		qp->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu);
		qp->log_pmtu = ilog2(qp->pmtu);
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		qp->s_retry_cnt = attr->retry_cnt;
		qp->s_retry = attr->retry_cnt;
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry_cnt = attr->rnr_retry;
		qp->s_rnr_retry = attr->rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT) {
		qp->timeout = attr->timeout;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
				1000UL);
	}

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	if (rdi->driver_f.modify_qp)
		rdi->driver_f.modify_qp(qp, attr, attr_mask, udata);

	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		rvt_insert_qp(rdi, qp);

	if (lastwqe) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	if (mig) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_PATH_MIG;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	return 0;

inval:
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);
	return -EINVAL;
}
/**
 * rvt_free_qpn - Free a qpn from the bit map
 * @qpt: QP number table
 * @qpn: queue pair number to free
 */
static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
	struct rvt_qpn_map *map;

	map = qpt->map + qpn / RVT_BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
}
/**
 * rvt_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 *
 * Return: 0 on success.
 */
int rvt_destroy_qp(struct ib_qp *ibqp)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_hlock);
	spin_lock(&qp->s_lock);
	rvt_reset_qp(rdi, qp, ibqp->qp_type);
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);

	/* qpn is now available for use again */
	rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

	spin_lock(&rdi->n_qps_lock);
	rdi->n_qps_allocated--;
	if (qp->ibqp.qp_type == IB_QPT_RC) {
		rdi->n_rc_qps--;
		rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
	}
	spin_unlock(&rdi->n_qps_lock);

	if (qp->ip)
		kref_put(&qp->ip->ref, rvt_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	vfree(qp->s_wq);
	rdi->driver_f.qp_priv_free(rdi, qp);
	kfree(qp->s_ack_queue);
	kfree(qp);
	return 0;
}
/**
 * rvt_query_qp - query an ibqp
 * @ibqp: IB qp to query
 * @attr: attr struct to fill in
 * @attr_mask: attr mask ignored
 * @init_attr: struct to fill in
 *
 * Return: always 0
 */
int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		 int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	attr->qp_state = qp->state;
	attr->cur_qp_state = attr->qp_state;
	attr->path_mtu = qp->path_mtu;
	attr->path_mig_state = qp->s_mig_state;
	attr->qkey = qp->qkey;
	attr->rq_psn = qp->r_psn & rdi->dparms.psn_mask;
	attr->sq_psn = qp->s_next_psn & rdi->dparms.psn_mask;
	attr->dest_qp_num = qp->remote_qpn;
	attr->qp_access_flags = qp->qp_access_flags;
	attr->cap.max_send_wr = qp->s_size - 1;
	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
	attr->cap.max_send_sge = qp->s_max_sge;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	attr->ah_attr = qp->remote_ah_attr;
	attr->alt_ah_attr = qp->alt_ah_attr;
	attr->pkey_index = qp->s_pkey_index;
	attr->alt_pkey_index = qp->s_alt_pkey_index;
	attr->en_sqd_async_notify = 0;
	attr->sq_draining = qp->s_draining;
	attr->max_rd_atomic = qp->s_max_rd_atomic;
	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
	attr->min_rnr_timer = qp->r_min_rnr_timer;
	attr->port_num = qp->port_num;
	attr->timeout = qp->timeout;
	attr->retry_cnt = qp->s_retry_cnt;
	attr->rnr_retry = qp->s_rnr_retry_cnt;
	attr->alt_port_num = qp->alt_ah_attr.port_num;
	attr->alt_timeout = qp->alt_timeout;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	if (qp->s_flags & RVT_S_SIGNAL_REQ_WR)
		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	else
		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->port_num = qp->port_num;
	return 0;
}
/**
 * rvt_post_recv - post a receive on a QP
 * @ibqp: the QP to post the receive on
 * @wr: the WR to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success otherwise errno
 */
int rvt_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		  struct ib_recv_wr **bad_wr)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_rwq *wq = qp->r_rq.wq;
	unsigned long flags;
	int qp_err_flush = (ib_rvt_state_ops[qp->state] & RVT_FLUSH_RECV) &&
				!qp->ibqp.srq;

	/* Check that state is OK to post receive. */
	if (!(ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) || !wq) {
		*bad_wr = wr;
		return -EINVAL;
	}

	for (; wr; wr = wr->next) {
		struct rvt_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned)wr->num_sge > qp->r_rq.max_sge) {
			*bad_wr = wr;
			return -EINVAL;
		}

		spin_lock_irqsave(&qp->r_rq.lock, flags);
		next = wq->head + 1;
		if (next >= qp->r_rq.size)
			next = 0;
		if (next == wq->tail) {
			spin_unlock_irqrestore(&qp->r_rq.lock, flags);
			*bad_wr = wr;
			return -ENOMEM;
		}
		if (unlikely(qp_err_flush)) {
			struct ib_wc wc;

			memset(&wc, 0, sizeof(wc));
			wc.qp = &qp->ibqp;
			wc.opcode = IB_WC_RECV;
			wc.wr_id = wr->wr_id;
			wc.status = IB_WC_WR_FLUSH_ERR;
			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
		} else {
			wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head);
			wqe->wr_id = wr->wr_id;
			wqe->num_sge = wr->num_sge;
			for (i = 0; i < wr->num_sge; i++)
				wqe->sg_list[i] = wr->sg_list[i];
			/*
			 * Make sure queue entry is written
			 * before the head index.
			 */
			smp_wmb();
			wq->head = next;
		}
		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
	}
	return 0;
}
/**
 * qp_get_savail - return number of avail send entries
 * @qp: the qp
 *
 * This assumes the s_hlock is held but the s_last
 * qp variable is uncontrolled.
 */
static inline u32 qp_get_savail(struct rvt_qp *qp)
{
	u32 slast;
	u32 ret;

	smp_read_barrier_depends(); /* see rc.c */
	slast = ACCESS_ONCE(qp->s_last);
	if (qp->s_head >= slast)
		ret = qp->s_size - (qp->s_head - slast);
	else
		ret = slast - qp->s_head;
	return ret - 1;
}
/**
 * rvt_post_one_wr - post one RC, UC, or UD send work request
 * @qp: the QP to post on
 * @wr: the work request to send
 * @call_send: driver hint, set when the driver should kick off the send
 *
 * Return: 0 on success, otherwise returns an errno.
 */
static int rvt_post_one_wr(struct rvt_qp *qp,
			   struct ib_send_wr *wr,
			   int *call_send)
{
	struct rvt_swqe *wqe;
	u32 next;
	int i;
	int j;
	int acc;
	struct rvt_lkey_table *rkt;
	struct rvt_pd *pd;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
	u8 log_pmtu;
	int ret;

	/* IB spec says that num_sge == 0 is OK. */
	if (unlikely(wr->num_sge > qp->s_max_sge))
		return -EINVAL;

	/*
	 * Don't allow RDMA reads or atomic operations on UC or
	 * undefined operations.
	 * Make sure buffer is large enough to hold the result for atomics.
	 */
	if (qp->ibqp.qp_type == IB_QPT_UC) {
		if ((unsigned)wr->opcode >= IB_WR_RDMA_READ)
			return -EINVAL;
	} else if (qp->ibqp.qp_type != IB_QPT_RC) {
		/* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */
		if (wr->opcode != IB_WR_SEND &&
		    wr->opcode != IB_WR_SEND_WITH_IMM)
			return -EINVAL;
		/* Check UD destination address PD */
		if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
			return -EINVAL;
	} else if ((unsigned)wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) {
		return -EINVAL;
	} else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
		   (wr->num_sge == 0 ||
		    wr->sg_list[0].length < sizeof(u64) ||
		    wr->sg_list[0].addr & (sizeof(u64) - 1))) {
		return -EINVAL;
	} else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic) {
		return -EINVAL;
	}

	/* check for avail */
	if (unlikely(!qp->s_avail)) {
		qp->s_avail = qp_get_savail(qp);
		if (WARN_ON(qp->s_avail > (qp->s_size - 1)))
			rvt_pr_err(rdi,
				   "More avail entries than QP RB size.\nQP: %u, size: %u, avail: %u\nhead: %u, tail: %u, cur: %u, acked: %u, last: %u",
				   qp->ibqp.qp_num, qp->s_size, qp->s_avail,
				   qp->s_head, qp->s_tail, qp->s_cur,
				   qp->s_acked, qp->s_last);
		if (!qp->s_avail)
			return -ENOMEM;
	}
	next = qp->s_head + 1;
	if (next >= qp->s_size)
		next = 0;

	rkt = &rdi->lkey_table;
	pd = ibpd_to_rvtpd(qp->ibqp.pd);
	wqe = rvt_get_swqe_ptr(qp, qp->s_head);

	if (qp->ibqp.qp_type != IB_QPT_UC &&
	    qp->ibqp.qp_type != IB_QPT_RC)
		memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr));
	else if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
		 wr->opcode == IB_WR_RDMA_WRITE ||
		 wr->opcode == IB_WR_RDMA_READ)
		memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr));
	else if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		 wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
		memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr));
	else
		memcpy(&wqe->wr, wr, sizeof(wqe->wr));

	wqe->length = 0;
	j = 0;
	if (wr->num_sge) {
		acc = wr->opcode >= IB_WR_RDMA_READ ?
			IB_ACCESS_LOCAL_WRITE : 0;
		for (i = 0; i < wr->num_sge; i++) {
			u32 length = wr->sg_list[i].length;
			int ok;

			if (length == 0)
				continue;
			ok = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j],
					 &wr->sg_list[i], acc);
			if (!ok) {
				ret = -EINVAL;
				goto bail_inval_free;
			}
			wqe->length += length;
			j++;
		}
		wqe->wr.num_sge = j;
	}

	/* general part of wqe valid - allow for driver checks */
	if (rdi->driver_f.check_send_wqe) {
		ret = rdi->driver_f.check_send_wqe(qp, wqe);
		if (ret < 0)
			goto bail_inval_free;
		if (ret)
			*call_send = ret;
	}

	log_pmtu = qp->log_pmtu;
	if (qp->ibqp.qp_type != IB_QPT_UC &&
	    qp->ibqp.qp_type != IB_QPT_RC) {
		struct rvt_ah *ah = ibah_to_rvtah(wqe->ud_wr.ah);

		log_pmtu = ah->log_pmtu;
		atomic_inc(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount);
	}

	wqe->ssn = qp->s_ssn++;
	wqe->psn = qp->s_next_psn;
	wqe->lpsn = wqe->psn +
			(wqe->length ? ((wqe->length - 1) >> log_pmtu) : 0);
	qp->s_next_psn = wqe->lpsn + 1;
	trace_rvt_post_one_wr(qp, wqe);
	smp_wmb(); /* see request builders */
	qp->s_avail--;
	qp->s_head = next;

	return 0;

bail_inval_free:
	/* release mr holds */
	while (j) {
		struct rvt_sge *sge = &wqe->sg_list[--j];

		rvt_put_mr(sge->mr);
	}
	return ret;
}
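
/*
 * Worked example for the PSN computation above: with a payload of exactly
 * 3 * pmtu bytes, (length - 1) >> log_pmtu == 2, so lpsn = psn + 2 and the
 * request occupies three packet sequence numbers.
 */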
/**
 * rvt_post_send - post a send on a QP
 * @ibqp: the QP to post the send on
 * @wr: the list of work requests to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success else errno
 */
int rvt_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		  struct ib_send_wr **bad_wr)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	unsigned long flags = 0;
	int call_send;
	unsigned nreq = 0;
	int err = 0;

	spin_lock_irqsave(&qp->s_hlock, flags);

	/*
	 * Ensure QP state is such that we can send. If not bail out early,
	 * there is no need to do this every time we post a send.
	 */
	if (unlikely(!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))) {
		spin_unlock_irqrestore(&qp->s_hlock, flags);
		*bad_wr = wr;
		return -EINVAL;
	}

	/*
	 * If the send queue is empty, and we only have a single WR then just
	 * go ahead and kick the send engine into gear. Otherwise we will
	 * always just schedule the send to happen later.
	 */
	call_send = qp->s_head == ACCESS_ONCE(qp->s_last) && !wr->next;

	for (; wr; wr = wr->next) {
		err = rvt_post_one_wr(qp, wr, &call_send);
		if (unlikely(err)) {
			*bad_wr = wr;
			goto bail;
		}
		nreq++;
	}
bail:
	spin_unlock_irqrestore(&qp->s_hlock, flags);
	if (nreq) {
		if (call_send)
			rdi->driver_f.do_send(qp);
		else
			rdi->driver_f.schedule_send_no_lock(qp);
	}
	return err;
}
/**
 * rvt_post_srq_recv - post a receive on a shared receive queue
 * @ibsrq: the SRQ to post the receive on
 * @wr: the list of work requests to post
 * @bad_wr: A pointer to the first WR to cause a problem is put here
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success else errno
 */
int rvt_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
	struct rvt_rwq *wq;
	unsigned long flags;

	for (; wr; wr = wr->next) {
		struct rvt_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned)wr->num_sge > srq->rq.max_sge) {
			*bad_wr = wr;
			return -EINVAL;
		}

		spin_lock_irqsave(&srq->rq.lock, flags);
		wq = srq->rq.wq;
		next = wq->head + 1;
		if (next >= srq->rq.size)
			next = 0;
		if (next == wq->tail) {
			spin_unlock_irqrestore(&srq->rq.lock, flags);
			*bad_wr = wr;
			return -ENOMEM;
		}

		wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head);
		wqe->wr_id = wr->wr_id;
		wqe->num_sge = wr->num_sge;
		for (i = 0; i < wr->num_sge; i++)
			wqe->sg_list[i] = wr->sg_list[i];
		/* Make sure queue entry is written before the head index. */
		smp_wmb();
		wq->head = next;
		spin_unlock_irqrestore(&srq->rq.lock, flags);
	}
	return 0;
}