/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  Authors: Joachim Fenkes <fenkes@de.ibm.com>
 *           Stefan Roscher <stefan.roscher@de.ibm.com>
 *           Waleri Fomin <fomin@de.ibm.com>
 *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *           Reinhard Ernst <rernst@de.ibm.com>
 *           Heiko J Schick <schickhj@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and
 *  OpenIB BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "ehca_classes.h"
#include "ehca_tools.h"
#include "ehca_qes.h"
#include "ehca_iverbs.h"
#include "hcp_if.h"
#include "hipz_fns.h"
static struct kmem_cache *qp_cache;
/*
 * attributes not supported by query qp
 */
#define QP_ATTR_QUERY_NOT_SUPPORTED (IB_QP_MAX_DEST_RD_ATOMIC  | \
				     IB_QP_MAX_QP_RD_ATOMIC   | \
				     IB_QP_ACCESS_FLAGS       | \
				     IB_QP_EN_SQD_ASYNC_NOTIFY)
/*
 * ehca (internal) qp state values
 */
enum ehca_qp_state {
	EHCA_QPS_RESET = 1,
	EHCA_QPS_INIT = 2,
	EHCA_QPS_RTR = 3,
	EHCA_QPS_RTS = 5,
	EHCA_QPS_SQD = 6,
	EHCA_QPS_SQE = 8,
	EHCA_QPS_ERR = 128
};
/*
 * qp state transitions as defined by IB Arch Rel 1.1 page 431
 */
enum ib_qp_statetrans {
	IB_QPST_ANY2RESET,
	IB_QPST_ANY2ERR,
	IB_QPST_RESET2INIT,
	IB_QPST_INIT2RTR,
	IB_QPST_INIT2INIT,
	IB_QPST_RTR2RTS,
	IB_QPST_RTS2SQD,
	IB_QPST_RTS2RTS,
	IB_QPST_SQD2RTS,
	IB_QPST_SQE2RTS,
	IB_QPST_SQD2SQD,
	IB_QPST_MAX	/* nr of transitions, this must be last!!! */
};
/*
 * ib2ehca_qp_state maps IB to ehca qp_state
 * returns ehca qp state corresponding to given ib qp state
 */
static inline enum ehca_qp_state ib2ehca_qp_state(enum ib_qp_state ib_qp_state)
{
	switch (ib_qp_state) {
	case IB_QPS_RESET:
		return EHCA_QPS_RESET;
	case IB_QPS_INIT:
		return EHCA_QPS_INIT;
	case IB_QPS_RTR:
		return EHCA_QPS_RTR;
	case IB_QPS_RTS:
		return EHCA_QPS_RTS;
	case IB_QPS_SQD:
		return EHCA_QPS_SQD;
	case IB_QPS_SQE:
		return EHCA_QPS_SQE;
	case IB_QPS_ERR:
		return EHCA_QPS_ERR;
	default:
		ehca_gen_err("invalid ib_qp_state=%x", ib_qp_state);
		return -EINVAL;
	}
}
/*
 * ehca2ib_qp_state maps ehca to IB qp_state
 * returns ib qp state corresponding to given ehca qp state
 */
static inline enum ib_qp_state ehca2ib_qp_state(enum ehca_qp_state
						ehca_qp_state)
{
	switch (ehca_qp_state) {
	case EHCA_QPS_RESET:
		return IB_QPS_RESET;
	case EHCA_QPS_INIT:
		return IB_QPS_INIT;
	case EHCA_QPS_RTR:
		return IB_QPS_RTR;
	case EHCA_QPS_RTS:
		return IB_QPS_RTS;
	case EHCA_QPS_SQD:
		return IB_QPS_SQD;
	case EHCA_QPS_SQE:
		return IB_QPS_SQE;
	case EHCA_QPS_ERR:
		return IB_QPS_ERR;
	default:
		ehca_gen_err("invalid ehca_qp_state=%x", ehca_qp_state);
		return -EINVAL;
	}
}
/*
 * ehca_qp_type used as index for req_attr and opt_attr of
 * struct ehca_modqp_statetrans
 */
enum ehca_qp_type {
	QPT_RC = 0,
	QPT_UC = 1,
	QPT_UD = 2,
	QPT_SQP = 3,
	QPT_MAX
};
/*
 * ib2ehcaqptype maps IB to ehca qp_type
 * returns ehca qp type corresponding to ib qp type
 */
static inline enum ehca_qp_type ib2ehcaqptype(enum ib_qp_type ibqptype)
{
	switch (ibqptype) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		return QPT_SQP;
	case IB_QPT_RC:
		return QPT_RC;
	case IB_QPT_UC:
		return QPT_UC;
	case IB_QPT_UD:
		return QPT_UD;
	default:
		ehca_gen_err("Invalid ibqptype=%x", ibqptype);
		return -EINVAL;
	}
}
static inline enum ib_qp_statetrans get_modqp_statetrans(int ib_fromstate,
							 int ib_tostate)
{
	int index = -EINVAL;
	switch (ib_tostate) {
	case IB_QPS_RESET:
		index = IB_QPST_ANY2RESET;
		break;
	case IB_QPS_INIT:
		switch (ib_fromstate) {
		case IB_QPS_RESET:
			index = IB_QPST_RESET2INIT;
			break;
		case IB_QPS_INIT:
			index = IB_QPST_INIT2INIT;
			break;
		}
		break;
	case IB_QPS_RTR:
		if (ib_fromstate == IB_QPS_INIT)
			index = IB_QPST_INIT2RTR;
		break;
	case IB_QPS_RTS:
		switch (ib_fromstate) {
		case IB_QPS_RTR:
			index = IB_QPST_RTR2RTS;
			break;
		case IB_QPS_RTS:
			index = IB_QPST_RTS2RTS;
			break;
		case IB_QPS_SQD:
			index = IB_QPST_SQD2RTS;
			break;
		case IB_QPS_SQE:
			index = IB_QPST_SQE2RTS;
			break;
		}
		break;
	case IB_QPS_SQD:
		if (ib_fromstate == IB_QPS_RTS)
			index = IB_QPST_RTS2SQD;
		break;
	case IB_QPS_ERR:
		index = IB_QPST_ANY2ERR;
		break;
	default:
		break;
	}
	return index;
}
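
/*
 * Informal example of the lookup above: INIT -> RTR enters the IB_QPS_RTR
 * arm, the from-state check passes, and IB_QPST_INIT2RTR is returned.
 * A pair with no entry, e.g. RTR -> INIT, falls through every arm and
 * yields the initial -EINVAL, which callers treat as "no such transition".
 */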
/*
 * ibqptype2servicetype returns hcp service type corresponding to given
 * ib qp type used by create_qp()
 */
static inline int ibqptype2servicetype(enum ib_qp_type ibqptype)
{
	switch (ibqptype) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		return ST_UD;
	case IB_QPT_RC:
		return ST_RC;
	case IB_QPT_UC:
		return ST_UC;
	case IB_QPT_UD:
		return ST_UD;
	case IB_QPT_RAW_IPV6:
		return -EINVAL;
	default:
		ehca_gen_err("Invalid ibqptype=%x", ibqptype);
		return -EINVAL;
	}
}
/*
 * init userspace queue info from ipz_queue data
 */
static inline void queue2resp(struct ipzu_queue_resp *resp,
			      struct ipz_queue *queue)
{
	resp->qe_size = queue->qe_size;
	resp->act_nr_of_sg = queue->act_nr_of_sg;
	resp->queue_length = queue->queue_length;
	resp->pagesize = queue->pagesize;
	resp->toggle_state = queue->toggle_state;
	resp->offset = queue->offset;
}
/*
 * init_qp_queue initializes/constructs r/squeue and registers queue pages.
 */
static inline int init_qp_queue(struct ehca_shca *shca,
				struct ehca_pd *pd,
				struct ehca_qp *my_qp,
				struct ipz_queue *queue,
				int q_type,
				u64 expected_hret,
				struct ehca_alloc_queue_parms *parms,
				int wqe_size)
{
	int ret, cnt, ipz_rc, nr_q_pages;
	void *vpage;
	u64 rpage, h_ret;
	struct ib_device *ib_dev = &shca->ib_device;
	struct ipz_adapter_handle ipz_hca_handle = shca->ipz_hca_handle;

	if (!parms->queue_size)
		return 0;

	if (parms->is_small) {
		nr_q_pages = 1;
		ipz_rc = ipz_queue_ctor(pd, queue, nr_q_pages,
					128 << parms->page_size,
					wqe_size, parms->act_nr_sges, 1);
	} else {
		nr_q_pages = parms->queue_size;
		ipz_rc = ipz_queue_ctor(pd, queue, nr_q_pages,
					EHCA_PAGESIZE, wqe_size,
					parms->act_nr_sges, 0);
	}

	if (!ipz_rc) {
		ehca_err(ib_dev, "Cannot allocate page for queue. ipz_rc=%i",
			 ipz_rc);
		return -EBUSY;
	}

	/* register queue pages */
	for (cnt = 0; cnt < nr_q_pages; cnt++) {
		vpage = ipz_qpageit_get_inc(queue);
		if (!vpage) {
			ehca_err(ib_dev, "ipz_qpageit_get_inc() "
				 "failed p_vpage= %p", vpage);
			ret = -EINVAL;
			goto init_qp_queue1;
		}
		rpage = virt_to_abs(vpage);

		h_ret = hipz_h_register_rpage_qp(ipz_hca_handle,
						 my_qp->ipz_qp_handle,
						 NULL, 0, q_type,
						 rpage, parms->is_small ? 0 : 1,
						 my_qp->galpas.kernel);
		if (cnt == (nr_q_pages - 1)) {	/* last page! */
			if (h_ret != expected_hret) {
				ehca_err(ib_dev, "hipz_qp_register_rpage() "
					 "h_ret=%li", h_ret);
				ret = ehca2ib_return_code(h_ret);
				goto init_qp_queue1;
			}
			vpage = ipz_qpageit_get_inc(&my_qp->ipz_rqueue);
			if (vpage) {
				ehca_err(ib_dev, "ipz_qpageit_get_inc() "
					 "should not succeed vpage=%p", vpage);
				ret = -EINVAL;
				goto init_qp_queue1;
			}
		} else {
			if (h_ret != H_PAGE_REGISTERED) {
				ehca_err(ib_dev, "hipz_qp_register_rpage() "
					 "h_ret=%li", h_ret);
				ret = ehca2ib_return_code(h_ret);
				goto init_qp_queue1;
			}
		}
	}

	ipz_qeit_reset(queue);

	return 0;

init_qp_queue1:
	ipz_queue_dtor(pd, queue);
	return ret;
}
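
/*
 * Note on expected_hret above: firmware acknowledges every intermediate
 * page with H_PAGE_REGISTERED and the final page with a different code,
 * so the caller passes the value it expects for the last page --
 * H_SUCCESS when this queue completes the QP, or H_PAGE_REGISTERED when
 * another queue still has to be registered afterwards (see the
 * HAS_RQ(my_qp) ? H_PAGE_REGISTERED : H_SUCCESS argument at the call site
 * in internal_create_qp()).
 */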
static inline int ehca_calc_wqe_size(int act_nr_sge, int is_llqp)
{
	if (is_llqp)
		return 128 << act_nr_sge;
	else
		return offsetof(struct ehca_wqe,
				u.nud.sg_list[act_nr_sge]);
}
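
/*
 * Worked example: for a regular (non-LL) QP with act_nr_sge = 4, the WQE
 * size is offsetof(struct ehca_wqe, u.nud.sg_list[4]), i.e. the fixed WQE
 * header plus four scatter/gather entries. For an LL QP the argument acts
 * as a shift, so act_nr_sge = 1 selects a 128 << 1 = 256 byte WQE.
 */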
static void ehca_determine_small_queue(struct ehca_alloc_queue_parms *queue,
				       int req_nr_sge, int is_llqp)
{
	u32 wqe_size, q_size;
	int act_nr_sge = req_nr_sge;

	if (!is_llqp)
		/* round up #SGEs so WQE size is a power of 2 */
		for (act_nr_sge = 4; act_nr_sge <= 252;
		     act_nr_sge = 4 + 2 * act_nr_sge)
			if (act_nr_sge >= req_nr_sge)
				break;

	wqe_size = ehca_calc_wqe_size(act_nr_sge, is_llqp);
	q_size = wqe_size * (queue->max_wr + 1);

	if (q_size <= 512)
		queue->page_size = 2;
	else if (q_size <= 1024)
		queue->page_size = 3;
	else
		queue->page_size = 0;

	queue->is_small = (queue->page_size != 0);
}
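
/*
 * Worked example: with req_nr_sge = 5 on a non-LL QP the loop above visits
 * act_nr_sge = 4, then 4 + 2*4 = 12 >= 5, so 12 SGEs are used and the WQE
 * size lands on a power of two. If q_size = wqe_size * (max_wr + 1) fits
 * in 512 bytes, page_size 2 selects 128 << 2 = 512 byte small-queue pages;
 * up to 1024 bytes selects 128 << 3; larger queues fall back to full
 * EHCA_PAGESIZE pages (page_size 0, is_small = 0).
 */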
/* needs to be called with cq->spinlock held */
void ehca_add_to_err_list(struct ehca_qp *qp, int on_sq)
{
	struct list_head *list, *node;

	/* TODO: support low latency QPs */
	if (qp->ext_type == EQPT_LLQP)
		return;

	if (on_sq) {
		list = &qp->send_cq->sqp_err_list;
		node = &qp->sq_err_node;
	} else {
		list = &qp->recv_cq->rqp_err_list;
		node = &qp->rq_err_node;
	}

	if (list_empty(node))
		list_add_tail(node, list);

	return;
}
static void del_from_err_list(struct ehca_cq *cq, struct list_head *node)
{
	unsigned long flags;

	spin_lock_irqsave(&cq->spinlock, flags);

	if (!list_empty(node))
		list_del_init(node);

	spin_unlock_irqrestore(&cq->spinlock, flags);
}
static void reset_queue_map(struct ehca_queue_map *qmap)
{
	int i;

	for (i = 0; i < qmap->entries; i++)
		qmap->map[i].reported = 1;
}
/*
 * Create an ib_qp struct that is either a QP or an SRQ, depending on
 * the value of the is_srq parameter. If init_attr and srq_init_attr share
 * fields, the field out of init_attr is used.
 */
static struct ehca_qp *internal_create_qp(
	struct ib_pd *pd,
	struct ib_qp_init_attr *init_attr,
	struct ib_srq_init_attr *srq_init_attr,
	struct ib_udata *udata, int is_srq)
{
	struct ehca_qp *my_qp, *my_srq = NULL;
	struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
					      ib_device);
	struct ib_ucontext *context = NULL;
	u64 h_ret;
	int is_llqp = 0, has_srq = 0;
	int qp_type, max_send_sge, max_recv_sge, ret;

	/* h_call's out parameters */
	struct ehca_alloc_qp_parms parms;
	u32 swqe_size = 0, rwqe_size = 0, ib_qp_num;
	unsigned long flags;

	if (!atomic_add_unless(&shca->num_qps, 1, shca->max_num_qps)) {
		ehca_err(pd->device, "Unable to create QP, max number of %i "
			 "QPs reached.", shca->max_num_qps);
		ehca_err(pd->device, "To increase the maximum number of QPs "
			 "use the number_of_qps module parameter.\n");
		return ERR_PTR(-ENOSPC);
	}

	if (init_attr->create_flags) {
		atomic_dec(&shca->num_qps);
		return ERR_PTR(-EINVAL);
	}

	memset(&parms, 0, sizeof(parms));
	qp_type = init_attr->qp_type;

	if (init_attr->sq_sig_type != IB_SIGNAL_REQ_WR &&
	    init_attr->sq_sig_type != IB_SIGNAL_ALL_WR) {
		ehca_err(pd->device, "init_attr->sq_sig_type=%x not allowed",
			 init_attr->sq_sig_type);
		atomic_dec(&shca->num_qps);
		return ERR_PTR(-EINVAL);
	}

	/* save LL QP info */
	if (qp_type & 0x80) {
		is_llqp = 1;
		parms.ext_type = EQPT_LLQP;
		parms.ll_comp_flags = qp_type & LLQP_COMP_MASK;
	}
	qp_type &= 0x1F;
	init_attr->qp_type &= 0x1F;

	/* handle SRQ base QPs */
	if (init_attr->srq) {
		my_srq = container_of(init_attr->srq, struct ehca_qp, ib_srq);

		if (qp_type == IB_QPT_UC) {
			ehca_err(pd->device, "UC with SRQ not supported");
			atomic_dec(&shca->num_qps);
			return ERR_PTR(-EINVAL);
		}

		has_srq = 1;
		parms.ext_type = EQPT_SRQBASE;
		parms.srq_qpn = my_srq->real_qp_num;
	}

	if (is_llqp && has_srq) {
		ehca_err(pd->device, "LLQPs can't have an SRQ");
		atomic_dec(&shca->num_qps);
		return ERR_PTR(-EINVAL);
	}

	/* handle SRQs */
	if (is_srq) {
		parms.ext_type = EQPT_SRQ;
		parms.srq_limit = srq_init_attr->attr.srq_limit;
		if (init_attr->cap.max_recv_sge > 3) {
			ehca_err(pd->device, "no more than three SGEs "
				 "supported for SRQ  pd=%p  max_sge=%x",
				 pd, init_attr->cap.max_recv_sge);
			atomic_dec(&shca->num_qps);
			return ERR_PTR(-EINVAL);
		}
	}

	/* check QP type */
	if (qp_type != IB_QPT_UD &&
	    qp_type != IB_QPT_UC &&
	    qp_type != IB_QPT_RC &&
	    qp_type != IB_QPT_SMI &&
	    qp_type != IB_QPT_GSI) {
		ehca_err(pd->device, "wrong QP Type=%x", qp_type);
		atomic_dec(&shca->num_qps);
		return ERR_PTR(-EINVAL);
	}

	if (is_llqp) {
		switch (qp_type) {
		case IB_QPT_RC:
			if ((init_attr->cap.max_send_wr > 255) ||
			    (init_attr->cap.max_recv_wr > 255)) {
				ehca_err(pd->device,
					 "Invalid Number of max_sq_wr=%x "
					 "or max_rq_wr=%x for RC LLQP",
					 init_attr->cap.max_send_wr,
					 init_attr->cap.max_recv_wr);
				atomic_dec(&shca->num_qps);
				return ERR_PTR(-EINVAL);
			}
			break;
		case IB_QPT_UD:
			if (!EHCA_BMASK_GET(HCA_CAP_UD_LL_QP, shca->hca_cap)) {
				ehca_err(pd->device, "UD LLQP not supported "
					 "by this adapter");
				atomic_dec(&shca->num_qps);
				return ERR_PTR(-ENOSYS);
			}
			if (!(init_attr->cap.max_send_sge <= 5
			    && init_attr->cap.max_send_sge >= 1
			    && init_attr->cap.max_recv_sge <= 5
			    && init_attr->cap.max_recv_sge >= 1)) {
				ehca_err(pd->device,
					 "Invalid Number of max_send_sge=%x "
					 "or max_recv_sge=%x for UD LLQP",
					 init_attr->cap.max_send_sge,
					 init_attr->cap.max_recv_sge);
				atomic_dec(&shca->num_qps);
				return ERR_PTR(-EINVAL);
			} else if (init_attr->cap.max_send_wr > 255) {
				ehca_err(pd->device,
					 "Invalid Number of "
					 "max_send_wr=%x for UD QP_TYPE=%x",
					 init_attr->cap.max_send_wr, qp_type);
				atomic_dec(&shca->num_qps);
				return ERR_PTR(-EINVAL);
			}
			break;
		default:
			ehca_err(pd->device, "unsupported LL QP Type=%x",
				 qp_type);
			atomic_dec(&shca->num_qps);
			return ERR_PTR(-EINVAL);
		}
	} else {
		int max_sge = (qp_type == IB_QPT_UD || qp_type == IB_QPT_SMI
			       || qp_type == IB_QPT_GSI) ? 250 : 252;

		if (init_attr->cap.max_send_sge > max_sge
		    || init_attr->cap.max_recv_sge > max_sge) {
			ehca_err(pd->device, "Invalid number of SGEs requested "
				 "send_sge=%x recv_sge=%x max_sge=%x",
				 init_attr->cap.max_send_sge,
				 init_attr->cap.max_recv_sge, max_sge);
			atomic_dec(&shca->num_qps);
			return ERR_PTR(-EINVAL);
		}
	}

	if (pd->uobject && udata)
		context = pd->uobject->context;

	my_qp = kmem_cache_zalloc(qp_cache, GFP_KERNEL);
	if (!my_qp) {
		ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd);
		atomic_dec(&shca->num_qps);
		return ERR_PTR(-ENOMEM);
	}

	atomic_set(&my_qp->nr_events, 0);
	init_waitqueue_head(&my_qp->wait_completion);
	spin_lock_init(&my_qp->spinlock_s);
	spin_lock_init(&my_qp->spinlock_r);
	my_qp->qp_type = qp_type;
	my_qp->ext_type = parms.ext_type;
	my_qp->state = IB_QPS_RESET;

	if (init_attr->recv_cq)
		my_qp->recv_cq =
			container_of(init_attr->recv_cq, struct ehca_cq, ib_cq);
	if (init_attr->send_cq)
		my_qp->send_cq =
			container_of(init_attr->send_cq, struct ehca_cq, ib_cq);

	do {
		if (!idr_pre_get(&ehca_qp_idr, GFP_KERNEL)) {
			ret = -ENOMEM;
			ehca_err(pd->device, "Can't reserve idr resources.");
			goto create_qp_exit0;
		}

		write_lock_irqsave(&ehca_qp_idr_lock, flags);
		ret = idr_get_new(&ehca_qp_idr, my_qp, &my_qp->token);
		write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
	} while (ret == -EAGAIN);

	if (ret) {
		ret = -ENOMEM;
		ehca_err(pd->device, "Can't allocate new idr entry.");
		goto create_qp_exit0;
	}

	if (my_qp->token > 0x1FFFFFF) {
		ret = -EINVAL;
		ehca_err(pd->device, "Invalid number of qp");
		goto create_qp_exit1;
	}

	if (has_srq)
		parms.srq_token = my_qp->token;

	parms.servicetype = ibqptype2servicetype(qp_type);
	if (parms.servicetype < 0) {
		ret = -EINVAL;
		ehca_err(pd->device, "Invalid qp_type=%x", qp_type);
		goto create_qp_exit1;
	}

	/* Always signal by WQE so we can hide circ. WQEs */
	parms.sigtype = HCALL_SIGT_BY_WQE;

	/* UD_AV CIRCUMVENTION */
	max_send_sge = init_attr->cap.max_send_sge;
	max_recv_sge = init_attr->cap.max_recv_sge;
	if (parms.servicetype == ST_UD && !is_llqp) {
		max_send_sge += 2;
		max_recv_sge += 2;
	}

	parms.token = my_qp->token;
	parms.eq_handle = shca->eq.ipz_eq_handle;
	parms.pd = my_pd->fw_pd;
	if (my_qp->send_cq)
		parms.send_cq_handle = my_qp->send_cq->ipz_cq_handle;
	if (my_qp->recv_cq)
		parms.recv_cq_handle = my_qp->recv_cq->ipz_cq_handle;

	parms.squeue.max_wr = init_attr->cap.max_send_wr;
	parms.rqueue.max_wr = init_attr->cap.max_recv_wr;
	parms.squeue.max_sge = max_send_sge;
	parms.rqueue.max_sge = max_recv_sge;

	/* RC QPs need one more SWQE for unsolicited ack circumvention */
	if (qp_type == IB_QPT_RC)
		parms.squeue.max_wr++;

	if (EHCA_BMASK_GET(HCA_CAP_MINI_QP, shca->hca_cap)) {
		if (HAS_SQ(my_qp))
			ehca_determine_small_queue(
				&parms.squeue, max_send_sge, is_llqp);
		if (HAS_RQ(my_qp))
			ehca_determine_small_queue(
				&parms.rqueue, max_recv_sge, is_llqp);
		parms.qp_storage =
			(parms.squeue.is_small || parms.rqueue.is_small);
	}

	h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms);
	if (h_ret != H_SUCCESS) {
		ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%li",
			 h_ret);
		ret = ehca2ib_return_code(h_ret);
		goto create_qp_exit1;
	}

	ib_qp_num = my_qp->real_qp_num = parms.real_qp_num;
	my_qp->ipz_qp_handle = parms.qp_handle;
	my_qp->galpas = parms.galpas;

	swqe_size = ehca_calc_wqe_size(parms.squeue.act_nr_sges, is_llqp);
	rwqe_size = ehca_calc_wqe_size(parms.rqueue.act_nr_sges, is_llqp);

	switch (qp_type) {
	case IB_QPT_RC:
		if (is_llqp) {
			parms.squeue.act_nr_sges = 1;
			parms.rqueue.act_nr_sges = 1;
		}
		/* hide the extra WQE */
		parms.squeue.act_nr_wqes--;
		break;
	case IB_QPT_UD:
	case IB_QPT_GSI:
	case IB_QPT_SMI:
		/* UD circumvention */
		if (is_llqp) {
			parms.squeue.act_nr_sges = 1;
			parms.rqueue.act_nr_sges = 1;
		} else {
			parms.squeue.act_nr_sges -= 2;
			parms.rqueue.act_nr_sges -= 2;
		}

		if (IB_QPT_GSI == qp_type || IB_QPT_SMI == qp_type) {
			parms.squeue.act_nr_wqes = init_attr->cap.max_send_wr;
			parms.rqueue.act_nr_wqes = init_attr->cap.max_recv_wr;
			parms.squeue.act_nr_sges = init_attr->cap.max_send_sge;
			parms.rqueue.act_nr_sges = init_attr->cap.max_recv_sge;
			ib_qp_num = (qp_type == IB_QPT_SMI) ? 0 : 1;
		}

		break;

	default:
		break;
	}

	/* initialize r/squeue and register queue pages */
	if (HAS_SQ(my_qp)) {
		ret = init_qp_queue(
			shca, my_pd, my_qp, &my_qp->ipz_squeue, 0,
			HAS_RQ(my_qp) ? H_PAGE_REGISTERED : H_SUCCESS,
			&parms.squeue, swqe_size);
		if (ret) {
			ehca_err(pd->device, "Couldn't initialize squeue "
				 "and pages ret=%i", ret);
			goto create_qp_exit2;
		}

		my_qp->sq_map.entries = my_qp->ipz_squeue.queue_length /
			my_qp->ipz_squeue.qe_size;
		my_qp->sq_map.map = vmalloc(my_qp->sq_map.entries *
					    sizeof(struct ehca_qmap_entry));
		if (!my_qp->sq_map.map) {
			ehca_err(pd->device, "Couldn't allocate squeue "
				 "map ret=%i", ret);
			goto create_qp_exit3;
		}
		INIT_LIST_HEAD(&my_qp->sq_err_node);
		/* to avoid the generation of bogus flush CQEs */
		reset_queue_map(&my_qp->sq_map);
	}

	if (HAS_RQ(my_qp)) {
		ret = init_qp_queue(
			shca, my_pd, my_qp, &my_qp->ipz_rqueue, 1,
			H_SUCCESS, &parms.rqueue, rwqe_size);
		if (ret) {
			ehca_err(pd->device, "Couldn't initialize rqueue "
				 "and pages ret=%i", ret);
			goto create_qp_exit4;
		}

		my_qp->rq_map.entries = my_qp->ipz_rqueue.queue_length /
			my_qp->ipz_rqueue.qe_size;
		my_qp->rq_map.map = vmalloc(my_qp->rq_map.entries *
					    sizeof(struct ehca_qmap_entry));
		if (!my_qp->rq_map.map) {
			ehca_err(pd->device, "Couldn't allocate rqueue "
				 "map ret=%i", ret);
			goto create_qp_exit5;
		}
		INIT_LIST_HEAD(&my_qp->rq_err_node);
		/* to avoid the generation of bogus flush CQEs */
		reset_queue_map(&my_qp->rq_map);
	} else if (init_attr->srq) {
		/* this is a base QP, use the queue map of the SRQ */
		my_qp->rq_map = my_srq->rq_map;
		INIT_LIST_HEAD(&my_qp->rq_err_node);

		my_qp->ipz_rqueue = my_srq->ipz_rqueue;
	}

	if (is_srq) {
		my_qp->ib_srq.pd = &my_pd->ib_pd;
		my_qp->ib_srq.device = my_pd->ib_pd.device;

		my_qp->ib_srq.srq_context = init_attr->qp_context;
		my_qp->ib_srq.event_handler = init_attr->event_handler;
	} else {
		my_qp->ib_qp.qp_num = ib_qp_num;
		my_qp->ib_qp.pd = &my_pd->ib_pd;
		my_qp->ib_qp.device = my_pd->ib_pd.device;

		my_qp->ib_qp.recv_cq = init_attr->recv_cq;
		my_qp->ib_qp.send_cq = init_attr->send_cq;

		my_qp->ib_qp.qp_type = qp_type;
		my_qp->ib_qp.srq = init_attr->srq;

		my_qp->ib_qp.qp_context = init_attr->qp_context;
		my_qp->ib_qp.event_handler = init_attr->event_handler;
	}

	init_attr->cap.max_inline_data = 0; /* not supported yet */
	init_attr->cap.max_recv_sge = parms.rqueue.act_nr_sges;
	init_attr->cap.max_recv_wr = parms.rqueue.act_nr_wqes;
	init_attr->cap.max_send_sge = parms.squeue.act_nr_sges;
	init_attr->cap.max_send_wr = parms.squeue.act_nr_wqes;
	my_qp->init_attr = *init_attr;

	if (qp_type == IB_QPT_SMI || qp_type == IB_QPT_GSI) {
		shca->sport[init_attr->port_num - 1].ibqp_sqp[qp_type] =
			&my_qp->ib_qp;
		if (ehca_nr_ports < 0) {
			/* alloc array to cache subsequent modify qp parms
			 * for autodetect mode
			 */
			my_qp->mod_qp_parm =
				kzalloc(EHCA_MOD_QP_PARM_MAX *
					sizeof(*my_qp->mod_qp_parm),
					GFP_KERNEL);
			if (!my_qp->mod_qp_parm) {
				ehca_err(pd->device,
					 "Could not alloc mod_qp_parm");
				goto create_qp_exit5;
			}
		}
	}

	/* NOTE: define_apq0() not supported yet */
	if (qp_type == IB_QPT_GSI) {
		h_ret = ehca_define_sqp(shca, my_qp, init_attr);
		if (h_ret != H_SUCCESS) {
			ret = ehca2ib_return_code(h_ret);
			goto create_qp_exit6;
		}
	}

	if (my_qp->send_cq) {
		ret = ehca_cq_assign_qp(my_qp->send_cq, my_qp);
		if (ret) {
			ehca_err(pd->device,
				 "Couldn't assign qp to send_cq ret=%i", ret);
			goto create_qp_exit7;
		}
	}

	/* copy queues, galpa data to user space */
	if (context && udata) {
		struct ehca_create_qp_resp resp;
		memset(&resp, 0, sizeof(resp));

		resp.qp_num = my_qp->real_qp_num;
		resp.token = my_qp->token;
		resp.qp_type = my_qp->qp_type;
		resp.ext_type = my_qp->ext_type;
		resp.qkey = my_qp->qkey;
		resp.real_qp_num = my_qp->real_qp_num;

		if (HAS_SQ(my_qp))
			queue2resp(&resp.ipz_squeue, &my_qp->ipz_squeue);
		if (HAS_RQ(my_qp))
			queue2resp(&resp.ipz_rqueue, &my_qp->ipz_rqueue);
		resp.fw_handle_ofs = (u32)
			(my_qp->galpas.user.fw_handle & (PAGE_SIZE - 1));

		if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
			ehca_err(pd->device, "Copy to udata failed");
			ret = -EINVAL;
			goto create_qp_exit8;
		}
	}

	return my_qp;

create_qp_exit8:
	ehca_cq_unassign_qp(my_qp->send_cq, my_qp->real_qp_num);

create_qp_exit7:
	kfree(my_qp->mod_qp_parm);

create_qp_exit6:
	if (HAS_RQ(my_qp))
		vfree(my_qp->rq_map.map);

create_qp_exit5:
	if (HAS_RQ(my_qp))
		ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);

create_qp_exit4:
	if (HAS_SQ(my_qp))
		vfree(my_qp->sq_map.map);

create_qp_exit3:
	if (HAS_SQ(my_qp))
		ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);

create_qp_exit2:
	hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);

create_qp_exit1:
	write_lock_irqsave(&ehca_qp_idr_lock, flags);
	idr_remove(&ehca_qp_idr, my_qp->token);
	write_unlock_irqrestore(&ehca_qp_idr_lock, flags);

create_qp_exit0:
	kmem_cache_free(qp_cache, my_qp);
	atomic_dec(&shca->num_qps);
	return ERR_PTR(ret);
}
struct ib_qp *ehca_create_qp(struct ib_pd *pd,
			     struct ib_qp_init_attr *qp_init_attr,
			     struct ib_udata *udata)
{
	struct ehca_qp *ret;

	ret = internal_create_qp(pd, qp_init_attr, NULL, udata, 0);
	return IS_ERR(ret) ? (struct ib_qp *)ret : &ret->ib_qp;
}
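
/*
 * Usage sketch (not part of this driver; the cq/pd handles are
 * placeholders): a kernel consumer reaches ehca_create_qp() through the
 * core verbs layer, roughly as follows:
 *
 *	struct ib_qp_init_attr attr = {
 *		.send_cq = scq, .recv_cq = rcq,
 *		.cap = { .max_send_wr = 64, .max_recv_wr = 64,
 *			 .max_send_sge = 1, .max_recv_sge = 1 },
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.qp_type = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &attr);
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */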
static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
			       struct ib_uobject *uobject);
struct ib_srq *ehca_create_srq(struct ib_pd *pd,
			       struct ib_srq_init_attr *srq_init_attr,
			       struct ib_udata *udata)
{
	struct ib_qp_init_attr qp_init_attr;
	struct ehca_qp *my_qp;
	struct ib_srq *ret;
	struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
					      ib_device);
	struct hcp_modify_qp_control_block *mqpcb;
	u64 hret, update_mask;

	/* For common attributes, internal_create_qp() takes its info
	 * out of qp_init_attr, so copy all common attrs there.
	 */
	memset(&qp_init_attr, 0, sizeof(qp_init_attr));
	qp_init_attr.event_handler = srq_init_attr->event_handler;
	qp_init_attr.qp_context = srq_init_attr->srq_context;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.qp_type = IB_QPT_RC;
	qp_init_attr.cap.max_recv_wr = srq_init_attr->attr.max_wr;
	qp_init_attr.cap.max_recv_sge = srq_init_attr->attr.max_sge;

	my_qp = internal_create_qp(pd, &qp_init_attr, srq_init_attr, udata, 1);
	if (IS_ERR(my_qp))
		return (struct ib_srq *)my_qp;

	/* copy back return values */
	srq_init_attr->attr.max_wr = qp_init_attr.cap.max_recv_wr;
	srq_init_attr->attr.max_sge = 3;

	/* drive SRQ into RTR state */
	mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!mqpcb) {
		ehca_err(pd->device, "Could not get zeroed page for mqpcb "
			 "ehca_qp=%p qp_num=%x ", my_qp, my_qp->real_qp_num);
		ret = ERR_PTR(-ENOMEM);
		goto create_srq1;
	}

	mqpcb->qp_state = EHCA_QPS_INIT;
	mqpcb->prim_phys_port = 1;
	update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
	hret = hipz_h_modify_qp(shca->ipz_hca_handle,
				my_qp->ipz_qp_handle,
				&my_qp->pf,
				update_mask,
				mqpcb, my_qp->galpas.kernel);
	if (hret != H_SUCCESS) {
		ehca_err(pd->device, "Could not modify SRQ to INIT "
			 "ehca_qp=%p qp_num=%x h_ret=%li",
			 my_qp, my_qp->real_qp_num, hret);
		goto create_srq2;
	}

	mqpcb->qp_enable = 1;
	update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_ENABLE, 1);
	hret = hipz_h_modify_qp(shca->ipz_hca_handle,
				my_qp->ipz_qp_handle,
				&my_qp->pf,
				update_mask,
				mqpcb, my_qp->galpas.kernel);
	if (hret != H_SUCCESS) {
		ehca_err(pd->device, "Could not enable SRQ "
			 "ehca_qp=%p qp_num=%x h_ret=%li",
			 my_qp, my_qp->real_qp_num, hret);
		goto create_srq2;
	}

	mqpcb->qp_state = EHCA_QPS_RTR;
	update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
	hret = hipz_h_modify_qp(shca->ipz_hca_handle,
				my_qp->ipz_qp_handle,
				&my_qp->pf,
				update_mask,
				mqpcb, my_qp->galpas.kernel);
	if (hret != H_SUCCESS) {
		ehca_err(pd->device, "Could not modify SRQ to RTR "
			 "ehca_qp=%p qp_num=%x h_ret=%li",
			 my_qp, my_qp->real_qp_num, hret);
		goto create_srq2;
	}

	ehca_free_fw_ctrlblock(mqpcb);

	return &my_qp->ib_srq;

create_srq2:
	ret = ERR_PTR(ehca2ib_return_code(hret));
	ehca_free_fw_ctrlblock(mqpcb);

create_srq1:
	internal_destroy_qp(pd->device, my_qp, my_qp->ib_srq.uobject);

	return ret;
}
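
/*
 * The three hipz_h_modify_qp() calls above drive the fresh SRQ through
 * INIT -> enabled -> RTR in firmware, reusing one control block and
 * flagging only the fields touched in each step via update_mask; an SRQ
 * has no send side, so no further transitions are needed before receive
 * WRs can be posted.
 */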
/*
 * prepare_sqe_rts called by internal_modify_qp() at trans sqe -> rts
 * set purge bit of bad wqe and subsequent wqes to avoid reentering sqe
 * returns total number of bad wqes in bad_wqe_cnt
 */
static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
			   int *bad_wqe_cnt)
{
	u64 h_ret;
	struct ipz_queue *squeue;
	void *bad_send_wqe_p, *bad_send_wqe_v;
	u64 q_ofs;
	struct ehca_wqe *wqe;
	int qp_num = my_qp->ib_qp.qp_num;

	/* get send wqe pointer */
	h_ret = hipz_h_disable_and_get_wqe(shca->ipz_hca_handle,
					   my_qp->ipz_qp_handle, &my_qp->pf,
					   &bad_send_wqe_p, NULL, 2);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_h_disable_and_get_wqe() failed"
			 " ehca_qp=%p qp_num=%x h_ret=%li",
			 my_qp, qp_num, h_ret);
		return ehca2ib_return_code(h_ret);
	}
	bad_send_wqe_p = (void *)((u64)bad_send_wqe_p & (~(1L << 63)));
	ehca_dbg(&shca->ib_device, "qp_num=%x bad_send_wqe_p=%p",
		 qp_num, bad_send_wqe_p);
	/* convert wqe pointer to vadr */
	bad_send_wqe_v = abs_to_virt((u64)bad_send_wqe_p);
	if (ehca_debug_level >= 2)
		ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num);
	squeue = &my_qp->ipz_squeue;
	if (ipz_queue_abs_to_offset(squeue, (u64)bad_send_wqe_p, &q_ofs)) {
		ehca_err(&shca->ib_device, "failed to get wqe offset qp_num=%x"
			 " bad_send_wqe_p=%p", qp_num, bad_send_wqe_p);
		return -EFAULT;
	}

	/* loop sets wqe's purge bit */
	wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
	*bad_wqe_cnt = 0;
	while (wqe->optype != 0xff && wqe->wqef != 0xff) {
		if (ehca_debug_level >= 2)
			ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num);
		wqe->nr_of_data_seg = 0; /* suppress data access */
		wqe->wqef = WQEF_PURGE; /* WQE to be purged */
		q_ofs = ipz_queue_advance_offset(squeue, q_ofs);
		wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
		*bad_wqe_cnt = (*bad_wqe_cnt)+1;
	}
	/*
	 * bad wqe will be reprocessed and ignored when poll_cq() is called,
	 * i.e. nr of wqes with flush error status is one less
	 */
	ehca_dbg(&shca->ib_device, "qp_num=%x flusherr_wqe_cnt=%x",
		 qp_num, (*bad_wqe_cnt)-1);
	wqe->wqef = 0;

	return 0;
}
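
/*
 * Informal example of the purge loop above: if the bad WQE is followed by
 * two more posted WQEs, the loop marks all three with WQEF_PURGE (and zero
 * data segments) until it hits the next-free marker (optype/wqef == 0xff)
 * written by internal_modify_qp(), and reports *bad_wqe_cnt = 3; poll_cq()
 * then sees one flush-error completion less, hence the "-1" in the debug
 * message.
 */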
static int calc_left_cqes(u64 wqe_p, struct ipz_queue *ipz_queue,
			  struct ehca_queue_map *qmap)
{
	void *wqe_v;
	u64 q_ofs;
	u32 wqe_idx;

	/* convert real to abs address */
	wqe_p = wqe_p & (~(1UL << 63));

	wqe_v = abs_to_virt(wqe_p);

	if (ipz_queue_abs_to_offset(ipz_queue, wqe_p, &q_ofs)) {
		ehca_gen_err("Invalid offset for calculating left cqes "
			     "wqe_p=%#lx wqe_v=%p\n", wqe_p, wqe_v);
		return -EFAULT;
	}

	wqe_idx = q_ofs / ipz_queue->qe_size;
	if (wqe_idx < qmap->tail)
		qmap->left_to_poll = (qmap->entries - qmap->tail) + wqe_idx;
	else
		qmap->left_to_poll = wqe_idx - qmap->tail;

	return 0;
}
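
/*
 * Worked example of the wraparound arithmetic above: with qmap->entries =
 * 16, qmap->tail = 10 and a firmware pointer mapping to wqe_idx = 3, the
 * index has wrapped, so left_to_poll = (16 - 10) + 3 = 9 outstanding
 * entries; without a wrap (wqe_idx = 13) it is simply 13 - 10 = 3.
 */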
static int check_for_left_cqes(struct ehca_qp *my_qp, struct ehca_shca *shca)
{
	u64 h_ret;
	void *send_wqe_p, *recv_wqe_p;
	int ret;
	unsigned long flags;
	int qp_num = my_qp->ib_qp.qp_num;

	/* this hcall is not supported on base QPs */
	if (my_qp->ext_type != EQPT_SRQBASE) {
		/* get send and receive wqe pointer */
		h_ret = hipz_h_disable_and_get_wqe(shca->ipz_hca_handle,
				my_qp->ipz_qp_handle, &my_qp->pf,
				&send_wqe_p, &recv_wqe_p, 4);
		if (h_ret != H_SUCCESS) {
			ehca_err(&shca->ib_device, "disable_and_get_wqe() "
				 "failed ehca_qp=%p qp_num=%x h_ret=%li",
				 my_qp, qp_num, h_ret);
			return ehca2ib_return_code(h_ret);
		}

		/*
		 * acquire lock to ensure that nobody is polling the cq which
		 * could mean that the qmap->tail pointer is in an
		 * inconsistent state.
		 */
		spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
		ret = calc_left_cqes((u64)send_wqe_p, &my_qp->ipz_squeue,
				     &my_qp->sq_map);
		spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
		if (ret)
			return ret;

		spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
		ret = calc_left_cqes((u64)recv_wqe_p, &my_qp->ipz_rqueue,
				     &my_qp->rq_map);
		spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
		if (ret)
			return ret;
	} else {
		spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
		my_qp->sq_map.left_to_poll = 0;
		spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);

		spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
		my_qp->rq_map.left_to_poll = 0;
		spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
	}

	/* this assures flush cqes being generated only for pending wqes */
	if ((my_qp->sq_map.left_to_poll == 0) &&
	    (my_qp->rq_map.left_to_poll == 0)) {
		spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
		ehca_add_to_err_list(my_qp, 1);
		spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);

		if (HAS_RQ(my_qp)) {
			spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
			ehca_add_to_err_list(my_qp, 0);
			spin_unlock_irqrestore(&my_qp->recv_cq->spinlock,
					       flags);
		}
	}

	return 0;
}
/*
 * internal_modify_qp with circumvention to handle aqp0 properly
 * smi_reset2init indicates if this is an internal reset-to-init-call for
 * smi. This flag must always be zero if called from ehca_modify_qp()!
 * This internal func was introduced to avoid recursion of ehca_modify_qp()!
 */
static int internal_modify_qp(struct ib_qp *ibqp,
			      struct ib_qp_attr *attr,
			      int attr_mask, int smi_reset2init)
{
	enum ib_qp_state qp_cur_state, qp_new_state;
	int cnt, qp_attr_idx, ret = 0;
	enum ib_qp_statetrans statetrans;
	struct hcp_modify_qp_control_block *mqpcb;
	struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
	struct ehca_shca *shca =
		container_of(ibqp->pd->device, struct ehca_shca, ib_device);
	u64 update_mask;
	u64 h_ret;
	int bad_wqe_cnt = 0;
	int squeue_locked = 0;
	unsigned long flags = 0;

	/* do query_qp to obtain current attr values */
	mqpcb = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
	if (!mqpcb) {
		ehca_err(ibqp->device, "Could not get zeroed page for mqpcb "
			 "ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num);
		return -ENOMEM;
	}

	h_ret = hipz_h_query_qp(shca->ipz_hca_handle,
				my_qp->ipz_qp_handle,
				&my_qp->pf,
				mqpcb, my_qp->galpas.kernel);
	if (h_ret != H_SUCCESS) {
		ehca_err(ibqp->device, "hipz_h_query_qp() failed "
			 "ehca_qp=%p qp_num=%x h_ret=%li",
			 my_qp, ibqp->qp_num, h_ret);
		ret = ehca2ib_return_code(h_ret);
		goto modify_qp_exit1;
	}

	qp_cur_state = ehca2ib_qp_state(mqpcb->qp_state);

	if (qp_cur_state == -EINVAL) {	/* invalid qp state */
		ret = -EINVAL;
		ehca_err(ibqp->device, "Invalid current ehca_qp_state=%x "
			 "ehca_qp=%p qp_num=%x",
			 mqpcb->qp_state, my_qp, ibqp->qp_num);
		goto modify_qp_exit1;
	}
	/*
	 * circumvention to set aqp0 initial state to init
	 * as expected by IB spec
	 */
	if (smi_reset2init == 0 &&
	    ibqp->qp_type == IB_QPT_SMI &&
	    qp_cur_state == IB_QPS_RESET &&
	    (attr_mask & IB_QP_STATE) &&
	    attr->qp_state == IB_QPS_INIT) { /* RESET -> INIT */
		struct ib_qp_attr smiqp_attr = {
			.qp_state = IB_QPS_INIT,
			.port_num = my_qp->init_attr.port_num,
			.pkey_index = 0,
			.qkey = 0
		};
		int smiqp_attr_mask = IB_QP_STATE | IB_QP_PORT |
			IB_QP_PKEY_INDEX | IB_QP_QKEY;
		int smirc = internal_modify_qp(
			ibqp, &smiqp_attr, smiqp_attr_mask, 1);
		if (smirc) {
			ehca_err(ibqp->device, "SMI RESET -> INIT failed. "
				 "ehca_modify_qp() rc=%i", smirc);
			ret = H_PARAMETER;
			goto modify_qp_exit1;
		}
		qp_cur_state = IB_QPS_INIT;
		ehca_dbg(ibqp->device, "SMI RESET -> INIT succeeded");
	}
	/* is transmitted current state equal to "real" current state */
	if ((attr_mask & IB_QP_CUR_STATE) &&
	    qp_cur_state != attr->cur_qp_state) {
		ret = -EINVAL;
		ehca_err(ibqp->device,
			 "Invalid IB_QP_CUR_STATE attr->curr_qp_state=%x <>"
			 " actual cur_qp_state=%x. ehca_qp=%p qp_num=%x",
			 attr->cur_qp_state, qp_cur_state, my_qp, ibqp->qp_num);
		goto modify_qp_exit1;
	}

	ehca_dbg(ibqp->device, "ehca_qp=%p qp_num=%x current qp_state=%x "
		 "new qp_state=%x attribute_mask=%x",
		 my_qp, ibqp->qp_num, qp_cur_state, attr->qp_state, attr_mask);

	qp_new_state = attr_mask & IB_QP_STATE ? attr->qp_state : qp_cur_state;
	if (!smi_reset2init &&
	    !ib_modify_qp_is_ok(qp_cur_state, qp_new_state, ibqp->qp_type,
				attr_mask)) {
		ret = -EINVAL;
		ehca_err(ibqp->device,
			 "Invalid qp transition new_state=%x cur_state=%x "
			 "ehca_qp=%p qp_num=%x attr_mask=%x", qp_new_state,
			 qp_cur_state, my_qp, ibqp->qp_num, attr_mask);
		goto modify_qp_exit1;
	}

	mqpcb->qp_state = ib2ehca_qp_state(qp_new_state);
	if (mqpcb->qp_state)
		update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
	else {
		ret = -EINVAL;
		ehca_err(ibqp->device, "Invalid new qp state=%x "
			 "ehca_qp=%p qp_num=%x",
			 qp_new_state, my_qp, ibqp->qp_num);
		goto modify_qp_exit1;
	}

	/* retrieve state transition struct to get req and opt attrs */
	statetrans = get_modqp_statetrans(qp_cur_state, qp_new_state);
	if (statetrans < 0) {
		ret = -EINVAL;
		ehca_err(ibqp->device, "<INVALID STATE CHANGE> qp_cur_state=%x "
			 "new_qp_state=%x State_xsition=%x ehca_qp=%p "
			 "qp_num=%x", qp_cur_state, qp_new_state,
			 statetrans, my_qp, ibqp->qp_num);
		goto modify_qp_exit1;
	}

	qp_attr_idx = ib2ehcaqptype(ibqp->qp_type);

	if (qp_attr_idx < 0) {
		ret = qp_attr_idx;
		ehca_err(ibqp->device,
			 "Invalid QP type=%x ehca_qp=%p qp_num=%x",
			 ibqp->qp_type, my_qp, ibqp->qp_num);
		goto modify_qp_exit1;
	}

	ehca_dbg(ibqp->device,
		 "ehca_qp=%p qp_num=%x <VALID STATE CHANGE> qp_state_xsit=%x",
		 my_qp, ibqp->qp_num, statetrans);

	/* eHCA2 rev2 and higher require the SEND_GRH_FLAG to be set
	 * in non-LL UD QPs.
	 */
	if ((my_qp->qp_type == IB_QPT_UD) &&
	    (my_qp->ext_type != EQPT_LLQP) &&
	    (statetrans == IB_QPST_INIT2RTR) &&
	    (shca->hw_level >= 0x22)) {
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1);
		mqpcb->send_grh_flag = 1;
	}

	/* sqe -> rts: set purge bit of bad wqe before actual trans */
	if ((my_qp->qp_type == IB_QPT_UD ||
	     my_qp->qp_type == IB_QPT_GSI ||
	     my_qp->qp_type == IB_QPT_SMI) &&
	    statetrans == IB_QPST_SQE2RTS) {
		/* mark next free wqe if kernel */
		if (!ibqp->uobject) {
			struct ehca_wqe *wqe;
			/* lock send queue */
			spin_lock_irqsave(&my_qp->spinlock_s, flags);
			squeue_locked = 1;
			/* mark next free wqe */
			wqe = (struct ehca_wqe *)
				ipz_qeit_get(&my_qp->ipz_squeue);
			wqe->optype = wqe->wqef = 0xff;
			ehca_dbg(ibqp->device, "qp_num=%x next_free_wqe=%p",
				 ibqp->qp_num, wqe);
		}
		ret = prepare_sqe_rts(my_qp, shca, &bad_wqe_cnt);
		if (ret) {
			ehca_err(ibqp->device, "prepare_sqe_rts() failed "
				 "ehca_qp=%p qp_num=%x ret=%i",
				 my_qp, ibqp->qp_num, ret);
			goto modify_qp_exit2;
		}
	}

	/*
	 * enable RDMA_Atomic_Control if reset->init and reliable connection
	 * this is necessary since gen2 does not provide that flag,
	 * but pHyp requires it
	 */
	if (statetrans == IB_QPST_RESET2INIT &&
	    (ibqp->qp_type == IB_QPT_RC || ibqp->qp_type == IB_QPT_UC)) {
		mqpcb->rdma_atomic_ctrl = 3;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RDMA_ATOMIC_CTRL, 1);
	}

	/* circ. pHyp requires #RDMA/Atomic Resp Res for UC INIT -> RTR */
	if (statetrans == IB_QPST_INIT2RTR &&
	    (ibqp->qp_type == IB_QPT_UC) &&
	    !(attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)) {
		mqpcb->rdma_nr_atomic_resp_res = 1; /* default to 1 */
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES, 1);
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		if (attr->pkey_index >= 16) {
			ret = -EINVAL;
			ehca_err(ibqp->device, "Invalid pkey_index=%x. "
				 "ehca_qp=%p qp_num=%x max_pkey_index=f",
				 attr->pkey_index, my_qp, ibqp->qp_num);
			goto modify_qp_exit2;
		}
		mqpcb->prim_p_key_idx = attr->pkey_index;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_P_KEY_IDX, 1);
	}
	if (attr_mask & IB_QP_PORT) {
		struct ehca_sport *sport;
		struct ehca_qp *aqp1;
		if (attr->port_num < 1 || attr->port_num > shca->num_ports) {
			ret = -EINVAL;
			ehca_err(ibqp->device, "Invalid port=%x. "
				 "ehca_qp=%p qp_num=%x num_ports=%x",
				 attr->port_num, my_qp, ibqp->qp_num,
				 shca->num_ports);
			goto modify_qp_exit2;
		}
		sport = &shca->sport[attr->port_num - 1];
		if (!sport->ibqp_sqp[IB_QPT_GSI]) {
			/* should not occur */
			ret = -EFAULT;
			ehca_err(ibqp->device, "AQP1 was not created for "
				 "port=%x", attr->port_num);
			goto modify_qp_exit2;
		}
		aqp1 = container_of(sport->ibqp_sqp[IB_QPT_GSI],
				    struct ehca_qp, ib_qp);
		if (ibqp->qp_type != IB_QPT_GSI &&
		    ibqp->qp_type != IB_QPT_SMI &&
		    aqp1->mod_qp_parm) {
			/*
			 * firmware will reject this modify_qp() because
			 * port is not activated/initialized fully
			 */
			ret = -EFAULT;
			ehca_warn(ibqp->device, "Couldn't modify qp port=%x: "
				  "either port is being activated (try again) "
				  "or cabling issue", attr->port_num);
			goto modify_qp_exit2;
		}
		mqpcb->prim_phys_port = attr->port_num;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_PHYS_PORT, 1);
	}
	if (attr_mask & IB_QP_QKEY) {
		mqpcb->qkey = attr->qkey;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_QKEY, 1);
	}
	if (attr_mask & IB_QP_AV) {
		mqpcb->dlid = attr->ah_attr.dlid;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DLID, 1);
		mqpcb->source_path_bits = attr->ah_attr.src_path_bits;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SOURCE_PATH_BITS, 1);
		mqpcb->service_level = attr->ah_attr.sl;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL, 1);

		if (ehca_calc_ipd(shca, mqpcb->prim_phys_port,
				  attr->ah_attr.static_rate,
				  &mqpcb->max_static_rate)) {
			ret = -EINVAL;
			goto modify_qp_exit2;
		}
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE, 1);

		/*
		 * Always supply the GRH flag, even if it's zero, to give the
		 * hypervisor a clear "yes" or "no" instead of a "perhaps"
		 */
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1);

		/*
		 * only if GRH is TRUE we might consider SOURCE_GID_IDX
		 * and DEST_GID otherwise phype will return H_ATTR_PARM!!!
		 */
		if (attr->ah_attr.ah_flags == IB_AH_GRH) {
			mqpcb->send_grh_flag = 1;

			mqpcb->source_gid_idx = attr->ah_attr.grh.sgid_index;
			update_mask |=
				EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX, 1);

			for (cnt = 0; cnt < 16; cnt++)
				mqpcb->dest_gid.byte[cnt] =
					attr->ah_attr.grh.dgid.raw[cnt];

			update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DEST_GID, 1);
			mqpcb->flow_label = attr->ah_attr.grh.flow_label;
			update_mask |= EHCA_BMASK_SET(MQPCB_MASK_FLOW_LABEL, 1);
			mqpcb->hop_limit = attr->ah_attr.grh.hop_limit;
			update_mask |= EHCA_BMASK_SET(MQPCB_MASK_HOP_LIMIT, 1);
			mqpcb->traffic_class = attr->ah_attr.grh.traffic_class;
			update_mask |=
				EHCA_BMASK_SET(MQPCB_MASK_TRAFFIC_CLASS, 1);
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		/* store ld(MTU) */
		my_qp->mtu_shift = attr->path_mtu + 7;
		mqpcb->path_mtu = attr->path_mtu;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PATH_MTU, 1);
	}
	if (attr_mask & IB_QP_TIMEOUT) {
		mqpcb->timeout = attr->timeout;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_TIMEOUT, 1);
	}
	if (attr_mask & IB_QP_RETRY_CNT) {
		mqpcb->retry_count = attr->retry_cnt;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RETRY_COUNT, 1);
	}
	if (attr_mask & IB_QP_RNR_RETRY) {
		mqpcb->rnr_retry_count = attr->rnr_retry;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RNR_RETRY_COUNT, 1);
	}
	if (attr_mask & IB_QP_RQ_PSN) {
		mqpcb->receive_psn = attr->rq_psn;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RECEIVE_PSN, 1);
	}
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		mqpcb->rdma_nr_atomic_resp_res = attr->max_dest_rd_atomic < 3 ?
			attr->max_dest_rd_atomic : 2;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES, 1);
	}
	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		mqpcb->rdma_atomic_outst_dest_qp = attr->max_rd_atomic < 3 ?
			attr->max_rd_atomic : 2;
		update_mask |= EHCA_BMASK_SET
			(MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP, 1);
	}
	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_port_num < 1
		    || attr->alt_port_num > shca->num_ports) {
			ret = -EINVAL;
			ehca_err(ibqp->device, "Invalid alt_port=%x. "
				 "ehca_qp=%p qp_num=%x num_ports=%x",
				 attr->alt_port_num, my_qp, ibqp->qp_num,
				 shca->num_ports);
			goto modify_qp_exit2;
		}
		mqpcb->alt_phys_port = attr->alt_port_num;

		if (attr->alt_pkey_index >= 16) {
			ret = -EINVAL;
			ehca_err(ibqp->device, "Invalid alt_pkey_index=%x. "
				 "ehca_qp=%p qp_num=%x max_pkey_index=f",
				 attr->pkey_index, my_qp, ibqp->qp_num);
			goto modify_qp_exit2;
		}
		mqpcb->alt_p_key_idx = attr->alt_pkey_index;

		mqpcb->timeout_al = attr->alt_timeout;
		mqpcb->dlid_al = attr->alt_ah_attr.dlid;
		mqpcb->source_path_bits_al = attr->alt_ah_attr.src_path_bits;
		mqpcb->service_level_al = attr->alt_ah_attr.sl;

		if (ehca_calc_ipd(shca, mqpcb->alt_phys_port,
				  attr->alt_ah_attr.static_rate,
				  &mqpcb->max_static_rate_al)) {
			ret = -EINVAL;
			goto modify_qp_exit2;
		}

		/* OpenIB doesn't support alternate retry counts - copy them */
		mqpcb->retry_count_al = mqpcb->retry_count;
		mqpcb->rnr_retry_count_al = mqpcb->rnr_retry_count;

		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_ALT_PHYS_PORT, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_ALT_P_KEY_IDX, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_TIMEOUT_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_DLID_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_SOURCE_PATH_BITS_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_RETRY_COUNT_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_RNR_RETRY_COUNT_AL, 1);

		/*
		 * Always supply the GRH flag, even if it's zero, to give the
		 * hypervisor a clear "yes" or "no" instead of a "perhaps"
		 */
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG_AL, 1);

		/*
		 * only if GRH is TRUE we might consider SOURCE_GID_IDX
		 * and DEST_GID otherwise phype will return H_ATTR_PARM!!!
		 */
		if (attr->alt_ah_attr.ah_flags == IB_AH_GRH) {
			mqpcb->send_grh_flag_al = 1;

			for (cnt = 0; cnt < 16; cnt++)
				mqpcb->dest_gid_al.byte[cnt] =
					attr->alt_ah_attr.grh.dgid.raw[cnt];
			mqpcb->source_gid_idx_al =
				attr->alt_ah_attr.grh.sgid_index;
			mqpcb->flow_label_al = attr->alt_ah_attr.grh.flow_label;
			mqpcb->hop_limit_al = attr->alt_ah_attr.grh.hop_limit;
			mqpcb->traffic_class_al =
				attr->alt_ah_attr.grh.traffic_class;

			update_mask |=
				EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX_AL, 1)
				| EHCA_BMASK_SET(MQPCB_MASK_DEST_GID_AL, 1)
				| EHCA_BMASK_SET(MQPCB_MASK_FLOW_LABEL_AL, 1)
				| EHCA_BMASK_SET(MQPCB_MASK_HOP_LIMIT_AL, 1) |
				EHCA_BMASK_SET(MQPCB_MASK_TRAFFIC_CLASS_AL, 1);
		}
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		mqpcb->min_rnr_nak_timer_field = attr->min_rnr_timer;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD, 1);
	}

	if (attr_mask & IB_QP_SQ_PSN) {
		mqpcb->send_psn = attr->sq_psn;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_PSN, 1);
	}

	if (attr_mask & IB_QP_DEST_QPN) {
		mqpcb->dest_qp_nr = attr->dest_qp_num;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DEST_QP_NR, 1);
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		if (attr->path_mig_state != IB_MIG_REARM
		    && attr->path_mig_state != IB_MIG_MIGRATED) {
			ret = -EINVAL;
			ehca_err(ibqp->device, "Invalid mig_state=%x",
				 attr->path_mig_state);
			goto modify_qp_exit2;
		}
		mqpcb->path_migration_state = attr->path_mig_state + 1;
		if (attr->path_mig_state == IB_MIG_REARM)
			my_qp->mig_armed = 1;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_PATH_MIGRATION_STATE, 1);
	}

	if (attr_mask & IB_QP_CAP) {
		mqpcb->max_nr_outst_send_wr = attr->cap.max_send_wr+1;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_MAX_NR_OUTST_SEND_WR, 1);
		mqpcb->max_nr_outst_recv_wr = attr->cap.max_recv_wr+1;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_MAX_NR_OUTST_RECV_WR, 1);
		/* no support for max_send/recv_sge yet */
	}

	if (ehca_debug_level >= 2)
		ehca_dmp(mqpcb, 4*70, "qp_num=%x", ibqp->qp_num);

	h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
				 my_qp->ipz_qp_handle,
				 &my_qp->pf,
				 update_mask,
				 mqpcb, my_qp->galpas.kernel);

	if (h_ret != H_SUCCESS) {
		ret = ehca2ib_return_code(h_ret);
		ehca_err(ibqp->device, "hipz_h_modify_qp() failed h_ret=%li "
			 "ehca_qp=%p qp_num=%x", h_ret, my_qp, ibqp->qp_num);
		goto modify_qp_exit2;
	}

	if ((my_qp->qp_type == IB_QPT_UD ||
	     my_qp->qp_type == IB_QPT_GSI ||
	     my_qp->qp_type == IB_QPT_SMI) &&
	    statetrans == IB_QPST_SQE2RTS) {
		/* doorbell to reprocessing wqes */
		iosync(); /* serialize GAL register access */
		hipz_update_sqa(my_qp, bad_wqe_cnt-1);
		ehca_gen_dbg("doorbell for %x wqes", bad_wqe_cnt);
	}

	if (statetrans == IB_QPST_RESET2INIT ||
	    statetrans == IB_QPST_INIT2INIT) {
		mqpcb->qp_enable = 1;
		mqpcb->qp_state = EHCA_QPS_INIT;
		update_mask = 0;
		update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_ENABLE, 1);

		h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
					 my_qp->ipz_qp_handle,
					 &my_qp->pf,
					 update_mask,
					 mqpcb,
					 my_qp->galpas.kernel);

		if (h_ret != H_SUCCESS) {
			ret = ehca2ib_return_code(h_ret);
			ehca_err(ibqp->device, "ENABLE in context of "
				 "RESET_2_INIT failed! Maybe you didn't get "
				 "a LID h_ret=%li ehca_qp=%p qp_num=%x",
				 h_ret, my_qp, ibqp->qp_num);
			goto modify_qp_exit2;
		}
	}

	if ((qp_new_state == IB_QPS_ERR) && (qp_cur_state != IB_QPS_ERR)) {
		ret = check_for_left_cqes(my_qp, shca);
		if (ret)
			goto modify_qp_exit2;
	}

	if (statetrans == IB_QPST_ANY2RESET) {
		ipz_qeit_reset(&my_qp->ipz_rqueue);
		ipz_qeit_reset(&my_qp->ipz_squeue);

		if (qp_cur_state == IB_QPS_ERR) {
			del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);

			if (HAS_RQ(my_qp))
				del_from_err_list(my_qp->recv_cq,
						  &my_qp->rq_err_node);
		}
		reset_queue_map(&my_qp->sq_map);

		if (HAS_RQ(my_qp))
			reset_queue_map(&my_qp->rq_map);
	}

	if (attr_mask & IB_QP_QKEY)
		my_qp->qkey = attr->qkey;

modify_qp_exit2:
	if (squeue_locked) { /* this means: sqe -> rts */
		spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
		my_qp->sqerr_purgeflag = 1;
	}

modify_qp_exit1:
	ehca_free_fw_ctrlblock(mqpcb);

	return ret;
}
int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
		   struct ib_udata *udata)
{
	int ret = 0;

	struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
					      ib_device);
	struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);

	/* The if-block below caches qp_attr to be modified for GSI and SMI
	 * qps during the initialization by ib_mad. When the respective port
	 * is activated, ie we got an event PORT_ACTIVE, we'll replay the
	 * cached modify calls sequence, see ehca_recover_sqp() below.
	 * Why that is required:
	 * 1) If one port is connected, older code requires that port one
	 *    to be connected and module option nr_ports=1 to be given by
	 *    user, which is very inconvenient for end user.
	 * 2) Firmware accepts modify_qp() only if respective port has become
	 *    active. Older code had a wait loop of 30sec create_qp()/
	 *    define_aqp1(), which is not appropriate in practice. This
	 *    code now removes that wait loop, see define_aqp1(), and always
	 *    reports all ports to ib_mad resp. users. Only activated ports
	 *    will then be usable for the users.
	 */
	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) {
		int port = my_qp->init_attr.port_num;
		struct ehca_sport *sport = &shca->sport[port - 1];
		unsigned long flags;
		spin_lock_irqsave(&sport->mod_sqp_lock, flags);
		/* cache qp_attr only during init */
		if (my_qp->mod_qp_parm) {
			struct ehca_mod_qp_parm *p;
			if (my_qp->mod_qp_parm_idx >= EHCA_MOD_QP_PARM_MAX) {
				ehca_err(&shca->ib_device,
					 "mod_qp_parm overflow state=%x port=%x"
					 " type=%x", attr->qp_state,
					 my_qp->init_attr.port_num,
					 ibqp->qp_type);
				spin_unlock_irqrestore(&sport->mod_sqp_lock,
						       flags);
				return -EINVAL;
			}
			p = &my_qp->mod_qp_parm[my_qp->mod_qp_parm_idx];
			p->mask = attr_mask;
			p->attr = *attr;
			my_qp->mod_qp_parm_idx++;
			ehca_dbg(&shca->ib_device,
				 "Saved qp_attr for state=%x port=%x type=%x",
				 attr->qp_state, my_qp->init_attr.port_num,
				 ibqp->qp_type);
			spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
			goto out;
		}
		spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
	}

	ret = internal_modify_qp(ibqp, attr, attr_mask, 0);

out:
	if ((ret == 0) && (attr_mask & IB_QP_STATE))
		my_qp->state = attr->qp_state;

	return ret;
}
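
/*
 * Usage sketch (not part of this driver): a standard verbs transition to
 * INIT arrives here via the core layer, e.g.
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state = IB_QPS_INIT,
 *		.pkey_index = 0,
 *		.port_num = 1,
 *		.qp_access_flags = 0,
 *	};
 *	int rc = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			      IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 *
 * For GSI/SMI QPs on a not-yet-active port the attributes are cached as
 * shown above and replayed by ehca_recover_sqp() on PORT_ACTIVE.
 */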
void ehca_recover_sqp(struct ib_qp *sqp)
{
	struct ehca_qp *my_sqp = container_of(sqp, struct ehca_qp, ib_qp);
	int port = my_sqp->init_attr.port_num;
	struct ib_qp_attr attr;
	struct ehca_mod_qp_parm *qp_parm;
	int i, qp_parm_idx, ret;
	unsigned long flags, wr_cnt;

	if (!my_sqp->mod_qp_parm)
		return;
	ehca_dbg(sqp->device, "SQP port=%x qp_num=%x", port, sqp->qp_num);

	qp_parm = my_sqp->mod_qp_parm;
	qp_parm_idx = my_sqp->mod_qp_parm_idx;
	for (i = 0; i < qp_parm_idx; i++) {
		attr = qp_parm[i].attr;
		ret = internal_modify_qp(sqp, &attr, qp_parm[i].mask, 0);
		if (ret) {
			ehca_err(sqp->device, "Could not modify SQP port=%x "
				 "qp_num=%x ret=%x", port, sqp->qp_num, ret);
			goto free;
		}
		ehca_dbg(sqp->device, "SQP port=%x qp_num=%x in state=%x",
			 port, sqp->qp_num, attr.qp_state);
	}

	/* re-trigger posted recv wrs */
	wr_cnt = my_sqp->ipz_rqueue.current_q_offset /
		my_sqp->ipz_rqueue.qe_size;
	if (wr_cnt) {
		spin_lock_irqsave(&my_sqp->spinlock_r, flags);
		hipz_update_rqa(my_sqp, wr_cnt);
		spin_unlock_irqrestore(&my_sqp->spinlock_r, flags);
		ehca_dbg(sqp->device, "doorbell port=%x qp_num=%x wr_cnt=%lx",
			 port, sqp->qp_num, wr_cnt);
	}

free:
	kfree(qp_parm);
	/* this prevents subsequent calls to modify_qp() to cache qp_attr */
	my_sqp->mod_qp_parm = NULL;
}
int ehca_query_qp(struct ib_qp *qp,
		  struct ib_qp_attr *qp_attr,
		  int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
	struct ehca_shca *shca = container_of(qp->device, struct ehca_shca,
					      ib_device);
	struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
	struct hcp_modify_qp_control_block *qpcb;
	int cnt, ret = 0;
	u64 h_ret;

	if (qp_attr_mask & QP_ATTR_QUERY_NOT_SUPPORTED) {
		ehca_err(qp->device, "Invalid attribute mask "
			 "ehca_qp=%p qp_num=%x qp_attr_mask=%x ",
			 my_qp, qp->qp_num, qp_attr_mask);
		return -EINVAL;
	}

	qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!qpcb) {
		ehca_err(qp->device, "Out of memory for qpcb "
			 "ehca_qp=%p qp_num=%x", my_qp, qp->qp_num);
		return -ENOMEM;
	}

	h_ret = hipz_h_query_qp(adapter_handle,
				my_qp->ipz_qp_handle,
				&my_qp->pf,
				qpcb, my_qp->galpas.kernel);

	if (h_ret != H_SUCCESS) {
		ret = ehca2ib_return_code(h_ret);
		ehca_err(qp->device, "hipz_h_query_qp() failed "
			 "ehca_qp=%p qp_num=%x h_ret=%li",
			 my_qp, qp->qp_num, h_ret);
		goto query_qp_exit1;
	}

	qp_attr->cur_qp_state = ehca2ib_qp_state(qpcb->qp_state);
	qp_attr->qp_state = qp_attr->cur_qp_state;

	if (qp_attr->cur_qp_state == -EINVAL) {
		ret = -EINVAL;
		ehca_err(qp->device, "Got invalid ehca_qp_state=%x "
			 "ehca_qp=%p qp_num=%x",
			 qpcb->qp_state, my_qp, qp->qp_num);
		goto query_qp_exit1;
	}

	if (qp_attr->qp_state == IB_QPS_SQD)
		qp_attr->sq_draining = 1;

	qp_attr->qkey = qpcb->qkey;
	qp_attr->path_mtu = qpcb->path_mtu;
	qp_attr->path_mig_state = qpcb->path_migration_state - 1;
	qp_attr->rq_psn = qpcb->receive_psn;
	qp_attr->sq_psn = qpcb->send_psn;
	qp_attr->min_rnr_timer = qpcb->min_rnr_nak_timer_field;
	qp_attr->cap.max_send_wr = qpcb->max_nr_outst_send_wr-1;
	qp_attr->cap.max_recv_wr = qpcb->max_nr_outst_recv_wr-1;
	/* UD_AV CIRCUMVENTION */
	if (my_qp->qp_type == IB_QPT_UD) {
		qp_attr->cap.max_send_sge =
			qpcb->actual_nr_sges_in_sq_wqe - 2;
		qp_attr->cap.max_recv_sge =
			qpcb->actual_nr_sges_in_rq_wqe - 2;
	} else {
		qp_attr->cap.max_send_sge =
			qpcb->actual_nr_sges_in_sq_wqe;
		qp_attr->cap.max_recv_sge =
			qpcb->actual_nr_sges_in_rq_wqe;
	}

	qp_attr->cap.max_inline_data = my_qp->sq_max_inline_data_size;
	qp_attr->dest_qp_num = qpcb->dest_qp_nr;

	qp_attr->pkey_index =
		EHCA_BMASK_GET(MQPCB_PRIM_P_KEY_IDX, qpcb->prim_p_key_idx);

	qp_attr->port_num =
		EHCA_BMASK_GET(MQPCB_PRIM_PHYS_PORT, qpcb->prim_phys_port);

	qp_attr->timeout = qpcb->timeout;
	qp_attr->retry_cnt = qpcb->retry_count;
	qp_attr->rnr_retry = qpcb->rnr_retry_count;

	qp_attr->alt_pkey_index =
		EHCA_BMASK_GET(MQPCB_PRIM_P_KEY_IDX, qpcb->alt_p_key_idx);

	qp_attr->alt_port_num = qpcb->alt_phys_port;
	qp_attr->alt_timeout = qpcb->timeout_al;

	qp_attr->max_dest_rd_atomic = qpcb->rdma_nr_atomic_resp_res;
	qp_attr->max_rd_atomic = qpcb->rdma_atomic_outst_dest_qp;

	/* primary av */
	qp_attr->ah_attr.sl = qpcb->service_level;

	if (qpcb->send_grh_flag) {
		qp_attr->ah_attr.ah_flags = IB_AH_GRH;
	}

	qp_attr->ah_attr.static_rate = qpcb->max_static_rate;
	qp_attr->ah_attr.dlid = qpcb->dlid;
	qp_attr->ah_attr.src_path_bits = qpcb->source_path_bits;
	qp_attr->ah_attr.port_num = qp_attr->port_num;

	/* primary GRH */
	qp_attr->ah_attr.grh.traffic_class = qpcb->traffic_class;
	qp_attr->ah_attr.grh.hop_limit = qpcb->hop_limit;
	qp_attr->ah_attr.grh.sgid_index = qpcb->source_gid_idx;
	qp_attr->ah_attr.grh.flow_label = qpcb->flow_label;

	for (cnt = 0; cnt < 16; cnt++)
		qp_attr->ah_attr.grh.dgid.raw[cnt] =
			qpcb->dest_gid.byte[cnt];

	/* alternate AV */
	qp_attr->alt_ah_attr.sl = qpcb->service_level_al;
	if (qpcb->send_grh_flag_al) {
		qp_attr->alt_ah_attr.ah_flags = IB_AH_GRH;
	}

	qp_attr->alt_ah_attr.static_rate = qpcb->max_static_rate_al;
	qp_attr->alt_ah_attr.dlid = qpcb->dlid_al;
	qp_attr->alt_ah_attr.src_path_bits = qpcb->source_path_bits_al;

	/* alternate GRH */
	qp_attr->alt_ah_attr.grh.traffic_class = qpcb->traffic_class_al;
	qp_attr->alt_ah_attr.grh.hop_limit = qpcb->hop_limit_al;
	qp_attr->alt_ah_attr.grh.sgid_index = qpcb->source_gid_idx_al;
	qp_attr->alt_ah_attr.grh.flow_label = qpcb->flow_label_al;

	for (cnt = 0; cnt < 16; cnt++)
		qp_attr->alt_ah_attr.grh.dgid.raw[cnt] =
			qpcb->dest_gid_al.byte[cnt];

	/* return init attributes given in ehca_create_qp */
	if (qp_init_attr)
		*qp_init_attr = my_qp->init_attr;

	if (ehca_debug_level >= 2)
		ehca_dmp(qpcb, 4*70, "qp_num=%x", qp->qp_num);

query_qp_exit1:
	ehca_free_fw_ctrlblock(qpcb);

	return ret;
}
int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		    enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct ehca_qp *my_qp =
		container_of(ibsrq, struct ehca_qp, ib_srq);
	struct ehca_shca *shca =
		container_of(ibsrq->pd->device, struct ehca_shca, ib_device);
	struct hcp_modify_qp_control_block *mqpcb;
	u64 update_mask;
	u64 h_ret;
	int ret = 0;

	mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!mqpcb) {
		ehca_err(ibsrq->device, "Could not get zeroed page for mqpcb "
			 "ehca_qp=%p qp_num=%x ", my_qp, my_qp->real_qp_num);
		return -ENOMEM;
	}

	update_mask = 0;
	if (attr_mask & IB_SRQ_LIMIT) {
		attr_mask &= ~IB_SRQ_LIMIT;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_CURR_SRQ_LIMIT, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG, 1);
		mqpcb->curr_srq_limit =
			EHCA_BMASK_SET(MQPCB_CURR_SRQ_LIMIT, attr->srq_limit);
		mqpcb->qp_aff_asyn_ev_log_reg =
			EHCA_BMASK_SET(QPX_AAELOG_RESET_SRQ_LIMIT, 1);
	}

	/* by now, all bits in attr_mask should have been cleared */
	if (attr_mask) {
		ehca_err(ibsrq->device, "invalid attribute mask bits set "
			 "attr_mask=%x", attr_mask);
		ret = -EINVAL;
		goto modify_srq_exit0;
	}

	if (ehca_debug_level >= 2)
		ehca_dmp(mqpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);

	h_ret = hipz_h_modify_qp(shca->ipz_hca_handle, my_qp->ipz_qp_handle,
				 NULL, update_mask, mqpcb,
				 my_qp->galpas.kernel);

	if (h_ret != H_SUCCESS) {
		ret = ehca2ib_return_code(h_ret);
		ehca_err(ibsrq->device, "hipz_h_modify_qp() failed h_ret=%li "
			 "ehca_qp=%p qp_num=%x",
			 h_ret, my_qp, my_qp->real_qp_num);
	}

modify_srq_exit0:
	ehca_free_fw_ctrlblock(mqpcb);

	return ret;
}
int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr)
{
	struct ehca_qp *my_qp = container_of(srq, struct ehca_qp, ib_srq);
	struct ehca_shca *shca = container_of(srq->device, struct ehca_shca,
					      ib_device);
	struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
	struct hcp_modify_qp_control_block *qpcb;
	int ret = 0;
	u64 h_ret;

	qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!qpcb) {
		ehca_err(srq->device, "Out of memory for qpcb "
			 "ehca_qp=%p qp_num=%x", my_qp, my_qp->real_qp_num);
		return -ENOMEM;
	}

	h_ret = hipz_h_query_qp(adapter_handle, my_qp->ipz_qp_handle,
				NULL, qpcb, my_qp->galpas.kernel);
	if (h_ret != H_SUCCESS) {
		ret = ehca2ib_return_code(h_ret);
		ehca_err(srq->device, "hipz_h_query_qp() failed "
			 "ehca_qp=%p qp_num=%x h_ret=%li",
			 my_qp, my_qp->real_qp_num, h_ret);
		goto query_srq_exit1;
	}

	srq_attr->max_wr = qpcb->max_nr_outst_recv_wr - 1;
	srq_attr->max_sge = 3;
	srq_attr->srq_limit = EHCA_BMASK_GET(
		MQPCB_CURR_SRQ_LIMIT, qpcb->curr_srq_limit);

	if (ehca_debug_level >= 2)
		ehca_dmp(qpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);

query_srq_exit1:
	ehca_free_fw_ctrlblock(qpcb);

	return ret;
}
static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
			       struct ib_uobject *uobject)
{
	struct ehca_shca *shca = container_of(dev, struct ehca_shca, ib_device);
	struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
					     ib_pd);
	struct ehca_sport *sport = &shca->sport[my_qp->init_attr.port_num - 1];
	u32 qp_num = my_qp->real_qp_num;
	int ret;
	u64 h_ret;
	u8 port_num;
	enum ib_qp_type qp_type;
	unsigned long flags;

	if (uobject) {
		if (my_qp->mm_count_galpa ||
		    my_qp->mm_count_rqueue || my_qp->mm_count_squeue) {
			ehca_err(dev, "Resources still referenced in "
				 "user space qp_num=%x", qp_num);
			return -EINVAL;
		}
	}

	if (my_qp->send_cq) {
		ret = ehca_cq_unassign_qp(my_qp->send_cq, qp_num);
		if (ret) {
			ehca_err(dev, "Couldn't unassign qp from "
				 "send_cq ret=%i qp_num=%x cq_num=%x", ret,
				 qp_num, my_qp->send_cq->cq_number);
			return ret;
		}
	}

	write_lock_irqsave(&ehca_qp_idr_lock, flags);
	idr_remove(&ehca_qp_idr, my_qp->token);
	write_unlock_irqrestore(&ehca_qp_idr_lock, flags);

	/*
	 * SRQs will never get into an error list and do not have a recv_cq,
	 * so we need to skip them here.
	 */
	if (HAS_RQ(my_qp) && !IS_SRQ(my_qp))
		del_from_err_list(my_qp->recv_cq, &my_qp->rq_err_node);

	if (HAS_SQ(my_qp))
		del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);

	/* now wait until all pending events have completed */
	wait_event(my_qp->wait_completion, !atomic_read(&my_qp->nr_events));

	h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
	if (h_ret != H_SUCCESS) {
		ehca_err(dev, "hipz_h_destroy_qp() failed h_ret=%li "
			 "ehca_qp=%p qp_num=%x", h_ret, my_qp, qp_num);
		return ehca2ib_return_code(h_ret);
	}

	port_num = my_qp->init_attr.port_num;
	qp_type = my_qp->init_attr.qp_type;

	if (qp_type == IB_QPT_SMI || qp_type == IB_QPT_GSI) {
		spin_lock_irqsave(&sport->mod_sqp_lock, flags);
		kfree(my_qp->mod_qp_parm);
		my_qp->mod_qp_parm = NULL;
		shca->sport[port_num - 1].ibqp_sqp[qp_type] = NULL;
		spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
	}

	/* no support for IB_QPT_SMI yet */
	if (qp_type == IB_QPT_GSI) {
		struct ib_event event;
		ehca_info(dev, "device %s: port %x is inactive.",
			  shca->ib_device.name, port_num);
		event.device = &shca->ib_device;
		event.event = IB_EVENT_PORT_ERR;
		event.element.port_num = port_num;
		shca->sport[port_num - 1].port_state = IB_PORT_DOWN;
		ib_dispatch_event(&event);
	}

	if (HAS_RQ(my_qp)) {
		ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);

		vfree(my_qp->rq_map.map);
	}
	if (HAS_SQ(my_qp)) {
		ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);

		vfree(my_qp->sq_map.map);
	}
	kmem_cache_free(qp_cache, my_qp);
	atomic_dec(&shca->num_qps);
	return 0;
}
int ehca_destroy_qp(struct ib_qp *qp)
{
	return internal_destroy_qp(qp->device,
				   container_of(qp, struct ehca_qp, ib_qp),
				   qp->uobject);
}

int ehca_destroy_srq(struct ib_srq *srq)
{
	return internal_destroy_qp(srq->device,
				   container_of(srq, struct ehca_qp, ib_srq),
				   srq->uobject);
}
int ehca_init_qp_cache(void)
{
	qp_cache = kmem_cache_create("ehca_cache_qp",
				     sizeof(struct ehca_qp), 0,
				     SLAB_HWCACHE_ALIGN,
				     NULL);
	if (!qp_cache)
		return -ENOMEM;
	return 0;
}

void ehca_cleanup_qp_cache(void)
{
	if (qp_cache)
		kmem_cache_destroy(qp_cache);
}
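
/*
 * Usage note: ehca_init_qp_cache() must run once during module init before
 * any QP is created, and ehca_cleanup_qp_cache() during module exit after
 * all QPs are gone; the slab cache backs the kmem_cache_zalloc()/
 * kmem_cache_free() pair used by create/destroy above.
 */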