/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  QP functions
 *
 *  Authors: Joachim Fenkes <fenkes@de.ibm.com>
 *           Stefan Roscher <stefan.roscher@de.ibm.com>
 *           Waleri Fomin <fomin@de.ibm.com>
 *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *           Reinhard Ernst <rernst@de.ibm.com>
 *           Heiko J Schick <schickhj@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <asm/current.h>

#include "ehca_classes.h"
#include "ehca_tools.h"
#include "ehca_qes.h"
#include "ehca_iverbs.h"
#include "hcp_if.h"
#include "hipz_fns.h"

static struct kmem_cache *qp_cache;

/*
 * attributes not supported by query qp
 */
#define QP_ATTR_QUERY_NOT_SUPPORTED (IB_QP_MAX_DEST_RD_ATOMIC | \
				     IB_QP_MAX_QP_RD_ATOMIC   | \
				     IB_QP_ACCESS_FLAGS       | \
				     IB_QP_EN_SQD_ASYNC_NOTIFY)

/*
 * ehca (internal) qp state values
 */
enum ehca_qp_state {
	EHCA_QPS_RESET = 1,
	EHCA_QPS_INIT = 2,
	EHCA_QPS_RTR = 3,
	EHCA_QPS_RTS = 5,
	EHCA_QPS_SQD = 6,
	EHCA_QPS_SQE = 8,
	EHCA_QPS_ERR = 128
};

/*
 * qp state transitions as defined by IB Arch Rel 1.1 page 431
 */
enum ib_qp_statetrans {
	IB_QPST_ANY2RESET,
	IB_QPST_ANY2ERR,
	IB_QPST_RESET2INIT,
	IB_QPST_INIT2RTR,
	IB_QPST_INIT2INIT,
	IB_QPST_RTR2RTS,
	IB_QPST_RTS2SQD,
	IB_QPST_RTS2RTS,
	IB_QPST_SQD2RTS,
	IB_QPST_SQE2RTS,
	IB_QPST_MAX	/* nr of transitions, this must be last!!! */
};

/*
 * ib2ehca_qp_state maps IB to ehca qp_state
 * returns ehca qp state corresponding to given ib qp state
 */
static inline enum ehca_qp_state ib2ehca_qp_state(enum ib_qp_state ib_qp_state)
{
	switch (ib_qp_state) {
	case IB_QPS_RESET:
		return EHCA_QPS_RESET;
	case IB_QPS_INIT:
		return EHCA_QPS_INIT;
	case IB_QPS_RTR:
		return EHCA_QPS_RTR;
	case IB_QPS_RTS:
		return EHCA_QPS_RTS;
	case IB_QPS_SQD:
		return EHCA_QPS_SQD;
	case IB_QPS_SQE:
		return EHCA_QPS_SQE;
	case IB_QPS_ERR:
		return EHCA_QPS_ERR;
	default:
		ehca_gen_err("invalid ib_qp_state=%x", ib_qp_state);
		return -EINVAL;
	}
}

/*
 * ehca2ib_qp_state maps ehca to IB qp_state
 * returns ib qp state corresponding to given ehca qp state
 */
static inline enum ib_qp_state ehca2ib_qp_state(enum ehca_qp_state
						ehca_qp_state)
{
	switch (ehca_qp_state) {
	case EHCA_QPS_RESET:
		return IB_QPS_RESET;
	case EHCA_QPS_INIT:
		return IB_QPS_INIT;
	case EHCA_QPS_RTR:
		return IB_QPS_RTR;
	case EHCA_QPS_RTS:
		return IB_QPS_RTS;
	case EHCA_QPS_SQD:
		return IB_QPS_SQD;
	case EHCA_QPS_SQE:
		return IB_QPS_SQE;
	case EHCA_QPS_ERR:
		return IB_QPS_ERR;
	default:
		ehca_gen_err("invalid ehca_qp_state=%x", ehca_qp_state);
		return -EINVAL;
	}
}

/*
 * ehca_qp_type used as index for req_attr and opt_attr of
 * struct ehca_modqp_statetrans
 */
enum ehca_qp_type {
	QPT_RC = 0,
	QPT_UC = 1,
	QPT_UD = 2,
	QPT_SQP = 3,
	QPT_MAX
};

/*
 * ib2ehcaqptype maps IB to ehca qp_type
 * returns ehca qp type corresponding to ib qp type
 */
static inline enum ehca_qp_type ib2ehcaqptype(enum ib_qp_type ibqptype)
{
	switch (ibqptype) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		return QPT_SQP;
	case IB_QPT_RC:
		return QPT_RC;
	case IB_QPT_UC:
		return QPT_UC;
	case IB_QPT_UD:
		return QPT_UD;
	default:
		ehca_gen_err("Invalid ibqptype=%x", ibqptype);
		return -EINVAL;
	}
}

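/*
 * get_modqp_statetrans maps a (from, to) pair of IB QP states to the
 * corresponding entry of enum ib_qp_statetrans, or -EINVAL if the
 * transition is not one defined by the architecture.
 */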
static inline enum ib_qp_statetrans get_modqp_statetrans(int ib_fromstate,
							 int ib_tostate)
{
	int index = -EINVAL;
	switch (ib_tostate) {
	case IB_QPS_RESET:
		index = IB_QPST_ANY2RESET;
		break;
	case IB_QPS_INIT:
		switch (ib_fromstate) {
		case IB_QPS_RESET:
			index = IB_QPST_RESET2INIT;
			break;
		case IB_QPS_INIT:
			index = IB_QPST_INIT2INIT;
			break;
		}
		break;
	case IB_QPS_RTR:
		if (ib_fromstate == IB_QPS_INIT)
			index = IB_QPST_INIT2RTR;
		break;
	case IB_QPS_RTS:
		switch (ib_fromstate) {
		case IB_QPS_RTR:
			index = IB_QPST_RTR2RTS;
			break;
		case IB_QPS_RTS:
			index = IB_QPST_RTS2RTS;
			break;
		case IB_QPS_SQD:
			index = IB_QPST_SQD2RTS;
			break;
		case IB_QPS_SQE:
			index = IB_QPST_SQE2RTS;
			break;
		}
		break;
	case IB_QPS_SQD:
		if (ib_fromstate == IB_QPS_RTS)
			index = IB_QPST_RTS2SQD;
		break;
	case IB_QPS_ERR:
		index = IB_QPST_ANY2ERR;
		break;
	default:
		break;
	}
	return index;
}

/*
 * ibqptype2servicetype returns hcp service type corresponding to given
 * ib qp type used by create_qp()
 */
static inline int ibqptype2servicetype(enum ib_qp_type ibqptype)
{
	switch (ibqptype) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		return ST_UD;
	case IB_QPT_RC:
		return ST_RC;
	case IB_QPT_UC:
		return ST_UC;
	case IB_QPT_UD:
		return ST_UD;
	case IB_QPT_RAW_IPV6:
		return -EINVAL;
	default:
		ehca_gen_err("Invalid ibqptype=%x", ibqptype);
		return -EINVAL;
	}
}

/*
 * init userspace queue info from ipz_queue data
 */
static inline void queue2resp(struct ipzu_queue_resp *resp,
			      struct ipz_queue *queue)
{
	resp->qe_size = queue->qe_size;
	resp->act_nr_of_sg = queue->act_nr_of_sg;
	resp->queue_length = queue->queue_length;
	resp->pagesize = queue->pagesize;
	resp->toggle_state = queue->toggle_state;
	resp->offset = queue->offset;
}

/*
 * init_qp_queue initializes/constructs r/squeue and registers queue pages.
 */
static inline int init_qp_queue(struct ehca_shca *shca,
				struct ehca_pd *pd,
				struct ehca_qp *my_qp,
				struct ipz_queue *queue,
				int q_type,
				u64 expected_hret,
				struct ehca_alloc_queue_parms *parms,
				int wqe_size)
{
	int ret, cnt, ipz_rc, nr_q_pages;
	void *vpage;
	u64 rpage, h_ret;
	struct ib_device *ib_dev = &shca->ib_device;
	struct ipz_adapter_handle ipz_hca_handle = shca->ipz_hca_handle;

	if (!parms->queue_size)
		return 0;

	if (parms->is_small) {
		nr_q_pages = 1;
		ipz_rc = ipz_queue_ctor(pd, queue, nr_q_pages,
					128 << parms->page_size,
					wqe_size, parms->act_nr_sges, 1);
	} else {
		nr_q_pages = parms->queue_size;
		ipz_rc = ipz_queue_ctor(pd, queue, nr_q_pages,
					EHCA_PAGESIZE, wqe_size,
					parms->act_nr_sges, 0);
	}

	if (!ipz_rc) {
		ehca_err(ib_dev, "Cannot allocate page for queue. ipz_rc=%i",
			 ipz_rc);
		return -EBUSY;
	}

	/* register queue pages */
	for (cnt = 0; cnt < nr_q_pages; cnt++) {
		vpage = ipz_qpageit_get_inc(queue);
		if (!vpage) {
			ehca_err(ib_dev, "ipz_qpageit_get_inc() "
				 "failed p_vpage= %p", vpage);
			ret = -EINVAL;
			goto init_qp_queue1;
		}
		rpage = virt_to_abs(vpage);

		h_ret = hipz_h_register_rpage_qp(ipz_hca_handle,
						 my_qp->ipz_qp_handle,
						 NULL, 0, q_type,
						 rpage, parms->is_small ? 0 : 1,
						 my_qp->galpas.kernel);
		if (cnt == (nr_q_pages - 1)) {	/* last page! */
			if (h_ret != expected_hret) {
				ehca_err(ib_dev, "hipz_qp_register_rpage() "
					 "h_ret=%li", h_ret);
				ret = ehca2ib_return_code(h_ret);
				goto init_qp_queue1;
			}
			vpage = ipz_qpageit_get_inc(&my_qp->ipz_rqueue);
			if (vpage) {
				ehca_err(ib_dev, "ipz_qpageit_get_inc() "
					 "should not succeed vpage=%p", vpage);
				ret = -EINVAL;
				goto init_qp_queue1;
			}
		} else {
			if (h_ret != H_PAGE_REGISTERED) {
				ehca_err(ib_dev, "hipz_qp_register_rpage() "
					 "h_ret=%li", h_ret);
				ret = ehca2ib_return_code(h_ret);
				goto init_qp_queue1;
			}
		}
	}

	ipz_qeit_reset(queue);

	return 0;

init_qp_queue1:
	ipz_queue_dtor(pd, queue);
	return ret;
}

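/*
 * ehca_calc_wqe_size returns the WQE size in bytes: for low-latency QPs it
 * is the power of two selected by act_nr_sge, otherwise it is the offset of
 * the SG list entry one past the last one used.
 */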
static inline int ehca_calc_wqe_size(int act_nr_sge, int is_llqp)
{
	if (is_llqp)
		return 128 << act_nr_sge;
	else
		return offsetof(struct ehca_wqe,
				u.nud.sg_list[act_nr_sge]);
}

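/*
 * ehca_determine_small_queue checks whether the whole queue fits into a
 * small (512 or 1024 byte) page; if so, it records the resulting page size
 * in *queue and marks the queue as small.
 */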
static void ehca_determine_small_queue(struct ehca_alloc_queue_parms *queue,
				       int req_nr_sge, int is_llqp)
{
	u32 wqe_size, q_size;
	int act_nr_sge = req_nr_sge;

	if (!is_llqp)
		/* round up #SGEs so WQE size is a power of 2 */
		for (act_nr_sge = 4; act_nr_sge <= 252;
		     act_nr_sge = 4 + 2 * act_nr_sge)
			if (act_nr_sge >= req_nr_sge)
				break;

	wqe_size = ehca_calc_wqe_size(act_nr_sge, is_llqp);
	q_size = wqe_size * (queue->max_wr + 1);

	if (q_size <= 512)
		queue->page_size = 2;
	else if (q_size <= 1024)
		queue->page_size = 3;
	else
		queue->page_size = 0;

	queue->is_small = (queue->page_size != 0);
}

/* needs to be called with cq->spinlock held */
void ehca_add_to_err_list(struct ehca_qp *qp, int on_sq)
{
	struct list_head *list, *node;

	/* TODO: support low latency QPs */
	if (qp->ext_type == EQPT_LLQP)
		return;

	if (on_sq) {
		list = &qp->send_cq->sqp_err_list;
		node = &qp->sq_err_node;
	} else {
		list = &qp->recv_cq->rqp_err_list;
		node = &qp->rq_err_node;
	}

	if (list_empty(node))
		list_add_tail(node, list);
}

static void del_from_err_list(struct ehca_cq *cq, struct list_head *node)
{
	unsigned long flags;

	spin_lock_irqsave(&cq->spinlock, flags);

	if (!list_empty(node))
		list_del_init(node);

	spin_unlock_irqrestore(&cq->spinlock, flags);
}

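/*
 * reset_queue_map marks every queue map entry as already reported, so that
 * no flush CQEs are generated for WQEs that were never posted.
 */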
static void reset_queue_map(struct ehca_queue_map *qmap)
{
	int i;

	qmap->tail = 0;
	for (i = 0; i < qmap->entries; i++)
		qmap->map[i].reported = 1;
}

/*
 * Create an ib_qp struct that is either a QP or an SRQ, depending on
 * the value of the is_srq parameter. If init_attr and srq_init_attr share
 * fields, the field out of init_attr is used.
 */
static struct ehca_qp *internal_create_qp(
	struct ib_pd *pd,
	struct ib_qp_init_attr *init_attr,
	struct ib_srq_init_attr *srq_init_attr,
	struct ib_udata *udata, int is_srq)
{
	struct ehca_qp *my_qp, *my_srq = NULL;
	struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
					      ib_device);
	struct ib_ucontext *context = NULL;
	u64 h_ret;
	int is_llqp = 0, has_srq = 0;
	int qp_type, max_send_sge, max_recv_sge, ret;

	/* h_call's out parameters */
	struct ehca_alloc_qp_parms parms;
	u32 swqe_size = 0, rwqe_size = 0, ib_qp_num;
	unsigned long flags;

	if (!atomic_add_unless(&shca->num_qps, 1, ehca_max_qp)) {
		ehca_err(pd->device, "Unable to create QP, max number of %i "
			 "QPs reached.", ehca_max_qp);
		ehca_err(pd->device, "To increase the maximum number of QPs "
			 "use the number_of_qps module parameter.\n");
		return ERR_PTR(-ENOSPC);
	}

	if (init_attr->create_flags) {
		atomic_dec(&shca->num_qps);
		return ERR_PTR(-EINVAL);
	}

	memset(&parms, 0, sizeof(parms));
	qp_type = init_attr->qp_type;

	if (init_attr->sq_sig_type != IB_SIGNAL_REQ_WR &&
	    init_attr->sq_sig_type != IB_SIGNAL_ALL_WR) {
		ehca_err(pd->device, "init_attr->sq_sig_type=%x not allowed",
			 init_attr->sq_sig_type);
		atomic_dec(&shca->num_qps);
		return ERR_PTR(-EINVAL);
	}

	/* save LLQP info */
	if (qp_type & 0x80) {
		is_llqp = 1;
		parms.ext_type = EQPT_LLQP;
		parms.ll_comp_flags = qp_type & LLQP_COMP_MASK;
	}
	qp_type &= 0x1F;
	init_attr->qp_type &= 0x1F;

	/* handle SRQ base QPs */
	if (init_attr->srq) {
		my_srq = container_of(init_attr->srq, struct ehca_qp, ib_srq);

		has_srq = 1;
		parms.ext_type = EQPT_SRQBASE;
		parms.srq_qpn = my_srq->real_qp_num;
	}

	if (is_llqp && has_srq) {
		ehca_err(pd->device, "LLQPs can't have an SRQ");
		atomic_dec(&shca->num_qps);
		return ERR_PTR(-EINVAL);
	}

	/* handle SRQs */
	if (is_srq) {
		parms.ext_type = EQPT_SRQ;
		parms.srq_limit = srq_init_attr->attr.srq_limit;
		if (init_attr->cap.max_recv_sge > 3) {
			ehca_err(pd->device, "no more than three SGEs "
				 "supported for SRQ  pd=%p  max_sge=%x",
				 pd, init_attr->cap.max_recv_sge);
			atomic_dec(&shca->num_qps);
			return ERR_PTR(-EINVAL);
		}
	}

	/* check QP type */
	if (qp_type != IB_QPT_UD &&
	    qp_type != IB_QPT_UC &&
	    qp_type != IB_QPT_RC &&
	    qp_type != IB_QPT_SMI &&
	    qp_type != IB_QPT_GSI) {
		ehca_err(pd->device, "wrong QP Type=%x", qp_type);
		atomic_dec(&shca->num_qps);
		return ERR_PTR(-EINVAL);
	}

	if (is_llqp) {
		switch (qp_type) {
		case IB_QPT_RC:
			if ((init_attr->cap.max_send_wr > 255) ||
			    (init_attr->cap.max_recv_wr > 255)) {
				ehca_err(pd->device,
					 "Invalid Number of max_sq_wr=%x "
					 "or max_rq_wr=%x for RC LLQP",
					 init_attr->cap.max_send_wr,
					 init_attr->cap.max_recv_wr);
				atomic_dec(&shca->num_qps);
				return ERR_PTR(-EINVAL);
			}
			break;
		case IB_QPT_UD:
			if (!EHCA_BMASK_GET(HCA_CAP_UD_LL_QP, shca->hca_cap)) {
				ehca_err(pd->device, "UD LLQP not supported "
					 "by this adapter");
				atomic_dec(&shca->num_qps);
				return ERR_PTR(-ENOSYS);
			}
			if (!(init_attr->cap.max_send_sge <= 5
			      && init_attr->cap.max_send_sge >= 1
			      && init_attr->cap.max_recv_sge <= 5
			      && init_attr->cap.max_recv_sge >= 1)) {
				ehca_err(pd->device,
					 "Invalid Number of max_send_sge=%x "
					 "or max_recv_sge=%x for UD LLQP",
					 init_attr->cap.max_send_sge,
					 init_attr->cap.max_recv_sge);
				atomic_dec(&shca->num_qps);
				return ERR_PTR(-EINVAL);
			} else if (init_attr->cap.max_send_wr > 255) {
				ehca_err(pd->device,
					 "Invalid Number of "
					 "max_send_wr=%x for UD QP_TYPE=%x",
					 init_attr->cap.max_send_wr, qp_type);
				atomic_dec(&shca->num_qps);
				return ERR_PTR(-EINVAL);
			}
			break;
		default:
			ehca_err(pd->device, "unsupported LL QP Type=%x",
				 qp_type);
			atomic_dec(&shca->num_qps);
			return ERR_PTR(-EINVAL);
		}
	} else {
		int max_sge = (qp_type == IB_QPT_UD || qp_type == IB_QPT_SMI
			       || qp_type == IB_QPT_GSI) ? 250 : 252;

		if (init_attr->cap.max_send_sge > max_sge
		    || init_attr->cap.max_recv_sge > max_sge) {
			ehca_err(pd->device, "Invalid number of SGEs requested "
				 "send_sge=%x recv_sge=%x max_sge=%x",
				 init_attr->cap.max_send_sge,
				 init_attr->cap.max_recv_sge, max_sge);
			atomic_dec(&shca->num_qps);
			return ERR_PTR(-EINVAL);
		}
	}

	if (pd->uobject && udata)
		context = pd->uobject->context;

	my_qp = kmem_cache_zalloc(qp_cache, GFP_KERNEL);
	if (!my_qp) {
		ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd);
		atomic_dec(&shca->num_qps);
		return ERR_PTR(-ENOMEM);
	}

	atomic_set(&my_qp->nr_events, 0);
	init_waitqueue_head(&my_qp->wait_completion);
	spin_lock_init(&my_qp->spinlock_s);
	spin_lock_init(&my_qp->spinlock_r);
	my_qp->qp_type = qp_type;
	my_qp->ext_type = parms.ext_type;
	my_qp->state = IB_QPS_RESET;

	if (init_attr->recv_cq)
		my_qp->recv_cq =
			container_of(init_attr->recv_cq, struct ehca_cq, ib_cq);
	if (init_attr->send_cq)
		my_qp->send_cq =
			container_of(init_attr->send_cq, struct ehca_cq, ib_cq);

	do {
		if (!idr_pre_get(&ehca_qp_idr, GFP_KERNEL)) {
			ret = -ENOMEM;
			ehca_err(pd->device, "Can't reserve idr resources.");
			goto create_qp_exit0;
		}

		write_lock_irqsave(&ehca_qp_idr_lock, flags);
		ret = idr_get_new(&ehca_qp_idr, my_qp, &my_qp->token);
		write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
	} while (ret == -EAGAIN);

	if (ret) {
		ret = -ENOMEM;
		ehca_err(pd->device, "Can't allocate new idr entry.");
		goto create_qp_exit0;
	}

	if (my_qp->token > 0x1FFFFFF) {
		ret = -EINVAL;
		ehca_err(pd->device, "Invalid number of qp");
		goto create_qp_exit1;
	}

	if (has_srq)
		parms.srq_token = my_qp->token;

	parms.servicetype = ibqptype2servicetype(qp_type);
	if (parms.servicetype < 0) {
		ret = -EINVAL;
		ehca_err(pd->device, "Invalid qp_type=%x", qp_type);
		goto create_qp_exit1;
	}

	/* Always signal by WQE so we can hide circ. WQEs */
	parms.sigtype = HCALL_SIGT_BY_WQE;

	/* UD_AV CIRCUMVENTION */
	max_send_sge = init_attr->cap.max_send_sge;
	max_recv_sge = init_attr->cap.max_recv_sge;
	if (parms.servicetype == ST_UD && !is_llqp) {
		max_send_sge += 2;
		max_recv_sge += 2;
	}

	parms.token = my_qp->token;
	parms.eq_handle = shca->eq.ipz_eq_handle;
	parms.pd = my_pd->fw_pd;
	if (my_qp->send_cq)
		parms.send_cq_handle = my_qp->send_cq->ipz_cq_handle;
	if (my_qp->recv_cq)
		parms.recv_cq_handle = my_qp->recv_cq->ipz_cq_handle;

	parms.squeue.max_wr = init_attr->cap.max_send_wr;
	parms.rqueue.max_wr = init_attr->cap.max_recv_wr;
	parms.squeue.max_sge = max_send_sge;
	parms.rqueue.max_sge = max_recv_sge;

	/* RC QPs need one more SWQE for unsolicited ack circumvention */
	if (qp_type == IB_QPT_RC)
		parms.squeue.max_wr++;

	if (EHCA_BMASK_GET(HCA_CAP_MINI_QP, shca->hca_cap)) {
		if (HAS_SQ(my_qp))
			ehca_determine_small_queue(
				&parms.squeue, max_send_sge, is_llqp);
		if (HAS_RQ(my_qp))
			ehca_determine_small_queue(
				&parms.rqueue, max_recv_sge, is_llqp);
		parms.qp_storage =
			(parms.squeue.is_small || parms.rqueue.is_small);
	}

	h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms);
	if (h_ret != H_SUCCESS) {
		ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%li",
			 h_ret);
		ret = ehca2ib_return_code(h_ret);
		goto create_qp_exit1;
	}

	ib_qp_num = my_qp->real_qp_num = parms.real_qp_num;
	my_qp->ipz_qp_handle = parms.qp_handle;
	my_qp->galpas = parms.galpas;

	swqe_size = ehca_calc_wqe_size(parms.squeue.act_nr_sges, is_llqp);
	rwqe_size = ehca_calc_wqe_size(parms.rqueue.act_nr_sges, is_llqp);

	switch (qp_type) {
	case IB_QPT_RC:
		if (is_llqp) {
			parms.squeue.act_nr_sges = 1;
			parms.rqueue.act_nr_sges = 1;
		}
		/* hide the extra WQE */
		parms.squeue.act_nr_wqes--;
		break;
	case IB_QPT_UD:
	case IB_QPT_GSI:
	case IB_QPT_SMI:
		/* UD circumvention */
		if (is_llqp) {
			parms.squeue.act_nr_sges = 1;
			parms.rqueue.act_nr_sges = 1;
		} else {
			parms.squeue.act_nr_sges -= 2;
			parms.rqueue.act_nr_sges -= 2;
		}

		if (IB_QPT_GSI == qp_type || IB_QPT_SMI == qp_type) {
			parms.squeue.act_nr_wqes = init_attr->cap.max_send_wr;
			parms.rqueue.act_nr_wqes = init_attr->cap.max_recv_wr;
			parms.squeue.act_nr_sges = init_attr->cap.max_send_sge;
			parms.rqueue.act_nr_sges = init_attr->cap.max_recv_sge;
			ib_qp_num = (qp_type == IB_QPT_SMI) ? 0 : 1;
		}

		break;

	default:
		break;
	}

	/* initialize r/squeue and register queue pages */
	if (HAS_SQ(my_qp)) {
		ret = init_qp_queue(
			shca, my_pd, my_qp, &my_qp->ipz_squeue, 0,
			HAS_RQ(my_qp) ? H_PAGE_REGISTERED : H_SUCCESS,
			&parms.squeue, swqe_size);
		if (ret) {
			ehca_err(pd->device, "Couldn't initialize squeue "
				 "and pages ret=%i", ret);
			goto create_qp_exit2;
		}

		my_qp->sq_map.entries = my_qp->ipz_squeue.queue_length /
			my_qp->ipz_squeue.qe_size;
		my_qp->sq_map.map = vmalloc(my_qp->sq_map.entries *
					    sizeof(struct ehca_qmap_entry));
		if (!my_qp->sq_map.map) {
			ehca_err(pd->device, "Couldn't allocate squeue "
				 "map ret=%i", ret);
			ret = -ENOMEM;
			goto create_qp_exit3;
		}
		INIT_LIST_HEAD(&my_qp->sq_err_node);
		/* to avoid the generation of bogus flush CQEs */
		reset_queue_map(&my_qp->sq_map);
	}

	if (HAS_RQ(my_qp)) {
		ret = init_qp_queue(
			shca, my_pd, my_qp, &my_qp->ipz_rqueue, 1,
			H_SUCCESS, &parms.rqueue, rwqe_size);
		if (ret) {
			ehca_err(pd->device, "Couldn't initialize rqueue "
				 "and pages ret=%i", ret);
			goto create_qp_exit4;
		}

		my_qp->rq_map.entries = my_qp->ipz_rqueue.queue_length /
			my_qp->ipz_rqueue.qe_size;
		my_qp->rq_map.map = vmalloc(my_qp->rq_map.entries *
					    sizeof(struct ehca_qmap_entry));
		if (!my_qp->rq_map.map) {
			ehca_err(pd->device, "Couldn't allocate rqueue "
				 "map ret=%i", ret);
			ret = -ENOMEM;
			goto create_qp_exit5;
		}
		INIT_LIST_HEAD(&my_qp->rq_err_node);
		/* to avoid the generation of bogus flush CQEs */
		reset_queue_map(&my_qp->rq_map);
	} else if (init_attr->srq) {
		/* this is a base QP, use the queue map of the SRQ */
		my_qp->rq_map = my_srq->rq_map;
		INIT_LIST_HEAD(&my_qp->rq_err_node);

		my_qp->ipz_rqueue = my_srq->ipz_rqueue;
	}

	if (is_srq) {
		my_qp->ib_srq.pd = &my_pd->ib_pd;
		my_qp->ib_srq.device = my_pd->ib_pd.device;

		my_qp->ib_srq.srq_context = init_attr->qp_context;
		my_qp->ib_srq.event_handler = init_attr->event_handler;
	} else {
		my_qp->ib_qp.qp_num = ib_qp_num;
		my_qp->ib_qp.pd = &my_pd->ib_pd;
		my_qp->ib_qp.device = my_pd->ib_pd.device;

		my_qp->ib_qp.recv_cq = init_attr->recv_cq;
		my_qp->ib_qp.send_cq = init_attr->send_cq;

		my_qp->ib_qp.qp_type = qp_type;
		my_qp->ib_qp.srq = init_attr->srq;

		my_qp->ib_qp.qp_context = init_attr->qp_context;
		my_qp->ib_qp.event_handler = init_attr->event_handler;
	}

	init_attr->cap.max_inline_data = 0; /* not supported yet */
	init_attr->cap.max_recv_sge = parms.rqueue.act_nr_sges;
	init_attr->cap.max_recv_wr = parms.rqueue.act_nr_wqes;
	init_attr->cap.max_send_sge = parms.squeue.act_nr_sges;
	init_attr->cap.max_send_wr = parms.squeue.act_nr_wqes;
	my_qp->init_attr = *init_attr;

	if (qp_type == IB_QPT_SMI || qp_type == IB_QPT_GSI) {
		shca->sport[init_attr->port_num - 1].ibqp_sqp[qp_type] =
			&my_qp->ib_qp;
		if (ehca_nr_ports < 0) {
			/* alloc array to cache subsequent modify qp parms
			 * for autodetect mode
			 */
			my_qp->mod_qp_parm =
				kzalloc(EHCA_MOD_QP_PARM_MAX *
					sizeof(*my_qp->mod_qp_parm),
					GFP_KERNEL);
			if (!my_qp->mod_qp_parm) {
				ret = -ENOMEM;
				ehca_err(pd->device,
					 "Could not alloc mod_qp_parm");
				goto create_qp_exit5;
			}
		}
	}

	/* NOTE: define_apq0() not supported yet */
	if (qp_type == IB_QPT_GSI) {
		h_ret = ehca_define_sqp(shca, my_qp, init_attr);
		if (h_ret != H_SUCCESS) {
			ret = ehca2ib_return_code(h_ret);
			goto create_qp_exit6;
		}
	}

	if (my_qp->send_cq) {
		ret = ehca_cq_assign_qp(my_qp->send_cq, my_qp);
		if (ret) {
			ehca_err(pd->device,
				 "Couldn't assign qp to send_cq ret=%i", ret);
			goto create_qp_exit7;
		}
	}

	/* copy queues, galpa data to user space */
	if (context && udata) {
		struct ehca_create_qp_resp resp;
		memset(&resp, 0, sizeof(resp));

		resp.qp_num = my_qp->real_qp_num;
		resp.token = my_qp->token;
		resp.qp_type = my_qp->qp_type;
		resp.ext_type = my_qp->ext_type;
		resp.qkey = my_qp->qkey;
		resp.real_qp_num = my_qp->real_qp_num;

		if (HAS_SQ(my_qp))
			queue2resp(&resp.ipz_squeue, &my_qp->ipz_squeue);
		if (HAS_RQ(my_qp))
			queue2resp(&resp.ipz_rqueue, &my_qp->ipz_rqueue);
		resp.fw_handle_ofs = (u32)
			(my_qp->galpas.user.fw_handle & (PAGE_SIZE - 1));

		if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
			ehca_err(pd->device, "Copy to udata failed");
			ret = -EINVAL;
			goto create_qp_exit8;
		}
	}

	return my_qp;

create_qp_exit8:
	ehca_cq_unassign_qp(my_qp->send_cq, my_qp->real_qp_num);

create_qp_exit7:
	kfree(my_qp->mod_qp_parm);

create_qp_exit6:
	if (HAS_RQ(my_qp))
		vfree(my_qp->rq_map.map);

create_qp_exit5:
	if (HAS_RQ(my_qp))
		ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);

create_qp_exit4:
	if (HAS_SQ(my_qp))
		vfree(my_qp->sq_map.map);

create_qp_exit3:
	if (HAS_SQ(my_qp))
		ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);

create_qp_exit2:
	hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);

create_qp_exit1:
	write_lock_irqsave(&ehca_qp_idr_lock, flags);
	idr_remove(&ehca_qp_idr, my_qp->token);
	write_unlock_irqrestore(&ehca_qp_idr_lock, flags);

create_qp_exit0:
	kmem_cache_free(qp_cache, my_qp);
	atomic_dec(&shca->num_qps);
	return ERR_PTR(ret);
}

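/* verbs wrapper around internal_create_qp() for ordinary (non-SRQ) QPs */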
struct ib_qp *ehca_create_qp(struct ib_pd *pd,
			     struct ib_qp_init_attr *qp_init_attr,
			     struct ib_udata *udata)
{
	struct ehca_qp *ret;

	ret = internal_create_qp(pd, qp_init_attr, NULL, udata, 0);
	return IS_ERR(ret) ? (struct ib_qp *)ret : &ret->ib_qp;
}

static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
			       struct ib_uobject *uobject);

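/*
 * ehca_create_srq creates the SRQ as an internal QP via internal_create_qp()
 * and then drives it through INIT, ENABLE and RTR with modify-QP hcalls,
 * since an SRQ never progresses beyond the receive-ready state.
 */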
struct ib_srq *ehca_create_srq(struct ib_pd *pd,
			       struct ib_srq_init_attr *srq_init_attr,
			       struct ib_udata *udata)
{
	struct ib_qp_init_attr qp_init_attr;
	struct ehca_qp *my_qp;
	struct ib_srq *ret;
	struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
					      ib_device);
	struct hcp_modify_qp_control_block *mqpcb;
	u64 hret, update_mask;

	/* For common attributes, internal_create_qp() takes its info
	 * out of qp_init_attr, so copy all common attrs there.
	 */
	memset(&qp_init_attr, 0, sizeof(qp_init_attr));
	qp_init_attr.event_handler = srq_init_attr->event_handler;
	qp_init_attr.qp_context = srq_init_attr->srq_context;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.qp_type = IB_QPT_RC;
	qp_init_attr.cap.max_recv_wr = srq_init_attr->attr.max_wr;
	qp_init_attr.cap.max_recv_sge = srq_init_attr->attr.max_sge;

	my_qp = internal_create_qp(pd, &qp_init_attr, srq_init_attr, udata, 1);
	if (IS_ERR(my_qp))
		return (struct ib_srq *)my_qp;

	/* copy back return values */
	srq_init_attr->attr.max_wr = qp_init_attr.cap.max_recv_wr;
	srq_init_attr->attr.max_sge = 3;

	/* drive SRQ into RTR state */
	mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!mqpcb) {
		ehca_err(pd->device, "Could not get zeroed page for mqpcb "
			 "ehca_qp=%p qp_num=%x ", my_qp, my_qp->real_qp_num);
		ret = ERR_PTR(-ENOMEM);
		goto create_srq1;
	}

	mqpcb->qp_state = EHCA_QPS_INIT;
	mqpcb->prim_phys_port = 1;
	update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
	hret = hipz_h_modify_qp(shca->ipz_hca_handle,
				my_qp->ipz_qp_handle,
				&my_qp->pf,
				update_mask,
				mqpcb, my_qp->galpas.kernel);
	if (hret != H_SUCCESS) {
		ehca_err(pd->device, "Could not modify SRQ to INIT "
			 "ehca_qp=%p qp_num=%x h_ret=%li",
			 my_qp, my_qp->real_qp_num, hret);
		goto create_srq2;
	}

	mqpcb->qp_enable = 1;
	update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_ENABLE, 1);
	hret = hipz_h_modify_qp(shca->ipz_hca_handle,
				my_qp->ipz_qp_handle,
				&my_qp->pf,
				update_mask,
				mqpcb, my_qp->galpas.kernel);
	if (hret != H_SUCCESS) {
		ehca_err(pd->device, "Could not enable SRQ "
			 "ehca_qp=%p qp_num=%x h_ret=%li",
			 my_qp, my_qp->real_qp_num, hret);
		goto create_srq2;
	}

	mqpcb->qp_state = EHCA_QPS_RTR;
	update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
	hret = hipz_h_modify_qp(shca->ipz_hca_handle,
				my_qp->ipz_qp_handle,
				&my_qp->pf,
				update_mask,
				mqpcb, my_qp->galpas.kernel);
	if (hret != H_SUCCESS) {
		ehca_err(pd->device, "Could not modify SRQ to RTR "
			 "ehca_qp=%p qp_num=%x h_ret=%li",
			 my_qp, my_qp->real_qp_num, hret);
		goto create_srq2;
	}

	ehca_free_fw_ctrlblock(mqpcb);

	return &my_qp->ib_srq;

create_srq2:
	ret = ERR_PTR(ehca2ib_return_code(hret));
	ehca_free_fw_ctrlblock(mqpcb);

create_srq1:
	internal_destroy_qp(pd->device, my_qp, my_qp->ib_srq.uobject);

	return ret;
}

/*
 * prepare_sqe_rts called by internal_modify_qp() at trans sqe -> rts
 * set purge bit of bad wqe and subsequent wqes to avoid reentering sqe
 * returns total number of bad wqes in bad_wqe_cnt
 */
static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
			   int *bad_wqe_cnt)
{
	u64 h_ret;
	struct ipz_queue *squeue;
	void *bad_send_wqe_p, *bad_send_wqe_v;
	u64 q_ofs;
	struct ehca_wqe *wqe;
	int qp_num = my_qp->ib_qp.qp_num;

	/* get send wqe pointer */
	h_ret = hipz_h_disable_and_get_wqe(shca->ipz_hca_handle,
					   my_qp->ipz_qp_handle, &my_qp->pf,
					   &bad_send_wqe_p, NULL, 2);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_h_disable_and_get_wqe() failed"
			 " ehca_qp=%p qp_num=%x h_ret=%li",
			 my_qp, qp_num, h_ret);
		return ehca2ib_return_code(h_ret);
	}
	bad_send_wqe_p = (void *)((u64)bad_send_wqe_p & (~(1L << 63)));
	ehca_dbg(&shca->ib_device, "qp_num=%x bad_send_wqe_p=%p",
		 qp_num, bad_send_wqe_p);
	/* convert wqe pointer to vadr */
	bad_send_wqe_v = abs_to_virt((u64)bad_send_wqe_p);
	if (ehca_debug_level >= 2)
		ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num);
	squeue = &my_qp->ipz_squeue;
	if (ipz_queue_abs_to_offset(squeue, (u64)bad_send_wqe_p, &q_ofs)) {
		ehca_err(&shca->ib_device, "failed to get wqe offset qp_num=%x"
			 " bad_send_wqe_p=%p", qp_num, bad_send_wqe_p);
		return -EFAULT;
	}

	/* loop sets wqe's purge bit */
	wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
	*bad_wqe_cnt = 0;
	while (wqe->optype != 0xff && wqe->wqef != 0xff) {
		if (ehca_debug_level >= 2)
			ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num);
		wqe->nr_of_data_seg = 0; /* suppress data access */
		wqe->wqef = WQEF_PURGE; /* WQE to be purged */
		q_ofs = ipz_queue_advance_offset(squeue, q_ofs);
		wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
		*bad_wqe_cnt = (*bad_wqe_cnt)+1;
	}
	/*
	 * bad wqe will be reprocessed and ignored when poll_cq() is called,
	 * i.e. nr of wqes with flush error status is one less
	 */
	ehca_dbg(&shca->ib_device, "qp_num=%x flusherr_wqe_cnt=%x",
		 qp_num, (*bad_wqe_cnt)-1);
	wqe->wqef = 0;

	return 0;
}

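/*
 * calc_left_cqes computes, from the WQE pointer the hardware stopped at,
 * how many WQEs between qmap->tail and that WQE still await a real
 * completion; flush CQEs must only be generated after those are polled.
 */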
static int calc_left_cqes(u64 wqe_p, struct ipz_queue *ipz_queue,
			  struct ehca_queue_map *qmap)
{
	void *wqe_v;
	u64 q_ofs;
	u32 wqe_idx;

	/* convert real to abs address */
	wqe_p = wqe_p & (~(1UL << 63));

	wqe_v = abs_to_virt(wqe_p);

	if (ipz_queue_abs_to_offset(ipz_queue, wqe_p, &q_ofs)) {
		ehca_gen_err("Invalid offset for calculating left cqes "
			     "wqe_p=%#lx wqe_v=%p\n", wqe_p, wqe_v);
		return -EFAULT;
	}

	wqe_idx = q_ofs / ipz_queue->qe_size;
	if (wqe_idx < qmap->tail)
		qmap->left_to_poll = (qmap->entries - qmap->tail) + wqe_idx;
	else
		qmap->left_to_poll = wqe_idx - qmap->tail;

	return 0;
}

static int check_for_left_cqes(struct ehca_qp *my_qp, struct ehca_shca *shca)
{
	u64 h_ret;
	void *send_wqe_p, *recv_wqe_p;
	int ret;
	unsigned long flags;
	int qp_num = my_qp->ib_qp.qp_num;

	/* this hcall is not supported on base QPs */
	if (my_qp->ext_type != EQPT_SRQBASE) {
		/* get send and receive wqe pointer */
		h_ret = hipz_h_disable_and_get_wqe(shca->ipz_hca_handle,
				my_qp->ipz_qp_handle, &my_qp->pf,
				&send_wqe_p, &recv_wqe_p, 4);
		if (h_ret != H_SUCCESS) {
			ehca_err(&shca->ib_device, "disable_and_get_wqe() "
				 "failed ehca_qp=%p qp_num=%x h_ret=%li",
				 my_qp, qp_num, h_ret);
			return ehca2ib_return_code(h_ret);
		}

		/*
		 * acquire lock to ensure that nobody is polling the cq which
		 * could mean that the qmap->tail pointer is in an
		 * inconsistent state.
		 */
		spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
		ret = calc_left_cqes((u64)send_wqe_p, &my_qp->ipz_squeue,
				     &my_qp->sq_map);
		spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
		if (ret)
			return ret;

		spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
		ret = calc_left_cqes((u64)recv_wqe_p, &my_qp->ipz_rqueue,
				     &my_qp->rq_map);
		spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
		if (ret)
			return ret;
	} else {
		spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
		my_qp->sq_map.left_to_poll = 0;
		spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);

		spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
		my_qp->rq_map.left_to_poll = 0;
		spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
	}

	/* this assures flush cqes being generated only for pending wqes */
	if ((my_qp->sq_map.left_to_poll == 0) &&
	    (my_qp->rq_map.left_to_poll == 0)) {
		spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
		ehca_add_to_err_list(my_qp, 1);
		spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);

		if (HAS_RQ(my_qp)) {
			spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
			ehca_add_to_err_list(my_qp, 0);
			spin_unlock_irqrestore(&my_qp->recv_cq->spinlock,
					       flags);
		}
	}

	return 0;
}

/*
 * internal_modify_qp with circumvention to handle aqp0 properly
 * smi_reset2init indicates if this is an internal reset-to-init-call for
 * smi. This flag must always be zero if called from ehca_modify_qp()!
 * This internal func was introduced to avoid recursion of ehca_modify_qp()!
 */
static int internal_modify_qp(struct ib_qp *ibqp,
			      struct ib_qp_attr *attr,
			      int attr_mask, int smi_reset2init)
{
	enum ib_qp_state qp_cur_state, qp_new_state;
	int cnt, qp_attr_idx, ret = 0;
	enum ib_qp_statetrans statetrans;
	struct hcp_modify_qp_control_block *mqpcb;
	struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
	struct ehca_shca *shca =
		container_of(ibqp->pd->device, struct ehca_shca, ib_device);
	u64 update_mask;
	u64 h_ret;
	int bad_wqe_cnt = 0;
	int squeue_locked = 0;
	unsigned long flags = 0;

	/* do query_qp to obtain current attr values */
	mqpcb = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
	if (!mqpcb) {
		ehca_err(ibqp->device, "Could not get zeroed page for mqpcb "
			 "ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num);
		return -ENOMEM;
	}

	h_ret = hipz_h_query_qp(shca->ipz_hca_handle,
				my_qp->ipz_qp_handle,
				&my_qp->pf,
				mqpcb, my_qp->galpas.kernel);
	if (h_ret != H_SUCCESS) {
		ehca_err(ibqp->device, "hipz_h_query_qp() failed "
			 "ehca_qp=%p qp_num=%x h_ret=%li",
			 my_qp, ibqp->qp_num, h_ret);
		ret = ehca2ib_return_code(h_ret);
		goto modify_qp_exit1;
	}

	qp_cur_state = ehca2ib_qp_state(mqpcb->qp_state);

	if (qp_cur_state == -EINVAL) {	/* invalid qp state */
		ret = -EINVAL;
		ehca_err(ibqp->device, "Invalid current ehca_qp_state=%x "
			 "ehca_qp=%p qp_num=%x",
			 mqpcb->qp_state, my_qp, ibqp->qp_num);
		goto modify_qp_exit1;
	}
	/*
	 * circumvention to set aqp0 initial state to init
	 * as expected by IB spec
	 */
	if (smi_reset2init == 0 &&
	    ibqp->qp_type == IB_QPT_SMI &&
	    qp_cur_state == IB_QPS_RESET &&
	    (attr_mask & IB_QP_STATE) &&
	    attr->qp_state == IB_QPS_INIT) { /* RESET -> INIT */
		struct ib_qp_attr smiqp_attr = {
			.qp_state = IB_QPS_INIT,
			.port_num = my_qp->init_attr.port_num,
			.pkey_index = 0,
			.qkey = 0
		};
		int smiqp_attr_mask = IB_QP_STATE | IB_QP_PORT |
			IB_QP_PKEY_INDEX | IB_QP_QKEY;
		int smirc = internal_modify_qp(
			ibqp, &smiqp_attr, smiqp_attr_mask, 1);
		if (smirc) {
			ehca_err(ibqp->device, "SMI RESET -> INIT failed. "
				 "ehca_modify_qp() rc=%i", smirc);
			ret = H_PARAMETER;
			goto modify_qp_exit1;
		}
		qp_cur_state = IB_QPS_INIT;
		ehca_dbg(ibqp->device, "SMI RESET -> INIT succeeded");
	}
	/* is transmitted current state equal to "real" current state */
	if ((attr_mask & IB_QP_CUR_STATE) &&
	    qp_cur_state != attr->cur_qp_state) {
		ret = -EINVAL;
		ehca_err(ibqp->device,
			 "Invalid IB_QP_CUR_STATE attr->curr_qp_state=%x <>"
			 " actual cur_qp_state=%x. ehca_qp=%p qp_num=%x",
			 attr->cur_qp_state, qp_cur_state, my_qp, ibqp->qp_num);
		goto modify_qp_exit1;
	}

	ehca_dbg(ibqp->device, "ehca_qp=%p qp_num=%x current qp_state=%x "
		 "new qp_state=%x attribute_mask=%x",
		 my_qp, ibqp->qp_num, qp_cur_state, attr->qp_state, attr_mask);

	qp_new_state = attr_mask & IB_QP_STATE ? attr->qp_state : qp_cur_state;
	if (!smi_reset2init &&
	    !ib_modify_qp_is_ok(qp_cur_state, qp_new_state, ibqp->qp_type,
				attr_mask)) {
		ret = -EINVAL;
		ehca_err(ibqp->device,
			 "Invalid qp transition new_state=%x cur_state=%x "
			 "ehca_qp=%p qp_num=%x attr_mask=%x", qp_new_state,
			 qp_cur_state, my_qp, ibqp->qp_num, attr_mask);
		goto modify_qp_exit1;
	}

	mqpcb->qp_state = ib2ehca_qp_state(qp_new_state);
	if (mqpcb->qp_state)
		update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
	else {
		ret = -EINVAL;
		ehca_err(ibqp->device, "Invalid new qp state=%x "
			 "ehca_qp=%p qp_num=%x",
			 qp_new_state, my_qp, ibqp->qp_num);
		goto modify_qp_exit1;
	}

	/* retrieve state transition struct to get req and opt attrs */
	statetrans = get_modqp_statetrans(qp_cur_state, qp_new_state);
	if (statetrans < 0) {
		ret = -EINVAL;
		ehca_err(ibqp->device, "<INVALID STATE CHANGE> qp_cur_state=%x "
			 "new_qp_state=%x State_xsition=%x ehca_qp=%p "
			 "qp_num=%x", qp_cur_state, qp_new_state,
			 statetrans, my_qp, ibqp->qp_num);
		goto modify_qp_exit1;
	}

	qp_attr_idx = ib2ehcaqptype(ibqp->qp_type);

	if (qp_attr_idx < 0) {
		ret = qp_attr_idx;
		ehca_err(ibqp->device,
			 "Invalid QP type=%x ehca_qp=%p qp_num=%x",
			 ibqp->qp_type, my_qp, ibqp->qp_num);
		goto modify_qp_exit1;
	}

	ehca_dbg(ibqp->device,
		 "ehca_qp=%p qp_num=%x <VALID STATE CHANGE> qp_state_xsit=%x",
		 my_qp, ibqp->qp_num, statetrans);

	/* eHCA2 rev2 and higher require the SEND_GRH_FLAG to be set
	 * in non-LL UD QPs.
	 */
	if ((my_qp->qp_type == IB_QPT_UD) &&
	    (my_qp->ext_type != EQPT_LLQP) &&
	    (statetrans == IB_QPST_INIT2RTR) &&
	    (shca->hw_level >= 0x22)) {
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1);
		mqpcb->send_grh_flag = 1;
	}

	/* sqe -> rts: set purge bit of bad wqe before actual trans */
	if ((my_qp->qp_type == IB_QPT_UD ||
	     my_qp->qp_type == IB_QPT_GSI ||
	     my_qp->qp_type == IB_QPT_SMI) &&
	    statetrans == IB_QPST_SQE2RTS) {
		/* mark next free wqe if kernel */
		if (!ibqp->uobject) {
			struct ehca_wqe *wqe;
			/* lock send queue */
			spin_lock_irqsave(&my_qp->spinlock_s, flags);
			squeue_locked = 1;
			/* mark next free wqe */
			wqe = (struct ehca_wqe *)
				ipz_qeit_get(&my_qp->ipz_squeue);
			wqe->optype = wqe->wqef = 0xff;
			ehca_dbg(ibqp->device, "qp_num=%x next_free_wqe=%p",
				 ibqp->qp_num, wqe);
		}
		ret = prepare_sqe_rts(my_qp, shca, &bad_wqe_cnt);
		if (ret) {
			ehca_err(ibqp->device, "prepare_sqe_rts() failed "
				 "ehca_qp=%p qp_num=%x ret=%i",
				 my_qp, ibqp->qp_num, ret);
			goto modify_qp_exit2;
		}
	}

	/*
	 * enable RDMA_Atomic_Control if reset->init und reliable con
	 * this is necessary since gen2 does not provide that flag,
	 * but pHyp requires it
	 */
	if (statetrans == IB_QPST_RESET2INIT &&
	    (ibqp->qp_type == IB_QPT_RC || ibqp->qp_type == IB_QPT_UC)) {
		mqpcb->rdma_atomic_ctrl = 3;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RDMA_ATOMIC_CTRL, 1);
	}
	/* circ. pHyp requires #RDMA/Atomic Resp Res for UC INIT -> RTR */
	if (statetrans == IB_QPST_INIT2RTR &&
	    (ibqp->qp_type == IB_QPT_UC) &&
	    !(attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)) {
		mqpcb->rdma_nr_atomic_resp_res = 1; /* default to 1 */
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES, 1);
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		if (attr->pkey_index >= 16) {
			ret = -EINVAL;
			ehca_err(ibqp->device, "Invalid pkey_index=%x. "
				 "ehca_qp=%p qp_num=%x max_pkey_index=f",
				 attr->pkey_index, my_qp, ibqp->qp_num);
			goto modify_qp_exit2;
		}
		mqpcb->prim_p_key_idx = attr->pkey_index;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_P_KEY_IDX, 1);
	}
	if (attr_mask & IB_QP_PORT) {
		struct ehca_sport *sport;
		struct ehca_qp *aqp1;
		if (attr->port_num < 1 || attr->port_num > shca->num_ports) {
			ret = -EINVAL;
			ehca_err(ibqp->device, "Invalid port=%x. "
				 "ehca_qp=%p qp_num=%x num_ports=%x",
				 attr->port_num, my_qp, ibqp->qp_num,
				 shca->num_ports);
			goto modify_qp_exit2;
		}
		sport = &shca->sport[attr->port_num - 1];
		if (!sport->ibqp_sqp[IB_QPT_GSI]) {
			/* should not occur */
			ret = -EFAULT;
			ehca_err(ibqp->device, "AQP1 was not created for "
				 "port=%x", attr->port_num);
			goto modify_qp_exit2;
		}
		aqp1 = container_of(sport->ibqp_sqp[IB_QPT_GSI],
				    struct ehca_qp, ib_qp);
		if (ibqp->qp_type != IB_QPT_GSI &&
		    ibqp->qp_type != IB_QPT_SMI &&
		    aqp1->mod_qp_parm) {
			/*
			 * firmware will reject this modify_qp() because
			 * port is not activated/initialized fully
			 */
			ret = -EFAULT;
			ehca_warn(ibqp->device, "Couldn't modify qp port=%x: "
				  "either port is being activated (try again) "
				  "or cabling issue", attr->port_num);
			goto modify_qp_exit2;
		}
		mqpcb->prim_phys_port = attr->port_num;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_PHYS_PORT, 1);
	}
	if (attr_mask & IB_QP_QKEY) {
		mqpcb->qkey = attr->qkey;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_QKEY, 1);
	}
	if (attr_mask & IB_QP_AV) {
		mqpcb->dlid = attr->ah_attr.dlid;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DLID, 1);
		mqpcb->source_path_bits = attr->ah_attr.src_path_bits;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SOURCE_PATH_BITS, 1);
		mqpcb->service_level = attr->ah_attr.sl;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL, 1);

		if (ehca_calc_ipd(shca, mqpcb->prim_phys_port,
				  attr->ah_attr.static_rate,
				  &mqpcb->max_static_rate)) {
			ret = -EINVAL;
			goto modify_qp_exit2;
		}
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE, 1);

		/*
		 * Always supply the GRH flag, even if it's zero, to give the
		 * hypervisor a clear "yes" or "no" instead of a "perhaps"
		 */
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1);

		/*
		 * only if GRH is TRUE we might consider SOURCE_GID_IDX
		 * and DEST_GID otherwise phype will return H_ATTR_PARM!!!
		 */
		if (attr->ah_attr.ah_flags == IB_AH_GRH) {
			mqpcb->send_grh_flag = 1;

			mqpcb->source_gid_idx = attr->ah_attr.grh.sgid_index;
			update_mask |=
				EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX, 1);

			for (cnt = 0; cnt < 16; cnt++)
				mqpcb->dest_gid.byte[cnt] =
					attr->ah_attr.grh.dgid.raw[cnt];

			update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DEST_GID, 1);
			mqpcb->flow_label = attr->ah_attr.grh.flow_label;
			update_mask |= EHCA_BMASK_SET(MQPCB_MASK_FLOW_LABEL, 1);
			mqpcb->hop_limit = attr->ah_attr.grh.hop_limit;
			update_mask |= EHCA_BMASK_SET(MQPCB_MASK_HOP_LIMIT, 1);
			mqpcb->traffic_class = attr->ah_attr.grh.traffic_class;
			update_mask |=
				EHCA_BMASK_SET(MQPCB_MASK_TRAFFIC_CLASS, 1);
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		/* store ld(MTU) */
		my_qp->mtu_shift = attr->path_mtu + 7;
		mqpcb->path_mtu = attr->path_mtu;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PATH_MTU, 1);
	}
	if (attr_mask & IB_QP_TIMEOUT) {
		mqpcb->timeout = attr->timeout;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_TIMEOUT, 1);
	}
	if (attr_mask & IB_QP_RETRY_CNT) {
		mqpcb->retry_count = attr->retry_cnt;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RETRY_COUNT, 1);
	}
	if (attr_mask & IB_QP_RNR_RETRY) {
		mqpcb->rnr_retry_count = attr->rnr_retry;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RNR_RETRY_COUNT, 1);
	}
	if (attr_mask & IB_QP_RQ_PSN) {
		mqpcb->receive_psn = attr->rq_psn;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RECEIVE_PSN, 1);
	}
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		mqpcb->rdma_nr_atomic_resp_res = attr->max_dest_rd_atomic < 3 ?
			attr->max_dest_rd_atomic : 2;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES, 1);
	}
	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		mqpcb->rdma_atomic_outst_dest_qp = attr->max_rd_atomic < 3 ?
			attr->max_rd_atomic : 2;
		update_mask |=
			EHCA_BMASK_SET
			(MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP, 1);
	}
	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_port_num < 1
		    || attr->alt_port_num > shca->num_ports) {
			ret = -EINVAL;
			ehca_err(ibqp->device, "Invalid alt_port=%x. "
				 "ehca_qp=%p qp_num=%x num_ports=%x",
				 attr->alt_port_num, my_qp, ibqp->qp_num,
				 shca->num_ports);
			goto modify_qp_exit2;
		}
		mqpcb->alt_phys_port = attr->alt_port_num;

		if (attr->alt_pkey_index >= 16) {
			ret = -EINVAL;
			ehca_err(ibqp->device, "Invalid alt_pkey_index=%x. "
				 "ehca_qp=%p qp_num=%x max_pkey_index=f",
				 attr->pkey_index, my_qp, ibqp->qp_num);
			goto modify_qp_exit2;
		}
		mqpcb->alt_p_key_idx = attr->alt_pkey_index;

		mqpcb->timeout_al = attr->alt_timeout;
		mqpcb->dlid_al = attr->alt_ah_attr.dlid;
		mqpcb->source_path_bits_al = attr->alt_ah_attr.src_path_bits;
		mqpcb->service_level_al = attr->alt_ah_attr.sl;

		if (ehca_calc_ipd(shca, mqpcb->alt_phys_port,
				  attr->alt_ah_attr.static_rate,
				  &mqpcb->max_static_rate_al)) {
			ret = -EINVAL;
			goto modify_qp_exit2;
		}

		/* OpenIB doesn't support alternate retry counts - copy them */
		mqpcb->retry_count_al = mqpcb->retry_count;
		mqpcb->rnr_retry_count_al = mqpcb->rnr_retry_count;

		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_ALT_PHYS_PORT, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_ALT_P_KEY_IDX, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_TIMEOUT_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_DLID_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_SOURCE_PATH_BITS_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_RETRY_COUNT_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_RNR_RETRY_COUNT_AL, 1);

		/*
		 * Always supply the GRH flag, even if it's zero, to give the
		 * hypervisor a clear "yes" or "no" instead of a "perhaps"
		 */
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG_AL, 1);

		/*
		 * only if GRH is TRUE we might consider SOURCE_GID_IDX
		 * and DEST_GID otherwise phype will return H_ATTR_PARM!!!
		 */
		if (attr->alt_ah_attr.ah_flags == IB_AH_GRH) {
			mqpcb->send_grh_flag_al = 1;

			for (cnt = 0; cnt < 16; cnt++)
				mqpcb->dest_gid_al.byte[cnt] =
					attr->alt_ah_attr.grh.dgid.raw[cnt];
			mqpcb->source_gid_idx_al =
				attr->alt_ah_attr.grh.sgid_index;
			mqpcb->flow_label_al = attr->alt_ah_attr.grh.flow_label;
			mqpcb->hop_limit_al = attr->alt_ah_attr.grh.hop_limit;
			mqpcb->traffic_class_al =
				attr->alt_ah_attr.grh.traffic_class;

			update_mask |=
				EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX_AL, 1)
				| EHCA_BMASK_SET(MQPCB_MASK_DEST_GID_AL, 1)
				| EHCA_BMASK_SET(MQPCB_MASK_FLOW_LABEL_AL, 1)
				| EHCA_BMASK_SET(MQPCB_MASK_HOP_LIMIT_AL, 1) |
				EHCA_BMASK_SET(MQPCB_MASK_TRAFFIC_CLASS_AL, 1);
		}
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		mqpcb->min_rnr_nak_timer_field = attr->min_rnr_timer;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD, 1);
	}

	if (attr_mask & IB_QP_SQ_PSN) {
		mqpcb->send_psn = attr->sq_psn;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_PSN, 1);
	}

	if (attr_mask & IB_QP_DEST_QPN) {
		mqpcb->dest_qp_nr = attr->dest_qp_num;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DEST_QP_NR, 1);
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		if (attr->path_mig_state != IB_MIG_REARM
		    && attr->path_mig_state != IB_MIG_MIGRATED) {
			ret = -EINVAL;
			ehca_err(ibqp->device, "Invalid mig_state=%x",
				 attr->path_mig_state);
			goto modify_qp_exit2;
		}
		mqpcb->path_migration_state = attr->path_mig_state + 1;
		if (attr->path_mig_state == IB_MIG_REARM)
			my_qp->mig_armed = 1;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_PATH_MIGRATION_STATE, 1);
	}

	if (attr_mask & IB_QP_CAP) {
		mqpcb->max_nr_outst_send_wr = attr->cap.max_send_wr+1;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_MAX_NR_OUTST_SEND_WR, 1);
		mqpcb->max_nr_outst_recv_wr = attr->cap.max_recv_wr+1;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_MAX_NR_OUTST_RECV_WR, 1);
		/* no support for max_send/recv_sge yet */
	}

	if (ehca_debug_level >= 2)
		ehca_dmp(mqpcb, 4*70, "qp_num=%x", ibqp->qp_num);

	h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
				 my_qp->ipz_qp_handle,
				 &my_qp->pf,
				 update_mask,
				 mqpcb, my_qp->galpas.kernel);

	if (h_ret != H_SUCCESS) {
		ret = ehca2ib_return_code(h_ret);
		ehca_err(ibqp->device, "hipz_h_modify_qp() failed h_ret=%li "
			 "ehca_qp=%p qp_num=%x", h_ret, my_qp, ibqp->qp_num);
		goto modify_qp_exit2;
	}

	if ((my_qp->qp_type == IB_QPT_UD ||
	     my_qp->qp_type == IB_QPT_GSI ||
	     my_qp->qp_type == IB_QPT_SMI) &&
	    statetrans == IB_QPST_SQE2RTS) {
		/* doorbell to reprocessing wqes */
		iosync(); /* serialize GAL register access */
		hipz_update_sqa(my_qp, bad_wqe_cnt-1);
		ehca_gen_dbg("doorbell for %x wqes", bad_wqe_cnt);
	}

	if (statetrans == IB_QPST_RESET2INIT ||
	    statetrans == IB_QPST_INIT2INIT) {
		mqpcb->qp_enable = 1;
		mqpcb->qp_state = EHCA_QPS_INIT;
		update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_ENABLE, 1);

		h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
					 my_qp->ipz_qp_handle,
					 &my_qp->pf,
					 update_mask,
					 mqpcb,
					 my_qp->galpas.kernel);

		if (h_ret != H_SUCCESS) {
			ret = ehca2ib_return_code(h_ret);
			ehca_err(ibqp->device, "ENABLE in context of "
				 "RESET_2_INIT failed! Maybe you didn't get "
				 "a LID h_ret=%li ehca_qp=%p qp_num=%x",
				 h_ret, my_qp, ibqp->qp_num);
			goto modify_qp_exit2;
		}
	}
	if ((qp_new_state == IB_QPS_ERR) && (qp_cur_state != IB_QPS_ERR)) {
		ret = check_for_left_cqes(my_qp, shca);
		if (ret)
			goto modify_qp_exit2;
	}

	if (statetrans == IB_QPST_ANY2RESET) {
		ipz_qeit_reset(&my_qp->ipz_rqueue);
		ipz_qeit_reset(&my_qp->ipz_squeue);

		if (qp_cur_state == IB_QPS_ERR) {
			del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);

			if (HAS_RQ(my_qp))
				del_from_err_list(my_qp->recv_cq,
						  &my_qp->rq_err_node);
		}
		reset_queue_map(&my_qp->sq_map);

		if (HAS_RQ(my_qp))
			reset_queue_map(&my_qp->rq_map);
	}

	if (attr_mask & IB_QP_QKEY)
		my_qp->qkey = attr->qkey;

modify_qp_exit2:
	if (squeue_locked) { /* this means: sqe -> rts */
		spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
		my_qp->sqerr_purgeflag = 1;
	}

modify_qp_exit1:
	ehca_free_fw_ctrlblock(mqpcb);

	return ret;
}

int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
		   struct ib_udata *udata)
{
	int ret = 0;

	struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
					      ib_device);
	struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);

	/* The if-block below caches qp_attr to be modified for GSI and SMI
	 * qps during the initialization by ib_mad. When the respective port
	 * is activated, ie we got an event PORT_ACTIVE, we'll replay the
	 * cached modify calls sequence, see ehca_recover_sqs() below.
	 * Why that is required:
	 * 1) If one port is connected, older code requires that port one
	 *    to be connected and module option nr_ports=1 to be given by
	 *    user, which is very inconvenient for end user.
	 * 2) Firmware accepts modify_qp() only if respective port has become
	 *    active. Older code had a wait loop of 30sec create_qp()/
	 *    define_aqp1(), which is not appropriate in practice. This
	 *    code now removes that wait loop, see define_aqp1(), and always
	 *    reports all ports to ib_mad resp. users. Only activated ports
	 *    will then be usable for the users.
	 */
	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) {
		int port = my_qp->init_attr.port_num;
		struct ehca_sport *sport = &shca->sport[port - 1];
		unsigned long flags;
		spin_lock_irqsave(&sport->mod_sqp_lock, flags);
		/* cache qp_attr only during init */
		if (my_qp->mod_qp_parm) {
			struct ehca_mod_qp_parm *p;
			if (my_qp->mod_qp_parm_idx >= EHCA_MOD_QP_PARM_MAX) {
				ehca_err(&shca->ib_device,
					 "mod_qp_parm overflow state=%x port=%x"
					 " type=%x", attr->qp_state,
					 my_qp->init_attr.port_num,
					 ibqp->qp_type);
				spin_unlock_irqrestore(&sport->mod_sqp_lock,
						       flags);
				return -EINVAL;
			}
			p = &my_qp->mod_qp_parm[my_qp->mod_qp_parm_idx];
			p->mask = attr_mask;
			p->attr = *attr;
			my_qp->mod_qp_parm_idx++;
			ehca_dbg(&shca->ib_device,
				 "Saved qp_attr for state=%x port=%x type=%x",
				 attr->qp_state, my_qp->init_attr.port_num,
				 ibqp->qp_type);
			spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
			return 0;
		}
		spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
	}

	ret = internal_modify_qp(ibqp, attr, attr_mask, 0);

	if ((ret == 0) && (attr_mask & IB_QP_STATE))
		my_qp->state = attr->qp_state;

	return ret;
}

void ehca_recover_sqp(struct ib_qp *sqp)
{
	struct ehca_qp *my_sqp = container_of(sqp, struct ehca_qp, ib_qp);
	int port = my_sqp->init_attr.port_num;
	struct ib_qp_attr attr;
	struct ehca_mod_qp_parm *qp_parm;
	int i, qp_parm_idx, ret;
	unsigned long flags, wr_cnt;

	if (!my_sqp->mod_qp_parm)
		return;
	ehca_dbg(sqp->device, "SQP port=%x qp_num=%x", port, sqp->qp_num);

	qp_parm = my_sqp->mod_qp_parm;
	qp_parm_idx = my_sqp->mod_qp_parm_idx;
	for (i = 0; i < qp_parm_idx; i++) {
		attr = qp_parm[i].attr;
		ret = internal_modify_qp(sqp, &attr, qp_parm[i].mask, 0);
		if (ret) {
			ehca_err(sqp->device, "Could not modify SQP port=%x "
				 "qp_num=%x ret=%x", port, sqp->qp_num, ret);
			goto free_qp_parm;
		}
		ehca_dbg(sqp->device, "SQP port=%x qp_num=%x in state=%x",
			 port, sqp->qp_num, attr.qp_state);
	}

	/* re-trigger posted recv wrs */
	wr_cnt = my_sqp->ipz_rqueue.current_q_offset /
		my_sqp->ipz_rqueue.qe_size;
	if (wr_cnt) {
		spin_lock_irqsave(&my_sqp->spinlock_r, flags);
		hipz_update_rqa(my_sqp, wr_cnt);
		spin_unlock_irqrestore(&my_sqp->spinlock_r, flags);
		ehca_dbg(sqp->device, "doorbell port=%x qp_num=%x wr_cnt=%lx",
			 port, sqp->qp_num, wr_cnt);
	}

free_qp_parm:
	kfree(qp_parm);
	/* this prevents subsequent calls to modify_qp() to cache qp_attr */
	my_sqp->mod_qp_parm = NULL;
}

int ehca_query_qp(struct ib_qp *qp,
		  struct ib_qp_attr *qp_attr,
		  int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
	struct ehca_shca *shca = container_of(qp->device, struct ehca_shca,
					      ib_device);
	struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
	struct hcp_modify_qp_control_block *qpcb;
	int cnt, ret = 0;
	u64 h_ret;

	if (qp_attr_mask & QP_ATTR_QUERY_NOT_SUPPORTED) {
		ehca_err(qp->device, "Invalid attribute mask "
			 "ehca_qp=%p qp_num=%x qp_attr_mask=%x ",
			 my_qp, qp->qp_num, qp_attr_mask);
		return -EINVAL;
	}

	qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!qpcb) {
		ehca_err(qp->device, "Out of memory for qpcb "
			 "ehca_qp=%p qp_num=%x", my_qp, qp->qp_num);
		return -ENOMEM;
	}

	h_ret = hipz_h_query_qp(adapter_handle,
				my_qp->ipz_qp_handle,
				&my_qp->pf,
				qpcb, my_qp->galpas.kernel);

	if (h_ret != H_SUCCESS) {
		ret = ehca2ib_return_code(h_ret);
		ehca_err(qp->device, "hipz_h_query_qp() failed "
			 "ehca_qp=%p qp_num=%x h_ret=%li",
			 my_qp, qp->qp_num, h_ret);
		goto query_qp_exit1;
	}

	qp_attr->cur_qp_state = ehca2ib_qp_state(qpcb->qp_state);
	qp_attr->qp_state = qp_attr->cur_qp_state;

	if (qp_attr->cur_qp_state == -EINVAL) {
		ret = -EINVAL;
		ehca_err(qp->device, "Got invalid ehca_qp_state=%x "
			 "ehca_qp=%p qp_num=%x",
			 qpcb->qp_state, my_qp, qp->qp_num);
		goto query_qp_exit1;
	}

	if (qp_attr->qp_state == IB_QPS_SQD)
		qp_attr->sq_draining = 1;

	qp_attr->qkey = qpcb->qkey;
	qp_attr->path_mtu = qpcb->path_mtu;
	qp_attr->path_mig_state = qpcb->path_migration_state - 1;
	qp_attr->rq_psn = qpcb->receive_psn;
	qp_attr->sq_psn = qpcb->send_psn;
	qp_attr->min_rnr_timer = qpcb->min_rnr_nak_timer_field;
	qp_attr->cap.max_send_wr = qpcb->max_nr_outst_send_wr-1;
	qp_attr->cap.max_recv_wr = qpcb->max_nr_outst_recv_wr-1;
	/* UD_AV CIRCUMVENTION */
	if (my_qp->qp_type == IB_QPT_UD) {
		qp_attr->cap.max_send_sge =
			qpcb->actual_nr_sges_in_sq_wqe - 2;
		qp_attr->cap.max_recv_sge =
			qpcb->actual_nr_sges_in_rq_wqe - 2;
	} else {
		qp_attr->cap.max_send_sge =
			qpcb->actual_nr_sges_in_sq_wqe;
		qp_attr->cap.max_recv_sge =
			qpcb->actual_nr_sges_in_rq_wqe;
	}

	qp_attr->cap.max_inline_data = my_qp->sq_max_inline_data_size;
	qp_attr->dest_qp_num = qpcb->dest_qp_nr;

	qp_attr->pkey_index =
		EHCA_BMASK_GET(MQPCB_PRIM_P_KEY_IDX, qpcb->prim_p_key_idx);

	qp_attr->port_num =
		EHCA_BMASK_GET(MQPCB_PRIM_PHYS_PORT, qpcb->prim_phys_port);

	qp_attr->timeout = qpcb->timeout;
	qp_attr->retry_cnt = qpcb->retry_count;
	qp_attr->rnr_retry = qpcb->rnr_retry_count;

	qp_attr->alt_pkey_index =
		EHCA_BMASK_GET(MQPCB_PRIM_P_KEY_IDX, qpcb->alt_p_key_idx);

	qp_attr->alt_port_num = qpcb->alt_phys_port;
	qp_attr->alt_timeout = qpcb->timeout_al;

	qp_attr->max_dest_rd_atomic = qpcb->rdma_nr_atomic_resp_res;
	qp_attr->max_rd_atomic = qpcb->rdma_atomic_outst_dest_qp;

	/* primary av */
	qp_attr->ah_attr.sl = qpcb->service_level;

	if (qpcb->send_grh_flag) {
		qp_attr->ah_attr.ah_flags = IB_AH_GRH;
	}

	qp_attr->ah_attr.static_rate = qpcb->max_static_rate;
	qp_attr->ah_attr.dlid = qpcb->dlid;
	qp_attr->ah_attr.src_path_bits = qpcb->source_path_bits;
	qp_attr->ah_attr.port_num = qp_attr->port_num;

	/* primary GRH */
	qp_attr->ah_attr.grh.traffic_class = qpcb->traffic_class;
	qp_attr->ah_attr.grh.hop_limit = qpcb->hop_limit;
	qp_attr->ah_attr.grh.sgid_index = qpcb->source_gid_idx;
	qp_attr->ah_attr.grh.flow_label = qpcb->flow_label;

	for (cnt = 0; cnt < 16; cnt++)
		qp_attr->ah_attr.grh.dgid.raw[cnt] =
			qpcb->dest_gid.byte[cnt];

	/* alternate AV */
	qp_attr->alt_ah_attr.sl = qpcb->service_level_al;
	if (qpcb->send_grh_flag_al) {
		qp_attr->alt_ah_attr.ah_flags = IB_AH_GRH;
	}

	qp_attr->alt_ah_attr.static_rate = qpcb->max_static_rate_al;
	qp_attr->alt_ah_attr.dlid = qpcb->dlid_al;
	qp_attr->alt_ah_attr.src_path_bits = qpcb->source_path_bits_al;

	/* alternate GRH */
	qp_attr->alt_ah_attr.grh.traffic_class = qpcb->traffic_class_al;
	qp_attr->alt_ah_attr.grh.hop_limit = qpcb->hop_limit_al;
	qp_attr->alt_ah_attr.grh.sgid_index = qpcb->source_gid_idx_al;
	qp_attr->alt_ah_attr.grh.flow_label = qpcb->flow_label_al;

	for (cnt = 0; cnt < 16; cnt++)
		qp_attr->alt_ah_attr.grh.dgid.raw[cnt] =
			qpcb->dest_gid_al.byte[cnt];

	/* return init attributes given in ehca_create_qp */
	*qp_init_attr = my_qp->init_attr;

	if (ehca_debug_level >= 2)
		ehca_dmp(qpcb, 4*70, "qp_num=%x", qp->qp_num);

query_qp_exit1:
	ehca_free_fw_ctrlblock(qpcb);

	return ret;
}

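/*
 * ehca_modify_srq currently implements only IB_SRQ_LIMIT: it programs the
 * new limit and sets QPX_AAELOG_RESET_SRQ_LIMIT, which (as the name
 * suggests) re-arms the SRQ limit asynchronous event.
 */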
int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		    enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct ehca_qp *my_qp =
		container_of(ibsrq, struct ehca_qp, ib_srq);
	struct ehca_shca *shca =
		container_of(ibsrq->pd->device, struct ehca_shca, ib_device);
	struct hcp_modify_qp_control_block *mqpcb;
	u64 update_mask;
	u64 h_ret;
	int ret = 0;

	mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!mqpcb) {
		ehca_err(ibsrq->device, "Could not get zeroed page for mqpcb "
			 "ehca_qp=%p qp_num=%x ", my_qp, my_qp->real_qp_num);
		return -ENOMEM;
	}

	update_mask = 0;
	if (attr_mask & IB_SRQ_LIMIT) {
		attr_mask &= ~IB_SRQ_LIMIT;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_CURR_SRQ_LIMIT, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG, 1);
		mqpcb->curr_srq_limit =
			EHCA_BMASK_SET(MQPCB_CURR_SRQ_LIMIT, attr->srq_limit);
		mqpcb->qp_aff_asyn_ev_log_reg =
			EHCA_BMASK_SET(QPX_AAELOG_RESET_SRQ_LIMIT, 1);
	}

	/* by now, all bits in attr_mask should have been cleared */
	if (attr_mask) {
		ehca_err(ibsrq->device, "invalid attribute mask bits set "
			 "attr_mask=%x", attr_mask);
		ret = -EINVAL;
		goto modify_srq_exit0;
	}

	if (ehca_debug_level >= 2)
		ehca_dmp(mqpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);

	h_ret = hipz_h_modify_qp(shca->ipz_hca_handle, my_qp->ipz_qp_handle,
				 NULL, update_mask, mqpcb,
				 my_qp->galpas.kernel);

	if (h_ret != H_SUCCESS) {
		ret = ehca2ib_return_code(h_ret);
		ehca_err(ibsrq->device, "hipz_h_modify_qp() failed h_ret=%li "
			 "ehca_qp=%p qp_num=%x",
			 h_ret, my_qp, my_qp->real_qp_num);
	}

modify_srq_exit0:
	ehca_free_fw_ctrlblock(mqpcb);

	return ret;
}

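/*
 * ehca_query_srq reads the current SRQ attributes from firmware; max_sge is
 * fixed at 3, the hardware limit for SRQs (see internal_create_qp()).
 */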
int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr)
{
	struct ehca_qp *my_qp = container_of(srq, struct ehca_qp, ib_srq);
	struct ehca_shca *shca = container_of(srq->device, struct ehca_shca,
					      ib_device);
	struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
	struct hcp_modify_qp_control_block *qpcb;
	int ret = 0;
	u64 h_ret;

	qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!qpcb) {
		ehca_err(srq->device, "Out of memory for qpcb "
			 "ehca_qp=%p qp_num=%x", my_qp, my_qp->real_qp_num);
		return -ENOMEM;
	}

	h_ret = hipz_h_query_qp(adapter_handle, my_qp->ipz_qp_handle,
				NULL, qpcb, my_qp->galpas.kernel);

	if (h_ret != H_SUCCESS) {
		ret = ehca2ib_return_code(h_ret);
		ehca_err(srq->device, "hipz_h_query_qp() failed "
			 "ehca_qp=%p qp_num=%x h_ret=%li",
			 my_qp, my_qp->real_qp_num, h_ret);
		goto query_srq_exit1;
	}

	srq_attr->max_wr = qpcb->max_nr_outst_recv_wr - 1;
	srq_attr->max_sge = 3;
	srq_attr->srq_limit = EHCA_BMASK_GET(
		MQPCB_CURR_SRQ_LIMIT, qpcb->curr_srq_limit);

	if (ehca_debug_level >= 2)
		ehca_dmp(qpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);

query_srq_exit1:
	ehca_free_fw_ctrlblock(qpcb);

	return ret;
}

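/*
 * internal_destroy_qp tears down a QP or SRQ: it unassigns the QP from its
 * send CQ, removes it from the idr and the CQ error lists, waits for
 * pending events, destroys the firmware resource and frees the queues.
 */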
static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
			       struct ib_uobject *uobject)
{
	struct ehca_shca *shca = container_of(dev, struct ehca_shca, ib_device);
	struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
					     ib_pd);
	struct ehca_sport *sport = &shca->sport[my_qp->init_attr.port_num - 1];
	u32 qp_num = my_qp->real_qp_num;
	int ret;
	u64 h_ret;
	u8 port_num;
	enum ib_qp_type qp_type;
	unsigned long flags;

	if (uobject) {
		if (my_qp->mm_count_galpa ||
		    my_qp->mm_count_rqueue || my_qp->mm_count_squeue) {
			ehca_err(dev, "Resources still referenced in "
				 "user space qp_num=%x", qp_num);
			return -EINVAL;
		}
	}

	if (my_qp->send_cq) {
		ret = ehca_cq_unassign_qp(my_qp->send_cq, qp_num);
		if (ret) {
			ehca_err(dev, "Couldn't unassign qp from "
				 "send_cq ret=%i qp_num=%x cq_num=%x", ret,
				 qp_num, my_qp->send_cq->cq_number);
			return ret;
		}
	}

	write_lock_irqsave(&ehca_qp_idr_lock, flags);
	idr_remove(&ehca_qp_idr, my_qp->token);
	write_unlock_irqrestore(&ehca_qp_idr_lock, flags);

	/*
	 * SRQs will never get into an error list and do not have a recv_cq,
	 * so we need to skip them here.
	 */
	if (HAS_RQ(my_qp) && !IS_SRQ(my_qp))
		del_from_err_list(my_qp->recv_cq, &my_qp->rq_err_node);

	if (HAS_SQ(my_qp))
		del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);

	/* now wait until all pending events have completed */
	wait_event(my_qp->wait_completion, !atomic_read(&my_qp->nr_events));

	h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
	if (h_ret != H_SUCCESS) {
		ehca_err(dev, "hipz_h_destroy_qp() failed h_ret=%li "
			 "ehca_qp=%p qp_num=%x", h_ret, my_qp, qp_num);
		return ehca2ib_return_code(h_ret);
	}

	port_num = my_qp->init_attr.port_num;
	qp_type = my_qp->init_attr.qp_type;

	if (qp_type == IB_QPT_SMI || qp_type == IB_QPT_GSI) {
		spin_lock_irqsave(&sport->mod_sqp_lock, flags);
		kfree(my_qp->mod_qp_parm);
		my_qp->mod_qp_parm = NULL;
		shca->sport[port_num - 1].ibqp_sqp[qp_type] = NULL;
		spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
	}

	/* no support for IB_QPT_SMI yet */
	if (qp_type == IB_QPT_GSI) {
		struct ib_event event;
		ehca_info(dev, "device %s: port %x is inactive.",
			  shca->ib_device.name, port_num);
		event.device = &shca->ib_device;
		event.event = IB_EVENT_PORT_ERR;
		event.element.port_num = port_num;
		shca->sport[port_num - 1].port_state = IB_PORT_DOWN;
		ib_dispatch_event(&event);
	}

	if (HAS_RQ(my_qp)) {
		ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);

		vfree(my_qp->rq_map.map);
	}
	if (HAS_SQ(my_qp)) {
		ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);

		vfree(my_qp->sq_map.map);
	}
	kmem_cache_free(qp_cache, my_qp);
	atomic_dec(&shca->num_qps);
	return 0;
}

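/* verbs wrappers: QPs and SRQs are both destroyed via internal_destroy_qp() */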
int ehca_destroy_qp(struct ib_qp *qp)
{
	return internal_destroy_qp(qp->device,
				   container_of(qp, struct ehca_qp, ib_qp),
				   qp->uobject);
}

int ehca_destroy_srq(struct ib_srq *srq)
{
	return internal_destroy_qp(srq->device,
				   container_of(srq, struct ehca_qp, ib_srq),
				   srq->uobject);
}

int ehca_init_qp_cache(void)
{
	qp_cache = kmem_cache_create("ehca_cache_qp",
				     sizeof(struct ehca_qp), 0,
				     SLAB_HWCACHE_ALIGN,
				     NULL);
	if (!qp_cache)
		return -ENOMEM;
	return 0;
}

void ehca_cleanup_qp_cache(void)
{
	if (qp_cache)
		kmem_cache_destroy(qp_cache);
}