/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  post_send/recv, poll_cq, req_notify
 *
 *  Authors: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *           Waleri Fomin <fomin@de.ibm.com>
 *           Joachim Fenkes <fenkes@de.ibm.com>
 *           Reinhard Ernst <rernst@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and
 *  OpenIB BSD.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <asm/system.h>
#include "ehca_classes.h"
#include "ehca_tools.h"
#include "ehca_qes.h"
#include "ehca_iverbs.h"
#include "hcp_if.h"
/* in RC traffic, insert an empty RDMA READ every this many packets */
#define ACK_CIRC_THRESHOLD 2000000
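
/*
 * Background note (inferred from the circumvention logic in
 * ehca_post_send() below, not from hardware documentation): an RDMA READ
 * makes the responder send a response that implicitly acknowledges all
 * packets delivered so far, so posting an occasional empty READ should
 * keep the remote QP's ACK state fresh even if the peer coalesces ACKs.
 */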
static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
				  struct ehca_wqe *wqe_p,
				  struct ib_recv_wr *recv_wr)
{
	u8 cnt_ds;

	if (unlikely((recv_wr->num_sge < 0) ||
		     (recv_wr->num_sge > ipz_rqueue->act_nr_of_sg))) {
		ehca_gen_err("Invalid number of WQE SGE. "
			     "num_sge=%x max_nr_of_sg=%x",
			     recv_wr->num_sge, ipz_rqueue->act_nr_of_sg);
		return -EINVAL; /* invalid SG list length */
	}

	/* clear wqe header until sglist */
	memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));

	wqe_p->work_request_id = recv_wr->wr_id;
	wqe_p->nr_of_data_seg = recv_wr->num_sge;

	for (cnt_ds = 0; cnt_ds < recv_wr->num_sge; cnt_ds++) {
		wqe_p->u.all_rcv.sg_list[cnt_ds].vaddr =
			recv_wr->sg_list[cnt_ds].addr;
		wqe_p->u.all_rcv.sg_list[cnt_ds].lkey =
			recv_wr->sg_list[cnt_ds].lkey;
		wqe_p->u.all_rcv.sg_list[cnt_ds].length =
			recv_wr->sg_list[cnt_ds].length;
	}

	if (ehca_debug_level >= 3) {
		ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p",
			     ipz_rqueue);
		ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe");
	}

	return 0;
}
#if defined(DEBUG_GSI_SEND_WR)

/* need ib_mad struct */
#include <rdma/ib_mad.h>

static void trace_send_wr_ud(const struct ib_send_wr *send_wr)
{
	int idx = 0;
	int j;

	while (send_wr) {
		struct ib_mad_hdr *mad_hdr = send_wr->wr.ud.mad_hdr;
		struct ib_sge *sge = send_wr->sg_list;

		ehca_gen_dbg("send_wr#%x wr_id=%lx num_sge=%x "
			     "send_flags=%x opcode=%x", idx, send_wr->wr_id,
			     send_wr->num_sge, send_wr->send_flags,
			     send_wr->opcode);
		if (mad_hdr)
			ehca_gen_dbg("send_wr#%x mad_hdr base_version=%x "
				     "mgmt_class=%x class_version=%x method=%x "
				     "status=%x class_specific=%x tid=%lx "
				     "attr_id=%x resv=%x attr_mod=%x",
				     idx, mad_hdr->base_version,
				     mad_hdr->mgmt_class,
				     mad_hdr->class_version, mad_hdr->method,
				     mad_hdr->status, mad_hdr->class_specific,
				     mad_hdr->tid, mad_hdr->attr_id,
				     mad_hdr->resv, mad_hdr->attr_mod);
		for (j = 0; j < send_wr->num_sge; j++) {
			u8 *data = (u8 *)abs_to_virt(sge->addr);

			ehca_gen_dbg("send_wr#%x sge#%x addr=%p length=%x "
				     "lkey=%x",
				     idx, j, data, sge->length, sge->lkey);
			/* assume length is n*16 */
			ehca_dmp(data, sge->length, "send_wr#%x sge#%x",
				 idx, j);
			sge++;
		} /* eof for j */
		idx++;
		send_wr = send_wr->next;
	} /* eof while send_wr */
}

#endif /* DEBUG_GSI_SEND_WR */
static inline int ehca_write_swqe(struct ehca_qp *qp,
				  struct ehca_wqe *wqe_p,
				  const struct ib_send_wr *send_wr,
				  int hidden)
{
	u32 idx;
	u64 dma_length;
	struct ehca_av *my_av;
	u32 remote_qkey = send_wr->wr.ud.remote_qkey;

	if (unlikely((send_wr->num_sge < 0) ||
		     (send_wr->num_sge > qp->ipz_squeue.act_nr_of_sg))) {
		ehca_gen_err("Invalid number of WQE SGE. "
			     "num_sge=%x max_nr_of_sg=%x",
			     send_wr->num_sge, qp->ipz_squeue.act_nr_of_sg);
		return -EINVAL; /* invalid SG list length */
	}

	/* clear wqe header until sglist */
	memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));

	wqe_p->work_request_id = send_wr->wr_id;

	switch (send_wr->opcode) {
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
		wqe_p->optype = WQE_OPTYPE_SEND;
		break;
	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		wqe_p->optype = WQE_OPTYPE_RDMAWRITE;
		break;
	case IB_WR_RDMA_READ:
		wqe_p->optype = WQE_OPTYPE_RDMAREAD;
		break;
	default:
		ehca_gen_err("Invalid opcode=%x", send_wr->opcode);
		return -EINVAL; /* invalid opcode */
	}

	wqe_p->wqef = (send_wr->opcode) & WQEF_HIGH_NIBBLE;

	wqe_p->wr_flag = 0;

	if ((send_wr->send_flags & IB_SEND_SIGNALED ||
	     qp->init_attr.sq_sig_type == IB_SIGNAL_ALL_WR)
	    && !hidden)
		wqe_p->wr_flag |= WQE_WRFLAG_REQ_SIGNAL_COM;

	if (send_wr->opcode == IB_WR_SEND_WITH_IMM ||
	    send_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
		/* this might not work as long as HW does not support it */
		wqe_p->immediate_data = be32_to_cpu(send_wr->ex.imm_data);
		wqe_p->wr_flag |= WQE_WRFLAG_IMM_DATA_PRESENT;
	}

	wqe_p->nr_of_data_seg = send_wr->num_sge;

	switch (qp->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		/* no break is intentional here */
	case IB_QPT_UD:
		/* IB 1.2 spec C10-15 compliance */
		if (send_wr->wr.ud.remote_qkey & 0x80000000)
			remote_qkey = qp->qkey;

		wqe_p->destination_qp_number = send_wr->wr.ud.remote_qpn << 8;
		wqe_p->local_ee_context_qkey = remote_qkey;
		if (unlikely(!send_wr->wr.ud.ah)) {
			ehca_gen_err("wr.ud.ah is NULL. qp=%p", qp);
			return -EINVAL;
		}
		if (unlikely(send_wr->wr.ud.remote_qpn == 0)) {
			ehca_gen_err("dest QP# is 0. qp=%x", qp->real_qp_num);
			return -EINVAL;
		}
		my_av = container_of(send_wr->wr.ud.ah, struct ehca_av, ib_ah);
		wqe_p->u.ud_av.ud_av = my_av->av;

		/*
		 * omitted check of IB_SEND_INLINE
		 * since HW does not support it
		 */
		for (idx = 0; idx < send_wr->num_sge; idx++) {
			wqe_p->u.ud_av.sg_list[idx].vaddr =
				send_wr->sg_list[idx].addr;
			wqe_p->u.ud_av.sg_list[idx].lkey =
				send_wr->sg_list[idx].lkey;
			wqe_p->u.ud_av.sg_list[idx].length =
				send_wr->sg_list[idx].length;
		} /* eof for idx */
		if (qp->qp_type == IB_QPT_SMI ||
		    qp->qp_type == IB_QPT_GSI)
			wqe_p->u.ud_av.ud_av.pmtu = 1;
		if (qp->qp_type == IB_QPT_GSI) {
			wqe_p->pkeyi = send_wr->wr.ud.pkey_index;
#ifdef DEBUG_GSI_SEND_WR
			trace_send_wr_ud(send_wr);
#endif /* DEBUG_GSI_SEND_WR */
		}
		break;

	case IB_QPT_UC:
		if (send_wr->send_flags & IB_SEND_FENCE)
			wqe_p->wr_flag |= WQE_WRFLAG_FENCE;
		/* no break is intentional here */
	case IB_QPT_RC:
		/* TODO: atomic not implemented */
		wqe_p->u.nud.remote_virtual_adress =
			send_wr->wr.rdma.remote_addr;
		wqe_p->u.nud.rkey = send_wr->wr.rdma.rkey;

		/*
		 * omitted checking of IB_SEND_INLINE
		 * since HW does not support it
		 */
		dma_length = 0;
		for (idx = 0; idx < send_wr->num_sge; idx++) {
			wqe_p->u.nud.sg_list[idx].vaddr =
				send_wr->sg_list[idx].addr;
			wqe_p->u.nud.sg_list[idx].lkey =
				send_wr->sg_list[idx].lkey;
			wqe_p->u.nud.sg_list[idx].length =
				send_wr->sg_list[idx].length;
			dma_length += send_wr->sg_list[idx].length;
		} /* eof idx */
		wqe_p->u.nud.atomic_1st_op_dma_len = dma_length;

		/* unsolicited ack circumvention */
		if (send_wr->opcode == IB_WR_RDMA_READ) {
			/* on RDMA read, switch on and reset counters */
			qp->message_count = qp->packet_count = 0;
			qp->unsol_ack_circ = 1;
		} else
			/* else estimate #packets */
			qp->packet_count += (dma_length >> qp->mtu_shift) + 1;

		break;

	default:
		ehca_gen_err("Invalid qptype=%x", qp->qp_type);
		return -EINVAL;
	}

	if (ehca_debug_level >= 3) {
		ehca_gen_dbg("SEND WQE written into queue qp=%p ", qp);
		ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "send wqe");
	}
	return 0;
}
/* map_ib_wc_status converts raw cqe_status to ib_wc_status */
static inline void map_ib_wc_status(u32 cqe_status,
				    enum ib_wc_status *wc_status)
{
	if (unlikely(cqe_status & WC_STATUS_ERROR_BIT)) {
		switch (cqe_status & 0x3F) {
		case 0x01:
		case 0x21:
			*wc_status = IB_WC_LOC_LEN_ERR;
			break;
		case 0x02:
		case 0x22:
			*wc_status = IB_WC_LOC_QP_OP_ERR;
			break;
		case 0x03:
		case 0x23:
			*wc_status = IB_WC_LOC_EEC_OP_ERR;
			break;
		case 0x04:
		case 0x24:
			*wc_status = IB_WC_LOC_PROT_ERR;
			break;
		case 0x05:
		case 0x25:
			*wc_status = IB_WC_WR_FLUSH_ERR;
			break;
		case 0x06:
			*wc_status = IB_WC_MW_BIND_ERR;
			break;
		case 0x07: /* remote error - look into bits 20:24 */
			switch ((cqe_status
				 & WC_STATUS_REMOTE_ERROR_FLAGS) >> 11) {
			case 0x0:
				/*
				 * PSN Sequence Error!
				 * couldn't find a matching status!
				 */
				*wc_status = IB_WC_GENERAL_ERR;
				break;
			case 0x1:
				*wc_status = IB_WC_REM_INV_REQ_ERR;
				break;
			case 0x2:
				*wc_status = IB_WC_REM_ACCESS_ERR;
				break;
			case 0x3:
				*wc_status = IB_WC_REM_OP_ERR;
				break;
			case 0x4:
				*wc_status = IB_WC_REM_INV_RD_REQ_ERR;
				break;
			}
			break;
		case 0x08:
			*wc_status = IB_WC_RETRY_EXC_ERR;
			break;
		case 0x09:
			*wc_status = IB_WC_RNR_RETRY_EXC_ERR;
			break;
		case 0x0A:
		case 0x2D:
			*wc_status = IB_WC_REM_ABORT_ERR;
			break;
		case 0x0B:
		case 0x2E:
			*wc_status = IB_WC_INV_EECN_ERR;
			break;
		case 0x0C:
		case 0x2F:
			*wc_status = IB_WC_INV_EEC_STATE_ERR;
			break;
		case 0x0D:
			*wc_status = IB_WC_BAD_RESP_ERR;
			break;
		case 0x10:
			/* WQE purged */
			*wc_status = IB_WC_WR_FLUSH_ERR;
			break;
		default:
			*wc_status = IB_WC_FATAL_ERR;
		}
	} else
		*wc_status = IB_WC_SUCCESS;
}
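
/*
 * Worked example of the mapping above: a cqe_status with the error bit set
 * and low six bits equal to 0x07 denotes a remote error; bits 20:24,
 * extracted via WC_STATUS_REMOTE_ERROR_FLAGS and the >> 11 shift, then
 * select the precise cause, e.g. 0x2 yields IB_WC_REM_ACCESS_ERR.
 */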
static inline int post_one_send(struct ehca_qp *my_qp,
				struct ib_send_wr *cur_send_wr,
				struct ib_send_wr **bad_send_wr,
				int hidden)
{
	struct ehca_wqe *wqe_p;
	int ret;
	u64 start_offset = my_qp->ipz_squeue.current_q_offset;

	/* get pointer next to free WQE */
	wqe_p = ipz_qeit_get_inc(&my_qp->ipz_squeue);
	if (unlikely(!wqe_p)) {
		/* too many posted work requests: queue overflow */
		if (bad_send_wr)
			*bad_send_wr = cur_send_wr;
		ehca_err(my_qp->ib_qp.device, "Too many posted WQEs "
			 "qp_num=%x", my_qp->ib_qp.qp_num);
		return -ENOMEM;
	}
	/* write a SEND WQE into the QUEUE */
	ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr, hidden);
	/*
	 * if something failed,
	 * reset the free entry pointer to the start value
	 */
	if (unlikely(ret)) {
		my_qp->ipz_squeue.current_q_offset = start_offset;
		if (bad_send_wr)
			*bad_send_wr = cur_send_wr;
		ehca_err(my_qp->ib_qp.device, "Could not write WQE "
			 "qp_num=%x", my_qp->ib_qp.qp_num);
		return -EINVAL;
	}

	return 0;
}
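
/*
 * Design note: ipz_qeit_get_inc() advances the queue's free-entry pointer
 * before the WQE contents are validated, so on failure the saved
 * start_offset is written back to undo the increment and leave the send
 * queue exactly as it was.
 */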
int ehca_post_send(struct ib_qp *qp,
		   struct ib_send_wr *send_wr,
		   struct ib_send_wr **bad_send_wr)
{
	struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
	struct ib_send_wr *cur_send_wr;
	int wqe_cnt = 0;
	int ret = 0;
	unsigned long flags;

	/* Reject WR if QP is in RESET, INIT or RTR state */
	if (unlikely(my_qp->state < IB_QPS_RTS)) {
		ehca_err(qp->device, "Invalid QP state qp_state=%d qpn=%x",
			 my_qp->state, qp->qp_num);
		return -EINVAL;
	}

	/* LOCK the QUEUE */
	spin_lock_irqsave(&my_qp->spinlock_s, flags);

	/* Send an empty extra RDMA read if:
	 *  1) there has been an RDMA read on this connection before
	 *  2) no RDMA read occurred for ACK_CIRC_THRESHOLD link packets
	 *  3) we can be sure that any previous extra RDMA read has been
	 *     processed so we don't overflow the SQ
	 */
	if (unlikely(my_qp->unsol_ack_circ &&
		     my_qp->packet_count > ACK_CIRC_THRESHOLD &&
		     my_qp->message_count > my_qp->init_attr.cap.max_send_wr)) {
		/* insert an empty RDMA READ to fix up the remote QP state */
		struct ib_send_wr circ_wr;
		memset(&circ_wr, 0, sizeof(circ_wr));
		circ_wr.opcode = IB_WR_RDMA_READ;
		post_one_send(my_qp, &circ_wr, NULL, 1); /* ignore retcode */
		wqe_cnt++;
		ehca_dbg(qp->device, "posted circ wr qp_num=%x", qp->qp_num);
		my_qp->message_count = my_qp->packet_count = 0;
	}

	/* loop processes list of send reqs */
	for (cur_send_wr = send_wr; cur_send_wr != NULL;
	     cur_send_wr = cur_send_wr->next) {
		ret = post_one_send(my_qp, cur_send_wr, bad_send_wr, 0);
		if (unlikely(ret)) {
			/* if one or more WQEs were successful, don't fail */
			if (wqe_cnt)
				ret = 0;
			goto post_send_exit0;
		}
		wqe_cnt++;
	} /* eof for cur_send_wr */

post_send_exit0:
	iosync(); /* serialize GAL register access */
	hipz_update_sqa(my_qp, wqe_cnt);
	if (unlikely(ret || ehca_debug_level >= 2))
		ehca_dbg(qp->device, "ehca_qp=%p qp_num=%x wqe_cnt=%d ret=%i",
			 my_qp, qp->qp_num, wqe_cnt, ret);
	my_qp->message_count += wqe_cnt;
	spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
	return ret;
}
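
/*
 * Usage sketch (hypothetical ULP code, not part of this driver): a consumer
 * reaches this entry point through the generic verbs layer, assuming a
 * registered SGE "sge" and a connected "qp" already exist:
 *
 *	struct ib_send_wr wr = { .opcode = IB_WR_SEND,
 *				 .sg_list = &sge, .num_sge = 1,
 *				 .send_flags = IB_SEND_SIGNALED };
 *	struct ib_send_wr *bad_wr;
 *	int err = ib_post_send(qp, &wr, &bad_wr);
 *
 * On error, bad_wr points at the first WR that was not posted; everything
 * before it has already been handed to the hardware.
 */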
static int internal_post_recv(struct ehca_qp *my_qp,
			      struct ib_device *dev,
			      struct ib_recv_wr *recv_wr,
			      struct ib_recv_wr **bad_recv_wr)
{
	struct ib_recv_wr *cur_recv_wr;
	struct ehca_wqe *wqe_p;
	int wqe_cnt = 0;
	int ret = 0;
	unsigned long flags;

	if (unlikely(!HAS_RQ(my_qp))) {
		ehca_err(dev, "QP has no RQ ehca_qp=%p qp_num=%x ext_type=%d",
			 my_qp, my_qp->real_qp_num, my_qp->ext_type);
		return -ENODEV;
	}

	/* LOCK the QUEUE */
	spin_lock_irqsave(&my_qp->spinlock_r, flags);

	/* loop processes list of recv reqs */
	for (cur_recv_wr = recv_wr; cur_recv_wr != NULL;
	     cur_recv_wr = cur_recv_wr->next) {
		u64 start_offset = my_qp->ipz_rqueue.current_q_offset;
		/* get pointer next to free WQE */
		wqe_p = ipz_qeit_get_inc(&my_qp->ipz_rqueue);
		if (unlikely(!wqe_p)) {
			/* too many posted work requests: queue overflow */
			if (bad_recv_wr)
				*bad_recv_wr = cur_recv_wr;
			if (wqe_cnt == 0) {
				ret = -ENOMEM;
				ehca_err(dev, "Too many posted WQEs "
					 "qp_num=%x", my_qp->real_qp_num);
			}
			goto post_recv_exit0;
		}
		/* write a RECV WQE into the QUEUE */
		ret = ehca_write_rwqe(&my_qp->ipz_rqueue, wqe_p, cur_recv_wr);
		/*
		 * if something failed,
		 * reset the free entry pointer to the start value
		 */
		if (unlikely(ret)) {
			my_qp->ipz_rqueue.current_q_offset = start_offset;
			*bad_recv_wr = cur_recv_wr;
			if (wqe_cnt == 0) {
				ret = -EINVAL;
				ehca_err(dev, "Could not write WQE "
					 "qp_num=%x", my_qp->real_qp_num);
			}
			goto post_recv_exit0;
		}
		wqe_cnt++;
	} /* eof for cur_recv_wr */

post_recv_exit0:
	iosync(); /* serialize GAL register access */
	hipz_update_rqa(my_qp, wqe_cnt);
	if (unlikely(ret || ehca_debug_level >= 2))
		ehca_dbg(dev, "ehca_qp=%p qp_num=%x wqe_cnt=%d ret=%i",
			 my_qp, my_qp->real_qp_num, wqe_cnt, ret);
	spin_unlock_irqrestore(&my_qp->spinlock_r, flags);
	return ret;
}
int ehca_post_recv(struct ib_qp *qp,
		   struct ib_recv_wr *recv_wr,
		   struct ib_recv_wr **bad_recv_wr)
{
	struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);

	/* Reject WR if QP is in RESET state */
	if (unlikely(my_qp->state == IB_QPS_RESET)) {
		ehca_err(qp->device, "Invalid QP state qp_state=%d qpn=%x",
			 my_qp->state, qp->qp_num);
		return -EINVAL;
	}

	return internal_post_recv(my_qp, qp->device, recv_wr, bad_recv_wr);
}
int ehca_post_srq_recv(struct ib_srq *srq,
		       struct ib_recv_wr *recv_wr,
		       struct ib_recv_wr **bad_recv_wr)
{
	return internal_post_recv(container_of(srq, struct ehca_qp, ib_srq),
				  srq->device, recv_wr, bad_recv_wr);
}
/*
 * ib_wc_opcode table converts ehca wc opcode to ib
 * Since we use zero to indicate invalid opcode, the actual ib opcode must
 * be decremented by one.
 */
static const u8 ib_wc_opcode[255] = {
	[0x01] = IB_WC_RECV+1,
	[0x02] = IB_WC_RECV_RDMA_WITH_IMM+1,
	[0x04] = IB_WC_BIND_MW+1,
	[0x08] = IB_WC_FETCH_ADD+1,
	[0x10] = IB_WC_COMP_SWAP+1,
	[0x20] = IB_WC_RDMA_WRITE+1,
	[0x40] = IB_WC_RDMA_READ+1,
	[0x80] = IB_WC_SEND+1
};
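
/*
 * Lookup example: a hardware optype of 0x20 maps to IB_WC_RDMA_WRITE+1,
 * and ehca_poll_cq_one() subtracts one after the lookup. Any optype missing
 * from the table reads as zero, which becomes -1 after the decrement and is
 * rejected as an invalid opcode.
 */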
/* internal function to poll one entry of cq */
static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
{
	int ret = 0;
	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
	struct ehca_cqe *cqe;
	struct ehca_qp *my_qp;
	int cqe_count = 0, is_error;

poll_cq_one_read_cqe:
	cqe = (struct ehca_cqe *)
		ipz_qeit_get_inc_valid(&my_cq->ipz_queue);
	if (!cqe) {
		ret = -EAGAIN;
		if (ehca_debug_level >= 3)
			ehca_dbg(cq->device, "Completion queue is empty "
				 "my_cq=%p cq_num=%x", my_cq, my_cq->cq_number);
		goto poll_cq_one_exit0;
	}

	/* prevents loads being reordered across this point */
	rmb();

	cqe_count++;
	if (unlikely(cqe->status & WC_STATUS_PURGE_BIT)) {
		struct ehca_qp *qp;
		int purgeflag;
		unsigned long flags;

		qp = ehca_cq_get_qp(my_cq, cqe->local_qp_number);
		if (!qp) {
			ehca_err(cq->device, "cq_num=%x qp_num=%x "
				 "could not find qp -> ignore cqe",
				 my_cq->cq_number, cqe->local_qp_number);
			ehca_dmp(cqe, 64, "cq_num=%x qp_num=%x",
				 my_cq->cq_number, cqe->local_qp_number);
			/* ignore this purged cqe */
			goto poll_cq_one_read_cqe;
		}

		spin_lock_irqsave(&qp->spinlock_s, flags);
		purgeflag = qp->sqerr_purgeflag;
		spin_unlock_irqrestore(&qp->spinlock_s, flags);

		if (purgeflag) {
			ehca_dbg(cq->device,
				 "Got CQE with purged bit qp_num=%x src_qp=%x",
				 cqe->local_qp_number, cqe->remote_qp_number);
			if (ehca_debug_level >= 2)
				ehca_dmp(cqe, 64, "qp_num=%x src_qp=%x",
					 cqe->local_qp_number,
					 cqe->remote_qp_number);
			/*
			 * ignore this to avoid double cqes of bad wqe
			 * that caused sqe and turn off purge flag
			 */
			qp->sqerr_purgeflag = 0;
			goto poll_cq_one_read_cqe;
		}
	}

	is_error = cqe->status & WC_STATUS_ERROR_BIT;

	/* trace error CQEs if debug_level >= 1, trace all CQEs if >= 3 */
	if (unlikely(ehca_debug_level >= 3 || (ehca_debug_level && is_error))) {
		ehca_dbg(cq->device,
			 "Received %sCOMPLETION ehca_cq=%p cq_num=%x -----",
			 is_error ? "ERROR " : "", my_cq, my_cq->cq_number);
		ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
			 my_cq, my_cq->cq_number);
		ehca_dbg(cq->device,
			 "ehca_cq=%p cq_num=%x -------------------------",
			 my_cq, my_cq->cq_number);
	}

	/* we got a completion! */
	wc->wr_id = cqe->work_request_id;

	/* eval ib_wc_opcode */
	wc->opcode = ib_wc_opcode[cqe->optype]-1;
	if (unlikely(wc->opcode == -1)) {
		ehca_err(cq->device, "Invalid cqe->OPType=%x cqe->status=%x "
			 "ehca_cq=%p cq_num=%x",
			 cqe->optype, cqe->status, my_cq, my_cq->cq_number);
		/* dump cqe for other infos */
		ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
			 my_cq, my_cq->cq_number);
		/* update also queue adder to throw away this entry!!! */
		goto poll_cq_one_exit0;
	}

	/* eval ib_wc_status */
	if (unlikely(is_error)) {
		/* complete with errors */
		map_ib_wc_status(cqe->status, &wc->status);
		wc->vendor_err = wc->status;
	} else
		wc->status = IB_WC_SUCCESS;

	read_lock(&ehca_qp_idr_lock);
	my_qp = idr_find(&ehca_qp_idr, cqe->qp_token);
	wc->qp = &my_qp->ib_qp;
	read_unlock(&ehca_qp_idr_lock);

	wc->byte_len = cqe->nr_bytes_transferred;
	wc->pkey_index = cqe->pkey_index;
	wc->slid = cqe->rlid;
	wc->dlid_path_bits = cqe->dlid;
	wc->src_qp = cqe->remote_qp_number;
	wc->wc_flags = cqe->w_completion_flags;
	wc->ex.imm_data = cpu_to_be32(cqe->immediate_data);
	wc->sl = cqe->service_level;

poll_cq_one_exit0:
	if (cqe_count > 0)
		hipz_update_feca(my_cq, cqe_count);

	return ret;
}
int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
{
	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
	int nr;
	struct ib_wc *current_wc = wc;
	int ret = 0;
	unsigned long flags;

	if (num_entries < 1) {
		ehca_err(cq->device, "Invalid num_entries=%d ehca_cq=%p "
			 "cq_num=%x", num_entries, my_cq, my_cq->cq_number);
		ret = -EINVAL;
		goto poll_cq_exit0;
	}

	spin_lock_irqsave(&my_cq->spinlock, flags);
	for (nr = 0; nr < num_entries; nr++) {
		ret = ehca_poll_cq_one(cq, current_wc);
		if (ret)
			break;
		current_wc++;
	} /* eof for nr */
	spin_unlock_irqrestore(&my_cq->spinlock, flags);
	if (ret == -EAGAIN || !ret)
		ret = nr;

poll_cq_exit0:
	return ret;
}
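
/*
 * Return-value note: -EAGAIN from ehca_poll_cq_one() only means the CQ ran
 * dry, so it is folded into the count of successfully polled entries above;
 * only a genuine failure propagates to the caller as a negative errno.
 */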
int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags notify_flags)
{
	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
	int ret = 0;

	switch (notify_flags & IB_CQ_SOLICITED_MASK) {
	case IB_CQ_SOLICITED:
		hipz_set_cqx_n0(my_cq, 1);
		break;
	case IB_CQ_NEXT_COMP:
		hipz_set_cqx_n1(my_cq, 1);
		break;
	default:
		return -EINVAL;
	}

	if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
		unsigned long spl_flags;
		spin_lock_irqsave(&my_cq->spinlock, spl_flags);
		ret = ipz_qeit_is_valid(&my_cq->ipz_queue);
		spin_unlock_irqrestore(&my_cq->spinlock, spl_flags);
	}

	return ret;
}
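
/*
 * Per the verbs contract for IB_CQ_REPORT_MISSED_EVENTS, a positive return
 * value tells the caller that completions were already pending when the
 * notification was armed, so the CQ should be polled again before sleeping.
 */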