/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/klnds/o2iblnd/o2iblnd_cb.c
 *
 * Author: Eric Barton <eric@bartonsoftware.com>
 */
static void kiblnd_peer_alive(kib_peer_t *peer);
static void kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error);
static void kiblnd_check_sends(kib_conn_t *conn);
static void kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx,
			       int type, int body_nob);
static int kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
			    int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie);
static void kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn);
static void kiblnd_queue_tx(kib_tx_t *tx, kib_conn_t *conn);
static void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx);
static void
kiblnd_tx_done(lnet_ni_t *ni, kib_tx_t *tx)
{
	lnet_msg_t *lntmsg[2];
	kib_net_t *net = ni->ni_data;
	int rc;
	int i;

	LASSERT(net);
	LASSERT(!in_interrupt());
	LASSERT(!tx->tx_queued);	/* mustn't be queued for sending */
	LASSERT(!tx->tx_sending);	/* mustn't be awaiting sent callback */
	LASSERT(!tx->tx_waiting);	/* mustn't be awaiting peer response */
	LASSERT(tx->tx_pool);

	kiblnd_unmap_tx(ni, tx);

	/* tx may have up to 2 lnet msgs to finalise */
	lntmsg[0] = tx->tx_lntmsg[0]; tx->tx_lntmsg[0] = NULL;
	lntmsg[1] = tx->tx_lntmsg[1]; tx->tx_lntmsg[1] = NULL;
	rc = tx->tx_status;

	if (tx->tx_conn) {
		LASSERT(ni == tx->tx_conn->ibc_peer->ibp_ni);

		kiblnd_conn_decref(tx->tx_conn);
		tx->tx_conn = NULL;
	}

	tx->tx_nwrq = 0;
	tx->tx_status = 0;

	kiblnd_pool_free_node(&tx->tx_pool->tpo_pool, &tx->tx_list);

	/* delay finalize until my descs have been freed */
	for (i = 0; i < 2; i++) {
		if (!lntmsg[i])
			continue;

		lnet_finalize(ni, lntmsg[i], rc);
	}
}
void
kiblnd_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int status)
{
	kib_tx_t *tx;

	while (!list_empty(txlist)) {
		tx = list_entry(txlist->next, kib_tx_t, tx_list);

		list_del(&tx->tx_list);
		/* complete now */
		tx->tx_queued = 0;
		tx->tx_status = status;
		kiblnd_tx_done(ni, tx);
	}
}
static kib_tx_t *
kiblnd_get_idle_tx(lnet_ni_t *ni, lnet_nid_t target)
{
	kib_net_t *net = (kib_net_t *)ni->ni_data;
	struct list_head *node;
	kib_tx_t *tx;
	kib_tx_poolset_t *tps;

	tps = net->ibn_tx_ps[lnet_cpt_of_nid(target)];
	node = kiblnd_pool_alloc_node(&tps->tps_poolset);
	if (!node)
		return NULL;
	tx = list_entry(node, kib_tx_t, tx_list);

	LASSERT(!tx->tx_nwrq);
	LASSERT(!tx->tx_queued);
	LASSERT(!tx->tx_sending);
	LASSERT(!tx->tx_waiting);
	LASSERT(!tx->tx_status);
	LASSERT(!tx->tx_conn);
	LASSERT(!tx->tx_lntmsg[0]);
	LASSERT(!tx->tx_lntmsg[1]);
	LASSERT(!tx->tx_nfrags);

	return tx;
}
static void
kiblnd_drop_rx(kib_rx_t *rx)
{
	kib_conn_t *conn = rx->rx_conn;
	struct kib_sched_info *sched = conn->ibc_sched;
	unsigned long flags;

	spin_lock_irqsave(&sched->ibs_lock, flags);
	LASSERT(conn->ibc_nrx > 0);
	conn->ibc_nrx--;
	spin_unlock_irqrestore(&sched->ibs_lock, flags);

	kiblnd_conn_decref(conn);
}
static int
kiblnd_post_rx(kib_rx_t *rx, int credit)
{
	kib_conn_t *conn = rx->rx_conn;
	kib_net_t *net = conn->ibc_peer->ibp_ni->ni_data;
	struct ib_recv_wr *bad_wrq = NULL;
	struct ib_mr *mr = conn->ibc_hdev->ibh_mrs;
	int rc;

	LASSERT(net);
	LASSERT(!in_interrupt());
	LASSERT(credit == IBLND_POSTRX_NO_CREDIT ||
		credit == IBLND_POSTRX_PEER_CREDIT ||
		credit == IBLND_POSTRX_RSRVD_CREDIT);
	LASSERT(mr);

	rx->rx_sge.lkey = mr->lkey;
	rx->rx_sge.addr = rx->rx_msgaddr;
	rx->rx_sge.length = IBLND_MSG_SIZE;

	rx->rx_wrq.next = NULL;
	rx->rx_wrq.sg_list = &rx->rx_sge;
	rx->rx_wrq.num_sge = 1;
	rx->rx_wrq.wr_id = kiblnd_ptr2wreqid(rx, IBLND_WID_RX);

	LASSERT(conn->ibc_state >= IBLND_CONN_INIT);
	LASSERT(rx->rx_nob >= 0);	/* not posted */

	if (conn->ibc_state > IBLND_CONN_ESTABLISHED) { /* closing */
		kiblnd_drop_rx(rx);	/* No more posts for this rx */
		return 0;
	}

	rx->rx_nob = -1;		/* flag posted */

	/* NB: need an extra reference after ib_post_recv because we don't
	 * own this rx (and rx::rx_conn) anymore, LU-5678.
	 */
	kiblnd_conn_addref(conn);
	rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq, &bad_wrq);
	if (unlikely(rc)) {
		CERROR("Can't post rx for %s: %d, bad_wrq: %p\n",
		       libcfs_nid2str(conn->ibc_peer->ibp_nid), rc, bad_wrq);
		rx->rx_nob = 0;
	}

	if (conn->ibc_state < IBLND_CONN_ESTABLISHED) /* Initial post */
		goto out;

	if (unlikely(rc)) {
		kiblnd_close_conn(conn, rc);
		kiblnd_drop_rx(rx);	/* No more posts for this rx */
		goto out;
	}

	if (credit == IBLND_POSTRX_NO_CREDIT)
		goto out;

	spin_lock(&conn->ibc_lock);
	if (credit == IBLND_POSTRX_PEER_CREDIT)
		conn->ibc_outstanding_credits++;
	else
		conn->ibc_reserved_credits++;
	spin_unlock(&conn->ibc_lock);

	kiblnd_check_sends(conn);
out:
	kiblnd_conn_decref(conn);
	return rc;
}
static kib_tx_t *
kiblnd_find_waiting_tx_locked(kib_conn_t *conn, int txtype, __u64 cookie)
{
	struct list_head *tmp;

	list_for_each(tmp, &conn->ibc_active_txs) {
		kib_tx_t *tx = list_entry(tmp, kib_tx_t, tx_list);

		LASSERT(!tx->tx_queued);
		LASSERT(tx->tx_sending || tx->tx_waiting);

		if (tx->tx_cookie != cookie)
			continue;

		if (tx->tx_waiting &&
		    tx->tx_msg->ibm_type == txtype)
			return tx;

		CWARN("Bad completion: %swaiting, type %x (wanted %x)\n",
		      tx->tx_waiting ? "" : "NOT ",
		      tx->tx_msg->ibm_type, txtype);
	}
	return NULL;
}
static void
kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie)
{
	kib_tx_t *tx;
	lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
	int idle;

	spin_lock(&conn->ibc_lock);

	tx = kiblnd_find_waiting_tx_locked(conn, txtype, cookie);
	if (!tx) {
		spin_unlock(&conn->ibc_lock);

		CWARN("Unmatched completion type %x cookie %#llx from %s\n",
		      txtype, cookie, libcfs_nid2str(conn->ibc_peer->ibp_nid));
		kiblnd_close_conn(conn, -EPROTO);
		return;
	}

	if (!tx->tx_status) {	       /* success so far */
		if (status < 0) /* failed? */
			tx->tx_status = status;
		else if (txtype == IBLND_MSG_GET_REQ)
			lnet_set_reply_msg_len(ni, tx->tx_lntmsg[1], status);
	}

	tx->tx_waiting = 0;

	idle = !tx->tx_queued && !tx->tx_sending;
	if (idle)
		list_del(&tx->tx_list);

	spin_unlock(&conn->ibc_lock);

	if (idle)
		kiblnd_tx_done(ni, tx);
}
static void
kiblnd_send_completion(kib_conn_t *conn, int type, int status, __u64 cookie)
{
	lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
	kib_tx_t *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);

	if (!tx) {
		CERROR("Can't get tx for completion %x for %s\n",
		       type, libcfs_nid2str(conn->ibc_peer->ibp_nid));
		return;
	}

	tx->tx_msg->ibm_u.completion.ibcm_status = status;
	tx->tx_msg->ibm_u.completion.ibcm_cookie = cookie;
	kiblnd_init_tx_msg(ni, tx, type, sizeof(kib_completion_msg_t));

	kiblnd_queue_tx(tx, conn);
}
static void
kiblnd_handle_rx(kib_rx_t *rx)
{
	kib_msg_t *msg = rx->rx_msg;
	kib_conn_t *conn = rx->rx_conn;
	lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
	int credits = msg->ibm_credits;
	kib_tx_t *tx;
	int rc = 0;
	int rc2;
	int post_credit;

	LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);

	CDEBUG(D_NET, "Received %x[%d] from %s\n",
	       msg->ibm_type, credits,
	       libcfs_nid2str(conn->ibc_peer->ibp_nid));

	if (credits) {
		/* Have I received credits that will let me send? */
		spin_lock(&conn->ibc_lock);

		if (conn->ibc_credits + credits >
		    conn->ibc_queue_depth) {
			rc2 = conn->ibc_credits;
			spin_unlock(&conn->ibc_lock);

			CERROR("Bad credits from %s: %d + %d > %d\n",
			       libcfs_nid2str(conn->ibc_peer->ibp_nid),
			       rc2, credits, conn->ibc_queue_depth);

			kiblnd_close_conn(conn, -EPROTO);
			kiblnd_post_rx(rx, IBLND_POSTRX_NO_CREDIT);
			return;
		}

		conn->ibc_credits += credits;

		/* This ensures the credit taken by NOOP can be returned */
		if (msg->ibm_type == IBLND_MSG_NOOP &&
		    !IBLND_OOB_CAPABLE(conn->ibc_version)) /* v1 only */
			conn->ibc_outstanding_credits++;

		spin_unlock(&conn->ibc_lock);
		kiblnd_check_sends(conn);
	}

	switch (msg->ibm_type) {
	default:
		CERROR("Bad IBLND message type %x from %s\n",
		       msg->ibm_type, libcfs_nid2str(conn->ibc_peer->ibp_nid));
		post_credit = IBLND_POSTRX_NO_CREDIT;
		rc = -EPROTO;
		break;

	case IBLND_MSG_NOOP:
		if (IBLND_OOB_CAPABLE(conn->ibc_version)) {
			post_credit = IBLND_POSTRX_NO_CREDIT;
			break;
		}

		if (credits) /* credit already posted */
			post_credit = IBLND_POSTRX_NO_CREDIT;
		else	     /* a keepalive NOOP */
			post_credit = IBLND_POSTRX_PEER_CREDIT;
		break;

	case IBLND_MSG_IMMEDIATE:
		post_credit = IBLND_POSTRX_DONT_POST;
		rc = lnet_parse(ni, &msg->ibm_u.immediate.ibim_hdr,
				msg->ibm_srcnid, rx, 0);
		if (rc < 0) /* repost on error */
			post_credit = IBLND_POSTRX_PEER_CREDIT;
		break;

	case IBLND_MSG_PUT_REQ:
		post_credit = IBLND_POSTRX_DONT_POST;
		rc = lnet_parse(ni, &msg->ibm_u.putreq.ibprm_hdr,
				msg->ibm_srcnid, rx, 1);
		if (rc < 0) /* repost on error */
			post_credit = IBLND_POSTRX_PEER_CREDIT;
		break;

	case IBLND_MSG_PUT_NAK:
		CWARN("PUT_NACK from %s\n",
		      libcfs_nid2str(conn->ibc_peer->ibp_nid));
		post_credit = IBLND_POSTRX_RSRVD_CREDIT;
		kiblnd_handle_completion(conn, IBLND_MSG_PUT_REQ,
					 msg->ibm_u.completion.ibcm_status,
					 msg->ibm_u.completion.ibcm_cookie);
		break;

	case IBLND_MSG_PUT_ACK:
		post_credit = IBLND_POSTRX_RSRVD_CREDIT;

		spin_lock(&conn->ibc_lock);
		tx = kiblnd_find_waiting_tx_locked(conn, IBLND_MSG_PUT_REQ,
						   msg->ibm_u.putack.ibpam_src_cookie);
		if (tx)
			list_del(&tx->tx_list);
		spin_unlock(&conn->ibc_lock);

		if (!tx) {
			CERROR("Unmatched PUT_ACK from %s\n",
			       libcfs_nid2str(conn->ibc_peer->ibp_nid));
			rc = -EPROTO;
			break;
		}

		LASSERT(tx->tx_waiting);
		/*
		 * CAVEAT EMPTOR: I could be racing with tx_complete, but...
		 * (a) I can overwrite tx_msg since my peer has received it!
		 * (b) tx_waiting set tells tx_complete() it's not done.
		 */
		tx->tx_nwrq = 0;	/* overwrite PUT_REQ */

		rc2 = kiblnd_init_rdma(conn, tx, IBLND_MSG_PUT_DONE,
				       kiblnd_rd_size(&msg->ibm_u.putack.ibpam_rd),
				       &msg->ibm_u.putack.ibpam_rd,
				       msg->ibm_u.putack.ibpam_dst_cookie);
		if (rc2 < 0)
			CERROR("Can't setup rdma for PUT to %s: %d\n",
			       libcfs_nid2str(conn->ibc_peer->ibp_nid), rc2);

		spin_lock(&conn->ibc_lock);
		tx->tx_waiting = 0;	/* clear waiting and queue atomically */
		kiblnd_queue_tx_locked(tx, conn);
		spin_unlock(&conn->ibc_lock);
		break;

	case IBLND_MSG_PUT_DONE:
		post_credit = IBLND_POSTRX_PEER_CREDIT;
		kiblnd_handle_completion(conn, IBLND_MSG_PUT_ACK,
					 msg->ibm_u.completion.ibcm_status,
					 msg->ibm_u.completion.ibcm_cookie);
		break;

	case IBLND_MSG_GET_REQ:
		post_credit = IBLND_POSTRX_DONT_POST;
		rc = lnet_parse(ni, &msg->ibm_u.get.ibgm_hdr,
				msg->ibm_srcnid, rx, 1);
		if (rc < 0) /* repost on error */
			post_credit = IBLND_POSTRX_PEER_CREDIT;
		break;

	case IBLND_MSG_GET_DONE:
		post_credit = IBLND_POSTRX_RSRVD_CREDIT;
		kiblnd_handle_completion(conn, IBLND_MSG_GET_REQ,
					 msg->ibm_u.completion.ibcm_status,
					 msg->ibm_u.completion.ibcm_cookie);
		break;
	}

	if (rc < 0) /* protocol error */
		kiblnd_close_conn(conn, rc);

	if (post_credit != IBLND_POSTRX_DONT_POST)
		kiblnd_post_rx(rx, post_credit);
}
static void
kiblnd_rx_complete(kib_rx_t *rx, int status, int nob)
{
	kib_msg_t *msg = rx->rx_msg;
	kib_conn_t *conn = rx->rx_conn;
	lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
	kib_net_t *net = ni->ni_data;
	int rc;
	int err = -EIO;

	LASSERT(net);
	LASSERT(rx->rx_nob < 0);	/* was posted */
	rx->rx_nob = 0;			/* isn't now */

	if (conn->ibc_state > IBLND_CONN_ESTABLISHED)
		goto ignore;

	if (status != IB_WC_SUCCESS) {
		CNETERR("Rx from %s failed: %d\n",
			libcfs_nid2str(conn->ibc_peer->ibp_nid), status);
		goto failed;
	}

	LASSERT(nob >= 0);
	rx->rx_nob = nob;

	rc = kiblnd_unpack_msg(msg, rx->rx_nob);
	if (rc) {
		CERROR("Error %d unpacking rx from %s\n",
		       rc, libcfs_nid2str(conn->ibc_peer->ibp_nid));
		goto failed;
	}

	if (msg->ibm_srcnid != conn->ibc_peer->ibp_nid ||
	    msg->ibm_dstnid != ni->ni_nid ||
	    msg->ibm_srcstamp != conn->ibc_incarnation ||
	    msg->ibm_dststamp != net->ibn_incarnation) {
		CERROR("Stale rx from %s\n",
		       libcfs_nid2str(conn->ibc_peer->ibp_nid));
		err = -ESTALE;
		goto failed;
	}

	/* set time last known alive */
	kiblnd_peer_alive(conn->ibc_peer);

	/* racing with connection establishment/teardown! */

	if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
		rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
		unsigned long flags;

		write_lock_irqsave(g_lock, flags);
		/* must check holding global lock to eliminate race */
		if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
			list_add_tail(&rx->rx_list, &conn->ibc_early_rxs);
			write_unlock_irqrestore(g_lock, flags);
			return;
		}
		write_unlock_irqrestore(g_lock, flags);
	}
	kiblnd_handle_rx(rx);
	return;

failed:
	CDEBUG(D_NET, "rx %p conn %p\n", rx, conn);
	kiblnd_close_conn(conn, err);
ignore:
	kiblnd_drop_rx(rx);	/* Don't re-post rx. */
}
static struct page *
kiblnd_kvaddr_to_page(unsigned long vaddr)
{
	struct page *page;

	if (is_vmalloc_addr((void *)vaddr)) {
		page = vmalloc_to_page((void *)vaddr);
		LASSERT(page);
		return page;
	}
#ifdef CONFIG_HIGHMEM
	if (vaddr >= PKMAP_BASE &&
	    vaddr < (PKMAP_BASE + LAST_PKMAP * PAGE_SIZE)) {
		/* No highmem pages only used for bulk (kiov) I/O */
		CERROR("find page for address in highmem\n");
		LBUG();
	}
#endif
	page = virt_to_page(vaddr);
	LASSERT(page);
	return page;
}
static int
kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
{
	kib_hca_dev_t *hdev;
	__u64 *pages = tx->tx_pages;
	kib_fmr_poolset_t *fps;
	int npages;
	int size;
	int cpt;
	int rc;
	int i;

	LASSERT(tx->tx_pool);
	LASSERT(tx->tx_pool->tpo_pool.po_owner);

	hdev = tx->tx_pool->tpo_hdev;

	for (i = 0, npages = 0; i < rd->rd_nfrags; i++) {
		for (size = 0; size < rd->rd_frags[i].rf_nob;
		     size += hdev->ibh_page_size) {
			pages[npages++] = (rd->rd_frags[i].rf_addr &
					   hdev->ibh_page_mask) + size;
		}
	}

	cpt = tx->tx_pool->tpo_pool.po_owner->ps_cpt;

	fps = net->ibn_fmr_ps[cpt];
	rc = kiblnd_fmr_pool_map(fps, pages, npages, 0, &tx->fmr);
	if (rc) {
		CERROR("Can't map %d pages: %d\n", npages, rc);
		return rc;
	}

	/*
	 * If rd is not tx_rd, it's going to get sent to a peer, who will need
	 * the rkey
	 */
	rd->rd_key = (rd != tx->tx_rd) ? tx->fmr.fmr_pfmr->fmr->rkey :
					 tx->fmr.fmr_pfmr->fmr->lkey;
	rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask;
	rd->rd_frags[0].rf_nob = nob;
	rd->rd_nfrags = 1;

	return 0;
}
static void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx)
{
	kib_net_t *net = ni->ni_data;

	LASSERT(net);

	if (net->ibn_fmr_ps && tx->fmr.fmr_pfmr) {
		kiblnd_fmr_pool_unmap(&tx->fmr, tx->tx_status);
		tx->fmr.fmr_pfmr = NULL;
	}

	if (tx->tx_nfrags) {
		kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev->ibh_ibdev,
				    tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
		tx->tx_nfrags = 0;
	}
}
static int kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
			 int nfrags)
{
	kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
	kib_net_t *net = ni->ni_data;
	struct ib_mr *mr = NULL;
	__u32 nob;
	int i;

	/*
	 * If rd is not tx_rd, it's going to get sent to a peer and I'm the
	 * RDMA sink
	 */
	tx->tx_dmadir = (rd != tx->tx_rd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	tx->tx_nfrags = nfrags;

	rd->rd_nfrags = kiblnd_dma_map_sg(hdev->ibh_ibdev, tx->tx_frags,
					  tx->tx_nfrags, tx->tx_dmadir);

	for (i = 0, nob = 0; i < rd->rd_nfrags; i++) {
		rd->rd_frags[i].rf_nob = kiblnd_sg_dma_len(
			hdev->ibh_ibdev, &tx->tx_frags[i]);
		rd->rd_frags[i].rf_addr = kiblnd_sg_dma_address(
			hdev->ibh_ibdev, &tx->tx_frags[i]);
		nob += rd->rd_frags[i].rf_nob;
	}

	mr = kiblnd_find_rd_dma_mr(hdev, rd, tx->tx_conn ?
				   tx->tx_conn->ibc_max_frags : -1);
	if (mr) {
		/* found pre-mapping MR */
		rd->rd_key = (rd != tx->tx_rd) ? mr->rkey : mr->lkey;
		return 0;
	}

	if (net->ibn_fmr_ps)
		return kiblnd_fmr_map_tx(net, tx, rd, nob);

	return -EINVAL;
}
static int
kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
		    unsigned int niov, struct kvec *iov, int offset, int nob)
{
	kib_net_t *net = ni->ni_data;
	struct page *page;
	struct scatterlist *sg;
	unsigned long vaddr;
	int fragnob;
	int page_offset;

	LASSERT(nob > 0);
	LASSERT(niov > 0);
	LASSERT(net);

	while (offset >= iov->iov_len) {
		offset -= iov->iov_len;
		niov--;
		iov++;
		LASSERT(niov > 0);
	}

	sg = tx->tx_frags;
	do {
		LASSERT(niov > 0);

		vaddr = ((unsigned long)iov->iov_base) + offset;
		page_offset = vaddr & (PAGE_SIZE - 1);
		page = kiblnd_kvaddr_to_page(vaddr);
		if (!page) {
			CERROR("Can't find page\n");
			return -EFAULT;
		}

		fragnob = min((int)(iov->iov_len - offset), nob);
		fragnob = min(fragnob, (int)PAGE_SIZE - page_offset);

		sg_set_page(sg, page, fragnob, page_offset);
		sg = sg_next(sg);

		if (offset + fragnob < iov->iov_len) {
			offset += fragnob;
		} else {
			offset = 0;
			iov++;
			niov--;
		}
		nob -= fragnob;
	} while (nob > 0);

	return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags);
}
static int
kiblnd_setup_rd_kiov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
		     int nkiov, lnet_kiov_t *kiov, int offset, int nob)
{
	kib_net_t *net = ni->ni_data;
	struct scatterlist *sg;
	int fragnob;

	CDEBUG(D_NET, "niov %d offset %d nob %d\n", nkiov, offset, nob);

	LASSERT(nob > 0);
	LASSERT(nkiov > 0);
	LASSERT(net);

	while (offset >= kiov->kiov_len) {
		offset -= kiov->kiov_len;
		nkiov--;
		kiov++;
		LASSERT(nkiov > 0);
	}

	sg = tx->tx_frags;
	do {
		LASSERT(nkiov > 0);

		fragnob = min((int)(kiov->kiov_len - offset), nob);

		sg_set_page(sg, kiov->kiov_page, fragnob,
			    kiov->kiov_offset + offset);
		sg = sg_next(sg);

		offset = 0;
		kiov++;
		nkiov--;
		nob -= fragnob;
	} while (nob > 0);

	return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags);
}
static int
kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
	__must_hold(&conn->ibc_lock)
{
	kib_msg_t *msg = tx->tx_msg;
	kib_peer_t *peer = conn->ibc_peer;
	int ver = conn->ibc_version;
	int rc;
	int done;

	LASSERT(tx->tx_queued);
	/* We rely on this for QP sizing */
	LASSERT(tx->tx_nwrq > 0);
	LASSERT(tx->tx_nwrq <= 1 + conn->ibc_max_frags);

	LASSERT(!credit || credit == 1);
	LASSERT(conn->ibc_outstanding_credits >= 0);
	LASSERT(conn->ibc_outstanding_credits <= conn->ibc_queue_depth);
	LASSERT(conn->ibc_credits >= 0);
	LASSERT(conn->ibc_credits <= conn->ibc_queue_depth);

	if (conn->ibc_nsends_posted == IBLND_CONCURRENT_SENDS(ver)) {
		/* tx completions outstanding... */
		CDEBUG(D_NET, "%s: posted enough\n",
		       libcfs_nid2str(peer->ibp_nid));
		return -EAGAIN;
	}

	if (credit && !conn->ibc_credits) {   /* no credits */
		CDEBUG(D_NET, "%s: no credits\n",
		       libcfs_nid2str(peer->ibp_nid));
		return -EAGAIN;
	}

	if (credit && !IBLND_OOB_CAPABLE(ver) &&
	    conn->ibc_credits == 1 &&	/* last credit reserved */
	    msg->ibm_type != IBLND_MSG_NOOP) {	/* for NOOP */
		CDEBUG(D_NET, "%s: not using last credit\n",
		       libcfs_nid2str(peer->ibp_nid));
		return -EAGAIN;
	}

	/* NB don't drop ibc_lock before bumping tx_sending */
	list_del(&tx->tx_list);
	tx->tx_queued = 0;

	if (msg->ibm_type == IBLND_MSG_NOOP &&
	    (!kiblnd_need_noop(conn) ||	    /* redundant NOOP */
	     (IBLND_OOB_CAPABLE(ver) &&	/* posted enough NOOP */
	      conn->ibc_noops_posted == IBLND_OOB_MSGS(ver)))) {
		/*
		 * OK to drop when posted enough NOOPs, since
		 * kiblnd_check_sends will queue NOOP again when
		 * posted NOOPs complete
		 */
		spin_unlock(&conn->ibc_lock);
		kiblnd_tx_done(peer->ibp_ni, tx);
		spin_lock(&conn->ibc_lock);
		CDEBUG(D_NET, "%s(%d): redundant or enough NOOP\n",
		       libcfs_nid2str(peer->ibp_nid),
		       conn->ibc_noops_posted);
		return 0;
	}

	kiblnd_pack_msg(peer->ibp_ni, msg, ver, conn->ibc_outstanding_credits,
			peer->ibp_nid, conn->ibc_incarnation);

	conn->ibc_credits -= credit;
	conn->ibc_outstanding_credits = 0;
	conn->ibc_nsends_posted++;
	if (msg->ibm_type == IBLND_MSG_NOOP)
		conn->ibc_noops_posted++;

	/*
	 * CAVEAT EMPTOR! This tx could be the PUT_DONE of an RDMA
	 * PUT. If so, it was first queued here as a PUT_REQ, sent and
	 * stashed on ibc_active_txs, matched by an incoming PUT_ACK,
	 * and then re-queued here. It's (just) possible that
	 * tx_sending is non-zero if we've not done the tx_complete()
	 * from the first send; hence the ++ rather than = below.
	 */
	tx->tx_sending++;
	list_add(&tx->tx_list, &conn->ibc_active_txs);

	/* I'm still holding ibc_lock! */
	if (conn->ibc_state != IBLND_CONN_ESTABLISHED) {
		rc = -ECONNABORTED;
	} else if (tx->tx_pool->tpo_pool.po_failed ||
		   conn->ibc_hdev != tx->tx_pool->tpo_hdev) {
		/* close_conn will launch failover */
		rc = -ENETDOWN;
	} else {
		struct ib_send_wr *wrq = &tx->tx_wrq[tx->tx_nwrq - 1].wr;

		LASSERTF(wrq->wr_id == kiblnd_ptr2wreqid(tx, IBLND_WID_TX),
			 "bad wr_id %llx, opc %d, flags %d, peer: %s\n",
			 wrq->wr_id, wrq->opcode, wrq->send_flags,
			 libcfs_nid2str(conn->ibc_peer->ibp_nid));
		wrq = NULL;
		rc = ib_post_send(conn->ibc_cmid->qp, &tx->tx_wrq->wr, &wrq);
	}

	conn->ibc_last_send = jiffies;

	if (!rc)
		return 0;

	/*
	 * NB credits are transferred in the actual
	 * message, which can only be the last work item
	 */
	conn->ibc_credits += credit;
	conn->ibc_outstanding_credits += msg->ibm_credits;
	conn->ibc_nsends_posted--;
	if (msg->ibm_type == IBLND_MSG_NOOP)
		conn->ibc_noops_posted--;

	tx->tx_status = rc;
	tx->tx_waiting = 0;
	tx->tx_sending--;

	done = !tx->tx_sending;
	if (done)
		list_del(&tx->tx_list);

	spin_unlock(&conn->ibc_lock);

	if (conn->ibc_state == IBLND_CONN_ESTABLISHED)
		CERROR("Error %d posting transmit to %s\n",
		       rc, libcfs_nid2str(peer->ibp_nid));
	else
		CDEBUG(D_NET, "Error %d posting transmit to %s\n",
		       rc, libcfs_nid2str(peer->ibp_nid));

	kiblnd_close_conn(conn, rc);

	if (done)
		kiblnd_tx_done(peer->ibp_ni, tx);

	spin_lock(&conn->ibc_lock);

	return -EIO;
}
static void
kiblnd_check_sends(kib_conn_t *conn)
{
	int ver = conn->ibc_version;
	lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
	kib_tx_t *tx;

	/* Don't send anything until after the connection is established */
	if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
		CDEBUG(D_NET, "%s too soon\n",
		       libcfs_nid2str(conn->ibc_peer->ibp_nid));
		return;
	}

	spin_lock(&conn->ibc_lock);

	LASSERT(conn->ibc_nsends_posted <= IBLND_CONCURRENT_SENDS(ver));
	LASSERT(!IBLND_OOB_CAPABLE(ver) ||
		conn->ibc_noops_posted <= IBLND_OOB_MSGS(ver));
	LASSERT(conn->ibc_reserved_credits >= 0);

	while (conn->ibc_reserved_credits > 0 &&
	       !list_empty(&conn->ibc_tx_queue_rsrvd)) {
		tx = list_entry(conn->ibc_tx_queue_rsrvd.next,
				kib_tx_t, tx_list);
		list_del(&tx->tx_list);
		list_add_tail(&tx->tx_list, &conn->ibc_tx_queue);
		conn->ibc_reserved_credits--;
	}

	if (kiblnd_need_noop(conn)) {
		spin_unlock(&conn->ibc_lock);

		tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
		if (tx)
			kiblnd_init_tx_msg(ni, tx, IBLND_MSG_NOOP, 0);

		spin_lock(&conn->ibc_lock);
		if (tx)
			kiblnd_queue_tx_locked(tx, conn);
	}

	for (;;) {
		int credit;

		if (!list_empty(&conn->ibc_tx_queue_nocred)) {
			credit = 0;
			tx = list_entry(conn->ibc_tx_queue_nocred.next,
					kib_tx_t, tx_list);
		} else if (!list_empty(&conn->ibc_tx_noops)) {
			LASSERT(!IBLND_OOB_CAPABLE(ver));
			credit = 1;
			tx = list_entry(conn->ibc_tx_noops.next,
					kib_tx_t, tx_list);
		} else if (!list_empty(&conn->ibc_tx_queue)) {
			credit = 1;
			tx = list_entry(conn->ibc_tx_queue.next,
					kib_tx_t, tx_list);
		} else {
			break;
		}

		if (kiblnd_post_tx_locked(conn, tx, credit))
			break;
	}

	spin_unlock(&conn->ibc_lock);
}
static void
kiblnd_tx_complete(kib_tx_t *tx, int status)
{
	int failed = (status != IB_WC_SUCCESS);
	kib_conn_t *conn = tx->tx_conn;
	int idle;

	LASSERT(tx->tx_sending > 0);

	if (failed) {
		if (conn->ibc_state == IBLND_CONN_ESTABLISHED)
			CNETERR("Tx -> %s cookie %#llx sending %d waiting %d: failed %d\n",
				libcfs_nid2str(conn->ibc_peer->ibp_nid),
				tx->tx_cookie, tx->tx_sending, tx->tx_waiting,
				status);

		kiblnd_close_conn(conn, -EIO);
	} else {
		kiblnd_peer_alive(conn->ibc_peer);
	}

	spin_lock(&conn->ibc_lock);

	/*
	 * I could be racing with rdma completion. Whoever makes 'tx' idle
	 * gets to free it, which also drops its ref on 'conn'.
	 */
	tx->tx_sending--;
	conn->ibc_nsends_posted--;
	if (tx->tx_msg->ibm_type == IBLND_MSG_NOOP)
		conn->ibc_noops_posted--;

	if (failed) {
		tx->tx_waiting = 0;	/* don't wait for peer */
		tx->tx_status = -EIO;
	}

	idle = !tx->tx_sending &&	/* This is the final callback */
	       !tx->tx_waiting &&	/* Not waiting for peer */
	       !tx->tx_queued;		/* Not re-queued (PUT_DONE) */
	if (idle)
		list_del(&tx->tx_list);

	kiblnd_conn_addref(conn);	/* 1 ref for me.... */

	spin_unlock(&conn->ibc_lock);

	if (idle)
		kiblnd_tx_done(conn->ibc_peer->ibp_ni, tx);

	kiblnd_check_sends(conn);

	kiblnd_conn_decref(conn);	/* ...until here */
}
static void
kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob)
{
	kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
	struct ib_sge *sge = &tx->tx_sge[tx->tx_nwrq];
	struct ib_rdma_wr *wrq = &tx->tx_wrq[tx->tx_nwrq];
	int nob = offsetof(kib_msg_t, ibm_u) + body_nob;
	struct ib_mr *mr = hdev->ibh_mrs;

	LASSERT(tx->tx_nwrq >= 0);
	LASSERT(tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1);
	LASSERT(nob <= IBLND_MSG_SIZE);
	LASSERT(mr);

	kiblnd_init_msg(tx->tx_msg, type, body_nob);

	sge->lkey = mr->lkey;
	sge->addr = tx->tx_msgaddr;
	sge->length = nob;

	memset(wrq, 0, sizeof(*wrq));

	wrq->wr.next       = NULL;
	wrq->wr.wr_id      = kiblnd_ptr2wreqid(tx, IBLND_WID_TX);
	wrq->wr.sg_list    = sge;
	wrq->wr.num_sge    = 1;
	wrq->wr.opcode     = IB_WR_SEND;
	wrq->wr.send_flags = IB_SEND_SIGNALED;

	tx->tx_nwrq++;
}
static int
kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
		 int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie)
{
	kib_msg_t *ibmsg = tx->tx_msg;
	kib_rdma_desc_t *srcrd = tx->tx_rd;
	struct ib_sge *sge = &tx->tx_sge[0];
	struct ib_rdma_wr *wrq = &tx->tx_wrq[0], *next;
	int rc = resid;
	int srcidx = 0;
	int dstidx = 0;
	__u32 wrknob;

	LASSERT(!in_interrupt());
	LASSERT(!tx->tx_nwrq);
	LASSERT(type == IBLND_MSG_GET_DONE ||
		type == IBLND_MSG_PUT_DONE);

	while (resid > 0) {
		if (srcidx >= srcrd->rd_nfrags) {
			CERROR("Src buffer exhausted: %d frags\n", srcidx);
			rc = -EPROTO;
			break;
		}

		if (dstidx == dstrd->rd_nfrags) {
			CERROR("Dst buffer exhausted: %d frags\n", dstidx);
			rc = -EPROTO;
			break;
		}

		if (tx->tx_nwrq >= conn->ibc_max_frags) {
			CERROR("RDMA has too many fragments for peer %s (%d), src idx/frags: %d/%d dst idx/frags: %d/%d\n",
			       libcfs_nid2str(conn->ibc_peer->ibp_nid),
			       conn->ibc_max_frags,
			       srcidx, srcrd->rd_nfrags,
			       dstidx, dstrd->rd_nfrags);
			rc = -EMSGSIZE;
			break;
		}

		wrknob = min(min(kiblnd_rd_frag_size(srcrd, srcidx),
				 kiblnd_rd_frag_size(dstrd, dstidx)),
			     (__u32)resid);

		sge = &tx->tx_sge[tx->tx_nwrq];
		sge->addr = kiblnd_rd_frag_addr(srcrd, srcidx);
		sge->lkey = kiblnd_rd_frag_key(srcrd, srcidx);
		sge->length = wrknob;

		wrq = &tx->tx_wrq[tx->tx_nwrq];
		next = wrq + 1;

		wrq->wr.next       = &next->wr;
		wrq->wr.wr_id      = kiblnd_ptr2wreqid(tx, IBLND_WID_RDMA);
		wrq->wr.sg_list    = sge;
		wrq->wr.num_sge    = 1;
		wrq->wr.opcode     = IB_WR_RDMA_WRITE;
		wrq->wr.send_flags = 0;

		wrq->remote_addr = kiblnd_rd_frag_addr(dstrd, dstidx);
		wrq->rkey        = kiblnd_rd_frag_key(dstrd, dstidx);

		srcidx = kiblnd_rd_consume_frag(srcrd, srcidx, wrknob);
		dstidx = kiblnd_rd_consume_frag(dstrd, dstidx, wrknob);

		resid -= wrknob;

		tx->tx_nwrq++;
		wrq++;
		sge++;
	}

	if (rc < 0) /* no RDMA if completing with failure */
		tx->tx_nwrq = 0;

	ibmsg->ibm_u.completion.ibcm_status = rc;
	ibmsg->ibm_u.completion.ibcm_cookie = dstcookie;
	kiblnd_init_tx_msg(conn->ibc_peer->ibp_ni, tx,
			   type, sizeof(kib_completion_msg_t));

	return rc;
}
static void
kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn)
{
	struct list_head *q;

	LASSERT(tx->tx_nwrq > 0);	/* work items set up */
	LASSERT(!tx->tx_queued);	/* not queued for sending already */
	LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);

	tx->tx_queued = 1;
	tx->tx_deadline = jiffies +
			  msecs_to_jiffies(*kiblnd_tunables.kib_timeout *
					   MSEC_PER_SEC);

	if (!tx->tx_conn) {
		kiblnd_conn_addref(conn);
		tx->tx_conn = conn;
		LASSERT(tx->tx_msg->ibm_type != IBLND_MSG_PUT_DONE);
	} else {
		/* PUT_DONE first attached to conn as a PUT_REQ */
		LASSERT(tx->tx_conn == conn);
		LASSERT(tx->tx_msg->ibm_type == IBLND_MSG_PUT_DONE);
	}

	switch (tx->tx_msg->ibm_type) {
	default:
		LBUG();

	case IBLND_MSG_PUT_REQ:
	case IBLND_MSG_GET_REQ:
		q = &conn->ibc_tx_queue_rsrvd;
		break;

	case IBLND_MSG_PUT_NAK:
	case IBLND_MSG_PUT_ACK:
	case IBLND_MSG_PUT_DONE:
	case IBLND_MSG_GET_DONE:
		q = &conn->ibc_tx_queue_nocred;
		break;

	case IBLND_MSG_NOOP:
		if (IBLND_OOB_CAPABLE(conn->ibc_version))
			q = &conn->ibc_tx_queue_nocred;
		else
			q = &conn->ibc_tx_noops;
		break;

	case IBLND_MSG_IMMEDIATE:
		q = &conn->ibc_tx_queue;
		break;
	}

	list_add_tail(&tx->tx_list, q);
}
static void
kiblnd_queue_tx(kib_tx_t *tx, kib_conn_t *conn)
{
	spin_lock(&conn->ibc_lock);
	kiblnd_queue_tx_locked(tx, conn);
	spin_unlock(&conn->ibc_lock);

	kiblnd_check_sends(conn);
}
static int kiblnd_resolve_addr(struct rdma_cm_id *cmid,
			       struct sockaddr_in *srcaddr,
			       struct sockaddr_in *dstaddr,
			       int timeout_ms)
{
	unsigned short port;
	int rc;

	/* allow the port to be reused */
	rc = rdma_set_reuseaddr(cmid, 1);
	if (rc) {
		CERROR("Unable to set reuse on cmid: %d\n", rc);
		return rc;
	}

	/* look for a free privileged port */
	for (port = PROT_SOCK - 1; port > 0; port--) {
		srcaddr->sin_port = htons(port);
		rc = rdma_resolve_addr(cmid,
				       (struct sockaddr *)srcaddr,
				       (struct sockaddr *)dstaddr,
				       timeout_ms);
		if (!rc) {
			CDEBUG(D_NET, "bound to port %hu\n", port);
			return 0;
		} else if (rc == -EADDRINUSE || rc == -EADDRNOTAVAIL) {
			CDEBUG(D_NET, "bind to port %hu failed: %d\n",
			       port, rc);
		} else {
			return rc;
		}
	}

	CERROR("Failed to bind to a free privileged port\n");
	return rc;
}
static void
kiblnd_connect_peer(kib_peer_t *peer)
{
	struct rdma_cm_id *cmid;
	kib_dev_t *dev;
	kib_net_t *net = peer->ibp_ni->ni_data;
	struct sockaddr_in srcaddr;
	struct sockaddr_in dstaddr;
	int rc;

	LASSERT(net);
	LASSERT(peer->ibp_connecting > 0);
	LASSERT(!peer->ibp_reconnecting);

	cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, peer, RDMA_PS_TCP,
				     IB_QPT_RC);

	if (IS_ERR(cmid)) {
		CERROR("Can't create CMID for %s: %ld\n",
		       libcfs_nid2str(peer->ibp_nid), PTR_ERR(cmid));
		rc = PTR_ERR(cmid);
		goto failed;
	}

	dev = net->ibn_dev;
	memset(&srcaddr, 0, sizeof(srcaddr));
	srcaddr.sin_family = AF_INET;
	srcaddr.sin_addr.s_addr = htonl(dev->ibd_ifip);

	memset(&dstaddr, 0, sizeof(dstaddr));
	dstaddr.sin_family = AF_INET;
	dstaddr.sin_port = htons(*kiblnd_tunables.kib_service);
	dstaddr.sin_addr.s_addr = htonl(LNET_NIDADDR(peer->ibp_nid));

	kiblnd_peer_addref(peer);	/* cmid's ref */

	if (*kiblnd_tunables.kib_use_priv_port) {
		rc = kiblnd_resolve_addr(cmid, &srcaddr, &dstaddr,
					 *kiblnd_tunables.kib_timeout * 1000);
	} else {
		rc = rdma_resolve_addr(cmid,
				       (struct sockaddr *)&srcaddr,
				       (struct sockaddr *)&dstaddr,
				       *kiblnd_tunables.kib_timeout * 1000);
	}
	if (rc) {
		/* Can't initiate address resolution: */
		CERROR("Can't resolve addr for %s: %d\n",
		       libcfs_nid2str(peer->ibp_nid), rc);
		goto failed2;
	}

	LASSERT(cmid->device);
	CDEBUG(D_NET, "%s: connection bound to %s:%pI4h:%s\n",
	       libcfs_nid2str(peer->ibp_nid), dev->ibd_ifname,
	       &dev->ibd_ifip, cmid->device->name);

	return;

failed2:
	kiblnd_peer_connect_failed(peer, 1, rc);
	kiblnd_peer_decref(peer);	/* cmid's ref */
	rdma_destroy_id(cmid);
	return;
failed:
	kiblnd_peer_connect_failed(peer, 1, rc);
}
bool
kiblnd_reconnect_peer(kib_peer_t *peer)
{
	rwlock_t *glock = &kiblnd_data.kib_global_lock;
	char *reason = NULL;
	struct list_head txs;
	unsigned long flags;

	INIT_LIST_HEAD(&txs);

	write_lock_irqsave(glock, flags);
	if (!peer->ibp_reconnecting) {
		if (peer->ibp_accepting)
			reason = "accepting";
		else if (peer->ibp_connecting)
			reason = "connecting";
		else if (!list_empty(&peer->ibp_conns))
			reason = "connected";
		else /* connected then closed */
			reason = "closed";

		goto no_reconnect;
	}

	LASSERT(!peer->ibp_accepting && !peer->ibp_connecting &&
		list_empty(&peer->ibp_conns));
	peer->ibp_reconnecting = 0;

	if (!kiblnd_peer_active(peer)) {
		list_splice_init(&peer->ibp_tx_queue, &txs);
		reason = "unlinked";
		goto no_reconnect;
	}

	peer->ibp_connecting++;
	peer->ibp_reconnected++;
	write_unlock_irqrestore(glock, flags);

	kiblnd_connect_peer(peer);
	return true;

no_reconnect:
	write_unlock_irqrestore(glock, flags);

	CWARN("Abort reconnection of %s: %s\n",
	      libcfs_nid2str(peer->ibp_nid), reason);
	kiblnd_txlist_done(peer->ibp_ni, &txs, -ECONNABORTED);
	return false;
}
static void
kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
{
	kib_peer_t *peer;
	kib_peer_t *peer2;
	kib_conn_t *conn;
	rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
	unsigned long flags;
	int rc;

	/*
	 * If I get here, I've committed to send, so I complete the tx with
	 * failure on any problems
	 */
	LASSERT(!tx || !tx->tx_conn);	/* only set when assigned a conn */
	LASSERT(!tx || tx->tx_nwrq > 0);	/* work items have been set up */

	/*
	 * First time, just use a read lock since I expect to find my peer
	 * connected
	 */
	read_lock_irqsave(g_lock, flags);

	peer = kiblnd_find_peer_locked(nid);
	if (peer && !list_empty(&peer->ibp_conns)) {
		/* Found a peer with an established connection */
		conn = kiblnd_get_conn_locked(peer);
		kiblnd_conn_addref(conn); /* 1 ref for me... */

		read_unlock_irqrestore(g_lock, flags);

		if (tx)
			kiblnd_queue_tx(tx, conn);
		kiblnd_conn_decref(conn); /* ...to here */
		return;
	}

	read_unlock(g_lock);
	/* Re-try with a write lock */
	write_lock(g_lock);

	peer = kiblnd_find_peer_locked(nid);
	if (peer) {
		if (list_empty(&peer->ibp_conns)) {
			/* found a peer, but it's still connecting... */
			LASSERT(kiblnd_peer_connecting(peer));
			if (tx)
				list_add_tail(&tx->tx_list,
					      &peer->ibp_tx_queue);
			write_unlock_irqrestore(g_lock, flags);
		} else {
			conn = kiblnd_get_conn_locked(peer);
			kiblnd_conn_addref(conn); /* 1 ref for me... */

			write_unlock_irqrestore(g_lock, flags);

			if (tx)
				kiblnd_queue_tx(tx, conn);
			kiblnd_conn_decref(conn); /* ...to here */
		}
		return;
	}

	write_unlock_irqrestore(g_lock, flags);

	/* Allocate a peer ready to add to the peer table and retry */
	rc = kiblnd_create_peer(ni, &peer, nid);
	if (rc) {
		CERROR("Can't create peer %s\n", libcfs_nid2str(nid));
		if (tx) {
			tx->tx_status = -EHOSTUNREACH;
			tx->tx_waiting = 0;
			kiblnd_tx_done(ni, tx);
		}
		return;
	}

	write_lock_irqsave(g_lock, flags);

	peer2 = kiblnd_find_peer_locked(nid);
	if (peer2) {
		if (list_empty(&peer2->ibp_conns)) {
			/* found a peer, but it's still connecting... */
			LASSERT(kiblnd_peer_connecting(peer2));
			if (tx)
				list_add_tail(&tx->tx_list,
					      &peer2->ibp_tx_queue);
			write_unlock_irqrestore(g_lock, flags);
		} else {
			conn = kiblnd_get_conn_locked(peer2);
			kiblnd_conn_addref(conn); /* 1 ref for me... */

			write_unlock_irqrestore(g_lock, flags);

			if (tx)
				kiblnd_queue_tx(tx, conn);
			kiblnd_conn_decref(conn); /* ...to here */
		}

		kiblnd_peer_decref(peer);
		return;
	}

	/* Brand new peer */
	LASSERT(!peer->ibp_connecting);
	peer->ibp_connecting = 1;

	/* always called with a ref on ni, which prevents ni being shutdown */
	LASSERT(!((kib_net_t *)ni->ni_data)->ibn_shutdown);

	if (tx)
		list_add_tail(&tx->tx_list, &peer->ibp_tx_queue);

	kiblnd_peer_addref(peer);
	list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid));

	write_unlock_irqrestore(g_lock, flags);

	kiblnd_connect_peer(peer);
	kiblnd_peer_decref(peer);
}
int
kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
{
	lnet_hdr_t *hdr = &lntmsg->msg_hdr;
	int type = lntmsg->msg_type;
	lnet_process_id_t target = lntmsg->msg_target;
	int target_is_router = lntmsg->msg_target_is_router;
	int routing = lntmsg->msg_routing;
	unsigned int payload_niov = lntmsg->msg_niov;
	struct kvec *payload_iov = lntmsg->msg_iov;
	lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
	unsigned int payload_offset = lntmsg->msg_offset;
	unsigned int payload_nob = lntmsg->msg_len;
	kib_msg_t *ibmsg;
	kib_rdma_desc_t *rd;
	kib_tx_t *tx;
	int nob;
	int rc;

	/* NB 'private' is different depending on what we're sending.... */

	CDEBUG(D_NET, "sending %d bytes in %d frags to %s\n",
	       payload_nob, payload_niov, libcfs_id2str(target));

	LASSERT(!payload_nob || payload_niov > 0);
	LASSERT(payload_niov <= LNET_MAX_IOV);

	/* Thread context */
	LASSERT(!in_interrupt());
	/* payload is either all vaddrs or all pages */
	LASSERT(!(payload_kiov && payload_iov));

	switch (type) {
	default:
		LBUG();
		return -EIO;

	case LNET_MSG_ACK:
		LASSERT(!payload_nob);
		break;

	case LNET_MSG_GET:
		if (routing || target_is_router)
			break;		/* send IMMEDIATE */

		/* is the REPLY message too small for RDMA? */
		nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[lntmsg->msg_md->md_length]);
		if (nob <= IBLND_MSG_SIZE)
			break;		/* send IMMEDIATE */

		tx = kiblnd_get_idle_tx(ni, target.nid);
		if (!tx) {
			CERROR("Can't allocate txd for GET to %s\n",
			       libcfs_nid2str(target.nid));
			return -ENOMEM;
		}

		ibmsg = tx->tx_msg;
		rd = &ibmsg->ibm_u.get.ibgm_rd;
		if (!(lntmsg->msg_md->md_options & LNET_MD_KIOV))
			rc = kiblnd_setup_rd_iov(ni, tx, rd,
						 lntmsg->msg_md->md_niov,
						 lntmsg->msg_md->md_iov.iov,
						 0, lntmsg->msg_md->md_length);
		else
			rc = kiblnd_setup_rd_kiov(ni, tx, rd,
						  lntmsg->msg_md->md_niov,
						  lntmsg->msg_md->md_iov.kiov,
						  0, lntmsg->msg_md->md_length);
		if (rc) {
			CERROR("Can't setup GET sink for %s: %d\n",
			       libcfs_nid2str(target.nid), rc);
			kiblnd_tx_done(ni, tx);
			return -EIO;
		}

		nob = offsetof(kib_get_msg_t, ibgm_rd.rd_frags[rd->rd_nfrags]);
		ibmsg->ibm_u.get.ibgm_cookie = tx->tx_cookie;
		ibmsg->ibm_u.get.ibgm_hdr = *hdr;

		kiblnd_init_tx_msg(ni, tx, IBLND_MSG_GET_REQ, nob);

		tx->tx_lntmsg[1] = lnet_create_reply_msg(ni, lntmsg);
		if (!tx->tx_lntmsg[1]) {
			CERROR("Can't create reply for GET -> %s\n",
			       libcfs_nid2str(target.nid));
			kiblnd_tx_done(ni, tx);
			return -EIO;
		}

		tx->tx_lntmsg[0] = lntmsg;	/* finalise lntmsg[0,1] on completion */
		tx->tx_waiting = 1;		/* waiting for GET_DONE */
		kiblnd_launch_tx(ni, tx, target.nid);
		return 0;

	case LNET_MSG_REPLY:
	case LNET_MSG_PUT:
		/* Is the payload small enough not to need RDMA? */
		nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[payload_nob]);
		if (nob <= IBLND_MSG_SIZE)
			break;		/* send IMMEDIATE */

		tx = kiblnd_get_idle_tx(ni, target.nid);
		if (!tx) {
			CERROR("Can't allocate %s txd for %s\n",
			       type == LNET_MSG_PUT ? "PUT" : "REPLY",
			       libcfs_nid2str(target.nid));
			return -ENOMEM;
		}

		if (!payload_kiov)
			rc = kiblnd_setup_rd_iov(ni, tx, tx->tx_rd,
						 payload_niov, payload_iov,
						 payload_offset, payload_nob);
		else
			rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
						  payload_niov, payload_kiov,
						  payload_offset, payload_nob);
		if (rc) {
			CERROR("Can't setup PUT src for %s: %d\n",
			       libcfs_nid2str(target.nid), rc);
			kiblnd_tx_done(ni, tx);
			return -EIO;
		}

		ibmsg = tx->tx_msg;
		ibmsg->ibm_u.putreq.ibprm_hdr = *hdr;
		ibmsg->ibm_u.putreq.ibprm_cookie = tx->tx_cookie;
		kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_REQ, sizeof(kib_putreq_msg_t));

		tx->tx_lntmsg[0] = lntmsg;	/* finalise lntmsg on completion */
		tx->tx_waiting = 1;		/* waiting for PUT_{ACK,NAK} */
		kiblnd_launch_tx(ni, tx, target.nid);
		return 0;
	}

	/* send IMMEDIATE */

	LASSERT(offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[payload_nob])
		<= IBLND_MSG_SIZE);

	tx = kiblnd_get_idle_tx(ni, target.nid);
	if (!tx) {
		CERROR("Can't send %d to %s: tx descs exhausted\n",
		       type, libcfs_nid2str(target.nid));
		return -ENOMEM;
	}

	ibmsg = tx->tx_msg;
	ibmsg->ibm_u.immediate.ibim_hdr = *hdr;

	if (payload_kiov)
		lnet_copy_kiov2flat(IBLND_MSG_SIZE, ibmsg,
				    offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
				    payload_niov, payload_kiov,
				    payload_offset, payload_nob);
	else
		lnet_copy_iov2flat(IBLND_MSG_SIZE, ibmsg,
				   offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
				   payload_niov, payload_iov,
				   payload_offset, payload_nob);

	nob = offsetof(kib_immediate_msg_t, ibim_payload[payload_nob]);
	kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob);

	tx->tx_lntmsg[0] = lntmsg;	/* finalise lntmsg on completion */
	kiblnd_launch_tx(ni, tx, target.nid);
	return 0;
}
static void
kiblnd_reply(lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg)
{
	lnet_process_id_t target = lntmsg->msg_target;
	unsigned int niov = lntmsg->msg_niov;
	struct kvec *iov = lntmsg->msg_iov;
	lnet_kiov_t *kiov = lntmsg->msg_kiov;
	unsigned int offset = lntmsg->msg_offset;
	unsigned int nob = lntmsg->msg_len;
	kib_tx_t *tx;
	int rc;

	tx = kiblnd_get_idle_tx(ni, rx->rx_conn->ibc_peer->ibp_nid);
	if (!tx) {
		CERROR("Can't get tx for REPLY to %s\n",
		       libcfs_nid2str(target.nid));
		goto failed_0;
	}

	if (!nob)
		rc = 0;
	else if (!kiov)
		rc = kiblnd_setup_rd_iov(ni, tx, tx->tx_rd,
					 niov, iov, offset, nob);
	else
		rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
					  niov, kiov, offset, nob);

	if (rc) {
		CERROR("Can't setup GET src for %s: %d\n",
		       libcfs_nid2str(target.nid), rc);
		goto failed_1;
	}

	rc = kiblnd_init_rdma(rx->rx_conn, tx,
			      IBLND_MSG_GET_DONE, nob,
			      &rx->rx_msg->ibm_u.get.ibgm_rd,
			      rx->rx_msg->ibm_u.get.ibgm_cookie);
	if (rc < 0) {
		CERROR("Can't setup rdma for GET from %s: %d\n",
		       libcfs_nid2str(target.nid), rc);
		goto failed_1;
	}

	if (!nob) {
		/* No RDMA: local completion may happen now! */
		lnet_finalize(ni, lntmsg, 0);
	} else {
		/* RDMA: lnet_finalize(lntmsg) when it completes */
		tx->tx_lntmsg[0] = lntmsg;
	}

	kiblnd_queue_tx(tx, rx->rx_conn);
	return;

failed_1:
	kiblnd_tx_done(ni, tx);
failed_0:
	lnet_finalize(ni, lntmsg, -EIO);
}
int
kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
	    unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
	    unsigned int offset, unsigned int mlen, unsigned int rlen)
{
	kib_rx_t *rx = private;
	kib_msg_t *rxmsg = rx->rx_msg;
	kib_conn_t *conn = rx->rx_conn;
	kib_tx_t *tx;
	int nob;
	int post_credit = IBLND_POSTRX_PEER_CREDIT;
	int rc = 0;

	LASSERT(mlen <= rlen);
	LASSERT(!in_interrupt());
	/* Either all pages or all vaddrs */
	LASSERT(!(kiov && iov));

	switch (rxmsg->ibm_type) {
	default:
		LBUG();

	case IBLND_MSG_IMMEDIATE:
		nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[rlen]);
		if (nob > rx->rx_nob) {
			CERROR("Immediate message from %s too big: %d(%d)\n",
			       libcfs_nid2str(rxmsg->ibm_u.immediate.ibim_hdr.src_nid),
			       nob, rx->rx_nob);
			rc = -EPROTO;
			break;
		}

		if (kiov)
			lnet_copy_flat2kiov(niov, kiov, offset,
					    IBLND_MSG_SIZE, rxmsg,
					    offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
					    mlen);
		else
			lnet_copy_flat2iov(niov, iov, offset,
					   IBLND_MSG_SIZE, rxmsg,
					   offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
					   mlen);
		lnet_finalize(ni, lntmsg, 0);
		break;

	case IBLND_MSG_PUT_REQ: {
		kib_msg_t *txmsg;
		kib_rdma_desc_t *rd;

		if (!mlen) {
			lnet_finalize(ni, lntmsg, 0);
			kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, 0,
					       rxmsg->ibm_u.putreq.ibprm_cookie);
			break;
		}

		tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
		if (!tx) {
			CERROR("Can't allocate tx for %s\n",
			       libcfs_nid2str(conn->ibc_peer->ibp_nid));
			/* Not replying will break the connection */
			rc = -ENOMEM;
			break;
		}

		txmsg = tx->tx_msg;
		rd = &txmsg->ibm_u.putack.ibpam_rd;
		if (!kiov)
			rc = kiblnd_setup_rd_iov(ni, tx, rd,
						 niov, iov, offset, mlen);
		else
			rc = kiblnd_setup_rd_kiov(ni, tx, rd,
						  niov, kiov, offset, mlen);
		if (rc) {
			CERROR("Can't setup PUT sink for %s: %d\n",
			       libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
			kiblnd_tx_done(ni, tx);
			/* tell peer it's over */
			kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, rc,
					       rxmsg->ibm_u.putreq.ibprm_cookie);
			break;
		}

		nob = offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[rd->rd_nfrags]);
		txmsg->ibm_u.putack.ibpam_src_cookie = rxmsg->ibm_u.putreq.ibprm_cookie;
		txmsg->ibm_u.putack.ibpam_dst_cookie = tx->tx_cookie;

		kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_ACK, nob);

		tx->tx_lntmsg[0] = lntmsg;	/* finalise lntmsg on completion */
		tx->tx_waiting = 1;		/* waiting for PUT_DONE */
		kiblnd_queue_tx(tx, conn);

		/* reposted buffer reserved for PUT_DONE */
		post_credit = IBLND_POSTRX_NO_CREDIT;
		break;
	}

	case IBLND_MSG_GET_REQ:
		if (lntmsg) {
			/* Optimized GET; RDMA lntmsg's payload */
			kiblnd_reply(ni, rx, lntmsg);
		} else {
			/* GET didn't match anything */
			kiblnd_send_completion(rx->rx_conn, IBLND_MSG_GET_DONE,
					       -ENODATA,
					       rxmsg->ibm_u.get.ibgm_cookie);
		}
		break;
	}

	kiblnd_post_rx(rx, post_credit);
	return rc;
}
int
kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name)
{
	struct task_struct *task = kthread_run(fn, arg, "%s", name);

	if (IS_ERR(task))
		return PTR_ERR(task);

	atomic_inc(&kiblnd_data.kib_nthreads);
	return 0;
}
static void
kiblnd_thread_fini(void)
{
	atomic_dec(&kiblnd_data.kib_nthreads);
}
static void
kiblnd_peer_alive(kib_peer_t *peer)
{
	/* This is racy, but everyone's only writing cfs_time_current() */
	peer->ibp_last_alive = cfs_time_current();
	mb();
}
static void
kiblnd_peer_notify(kib_peer_t *peer)
{
	int error = 0;
	unsigned long last_alive = 0;
	unsigned long flags;

	read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

	if (kiblnd_peer_idle(peer) && peer->ibp_error) {
		error = peer->ibp_error;
		peer->ibp_error = 0;

		last_alive = peer->ibp_last_alive;
	}

	read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

	if (error)
		lnet_notify(peer->ibp_ni,
			    peer->ibp_nid, 0, last_alive);
}
void
kiblnd_close_conn_locked(kib_conn_t *conn, int error)
{
	/*
	 * This just does the immediate housekeeping. 'error' is zero for a
	 * normal shutdown which can happen only after the connection has been
	 * established. If the connection is established, schedule the
	 * connection to be finished off by the connd. Otherwise the connd is
	 * already dealing with it (either to set it up or tear it down).
	 * Caller holds kib_global_lock exclusively in irq context
	 */
	kib_peer_t *peer = conn->ibc_peer;
	kib_dev_t *dev;
	unsigned long flags;

	LASSERT(error || conn->ibc_state >= IBLND_CONN_ESTABLISHED);

	if (error && !conn->ibc_comms_error)
		conn->ibc_comms_error = error;

	if (conn->ibc_state != IBLND_CONN_ESTABLISHED)
		return; /* already being handled */

	if (!error &&
	    list_empty(&conn->ibc_tx_noops) &&
	    list_empty(&conn->ibc_tx_queue) &&
	    list_empty(&conn->ibc_tx_queue_rsrvd) &&
	    list_empty(&conn->ibc_tx_queue_nocred) &&
	    list_empty(&conn->ibc_active_txs)) {
		CDEBUG(D_NET, "closing conn to %s\n",
		       libcfs_nid2str(peer->ibp_nid));
	} else {
		CNETERR("Closing conn to %s: error %d%s%s%s%s%s\n",
			libcfs_nid2str(peer->ibp_nid), error,
			list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
			list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)",
			list_empty(&conn->ibc_tx_queue_rsrvd) ? "" : "(sending_rsrvd)",
			list_empty(&conn->ibc_tx_queue_nocred) ? "" : "(sending_nocred)",
			list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
	}

	dev = ((kib_net_t *)peer->ibp_ni->ni_data)->ibn_dev;
	list_del(&conn->ibc_list);
	/* connd (see below) takes over ibc_list's ref */

	if (list_empty(&peer->ibp_conns) &&	/* no more conns */
	    kiblnd_peer_active(peer)) {		/* still in peer table */
		kiblnd_unlink_peer_locked(peer);

		/* set/clear error on last conn */
		peer->ibp_error = conn->ibc_comms_error;
	}

	kiblnd_set_conn_state(conn, IBLND_CONN_CLOSING);

	if (error &&
	    kiblnd_dev_can_failover(dev)) {
		list_add_tail(&dev->ibd_fail_list,
			      &kiblnd_data.kib_failed_devs);
		wake_up(&kiblnd_data.kib_failover_waitq);
	}

	spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);

	list_add_tail(&conn->ibc_list, &kiblnd_data.kib_connd_conns);
	wake_up(&kiblnd_data.kib_connd_waitq);

	spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
}
void
kiblnd_close_conn(kib_conn_t *conn, int error)
{
	unsigned long flags;

	write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

	kiblnd_close_conn_locked(conn, error);

	write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
}
static void
kiblnd_handle_early_rxs(kib_conn_t *conn)
{
	unsigned long flags;
	kib_rx_t *rx;
	kib_rx_t *tmp;

	LASSERT(!in_interrupt());
	LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);

	write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
	list_for_each_entry_safe(rx, tmp, &conn->ibc_early_rxs, rx_list) {
		list_del(&rx->rx_list);
		write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

		kiblnd_handle_rx(rx);

		write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
	}
	write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
}
static void
kiblnd_abort_txs(kib_conn_t *conn, struct list_head *txs)
{
	LIST_HEAD(zombies);
	struct list_head *tmp;
	struct list_head *nxt;
	kib_tx_t *tx;

	spin_lock(&conn->ibc_lock);

	list_for_each_safe(tmp, nxt, txs) {
		tx = list_entry(tmp, kib_tx_t, tx_list);

		if (txs == &conn->ibc_active_txs) {
			LASSERT(!tx->tx_queued);
			LASSERT(tx->tx_waiting || tx->tx_sending);
		} else {
			LASSERT(tx->tx_queued);
		}

		tx->tx_status = -ECONNABORTED;
		tx->tx_waiting = 0;

		if (!tx->tx_sending) {
			tx->tx_queued = 0;
			list_del(&tx->tx_list);
			list_add(&tx->tx_list, &zombies);
		}
	}

	spin_unlock(&conn->ibc_lock);

	kiblnd_txlist_done(conn->ibc_peer->ibp_ni, &zombies, -ECONNABORTED);
}
static void
kiblnd_finalise_conn(kib_conn_t *conn)
{
	LASSERT(!in_interrupt());
	LASSERT(conn->ibc_state > IBLND_CONN_INIT);

	kiblnd_set_conn_state(conn, IBLND_CONN_DISCONNECTED);

	/*
	 * abort_receives moves QP state to IB_QPS_ERR. This is only required
	 * for connections that didn't get as far as being connected, because
	 * rdma_disconnect() does this for free.
	 */
	kiblnd_abort_receives(conn);

	/*
	 * Complete all tx descs not waiting for sends to complete.
	 * NB we should be safe from RDMA now that the QP has changed state
	 */
	kiblnd_abort_txs(conn, &conn->ibc_tx_noops);
	kiblnd_abort_txs(conn, &conn->ibc_tx_queue);
	kiblnd_abort_txs(conn, &conn->ibc_tx_queue_rsrvd);
	kiblnd_abort_txs(conn, &conn->ibc_tx_queue_nocred);
	kiblnd_abort_txs(conn, &conn->ibc_active_txs);

	kiblnd_handle_early_rxs(conn);
}
static void
kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error)
{
	LIST_HEAD(zombies);
	unsigned long flags;

	LASSERT(error);
	LASSERT(!in_interrupt());

	write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

	if (active) {
		LASSERT(peer->ibp_connecting > 0);
		peer->ibp_connecting--;
	} else {
		LASSERT(peer->ibp_accepting > 0);
		peer->ibp_accepting--;
	}

	if (kiblnd_peer_connecting(peer)) {
		/* another connection attempt under way... */
		write_unlock_irqrestore(&kiblnd_data.kib_global_lock,
					flags);
		return;
	}

	peer->ibp_reconnected = 0;
	if (list_empty(&peer->ibp_conns)) {
		/* Take peer's blocked transmits to complete with error */
		list_add(&zombies, &peer->ibp_tx_queue);
		list_del_init(&peer->ibp_tx_queue);

		if (kiblnd_peer_active(peer))
			kiblnd_unlink_peer_locked(peer);

		peer->ibp_error = error;
	} else {
		/* Can't have blocked transmits if there are connections */
		LASSERT(list_empty(&peer->ibp_tx_queue));
	}

	write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

	kiblnd_peer_notify(peer);

	if (list_empty(&zombies))
		return;

	CNETERR("Deleting messages for %s: connection failed\n",
		libcfs_nid2str(peer->ibp_nid));

	kiblnd_txlist_done(peer->ibp_ni, &zombies, -EHOSTUNREACH);
}
static void
kiblnd_connreq_done(kib_conn_t *conn, int status)
{
	kib_peer_t *peer = conn->ibc_peer;
	kib_tx_t *tx;
	kib_tx_t *tmp;
	struct list_head txs;
	unsigned long flags;
	int active;

	active = (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);

	CDEBUG(D_NET, "%s: active(%d), version(%x), status(%d)\n",
	       libcfs_nid2str(peer->ibp_nid), active,
	       conn->ibc_version, status);

	LASSERT(!in_interrupt());
	LASSERT((conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT &&
		 peer->ibp_connecting > 0) ||
		(conn->ibc_state == IBLND_CONN_PASSIVE_WAIT &&
		 peer->ibp_accepting > 0));

	LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
	conn->ibc_connvars = NULL;

	if (status) {
		/* failed to establish connection */
		kiblnd_peer_connect_failed(peer, active, status);
		kiblnd_finalise_conn(conn);
		return;
	}

	/* connection established */
	write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

	conn->ibc_last_send = jiffies;
	kiblnd_set_conn_state(conn, IBLND_CONN_ESTABLISHED);
	kiblnd_peer_alive(peer);

	/*
	 * Add conn to peer's list and nuke any dangling conns from a different
	 * peer instance...
	 */
	kiblnd_conn_addref(conn);	/* +1 ref for ibc_list */
	list_add(&conn->ibc_list, &peer->ibp_conns);
	peer->ibp_reconnected = 0;
	if (active)
		peer->ibp_connecting--;
	else
		peer->ibp_accepting--;

	if (!peer->ibp_version) {
		peer->ibp_version = conn->ibc_version;
		peer->ibp_incarnation = conn->ibc_incarnation;
	}

	if (peer->ibp_version != conn->ibc_version ||
	    peer->ibp_incarnation != conn->ibc_incarnation) {
		kiblnd_close_stale_conns_locked(peer, conn->ibc_version,
						conn->ibc_incarnation);
		peer->ibp_version = conn->ibc_version;
		peer->ibp_incarnation = conn->ibc_incarnation;
	}

	/* grab pending txs while I have the lock */
	list_add(&txs, &peer->ibp_tx_queue);
	list_del_init(&peer->ibp_tx_queue);

	if (!kiblnd_peer_active(peer) ||	/* peer has been deleted */
	    conn->ibc_comms_error) {		/* error has happened already */
		lnet_ni_t *ni = peer->ibp_ni;

		/* start to shut down connection */
		kiblnd_close_conn_locked(conn, -ECONNABORTED);
		write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

		kiblnd_txlist_done(ni, &txs, -ECONNABORTED);

		return;
	}

	/*
	 * refcount taken by cmid is not reliable after I released the glock
	 * because this connection is visible to other threads now, another
	 * thread can find and close this connection right after I released
	 * the glock, if kiblnd_cm_callback for RDMA_CM_EVENT_DISCONNECTED is
	 * called, it can release the connection refcount taken by cmid.
	 * It means the connection could be destroyed before I finish my
	 * operations on it.
	 */
	kiblnd_conn_addref(conn);
	write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

	/* Schedule blocked txs */
	spin_lock(&conn->ibc_lock);
	list_for_each_entry_safe(tx, tmp, &txs, tx_list) {
		list_del(&tx->tx_list);

		kiblnd_queue_tx_locked(tx, conn);
	}
	spin_unlock(&conn->ibc_lock);

	kiblnd_check_sends(conn);

	/* schedule blocked rxs */
	kiblnd_handle_early_rxs(conn);

	kiblnd_conn_decref(conn);
}
static void
kiblnd_reject(struct rdma_cm_id *cmid, kib_rej_t *rej)
{
	int rc;

	rc = rdma_reject(cmid, rej, sizeof(*rej));

	if (rc)
		CWARN("Error %d sending reject\n", rc);
}
static int
kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
{
	rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
	kib_msg_t *reqmsg = priv;
	kib_msg_t *ackmsg;
	kib_dev_t *ibdev;
	kib_peer_t *peer;
	kib_peer_t *peer2;
	kib_conn_t *conn;
	lnet_ni_t *ni = NULL;
	kib_net_t *net = NULL;
	lnet_nid_t nid;
	struct rdma_conn_param cp;
	kib_rej_t rej;
	int version = IBLND_MSG_VERSION;
	unsigned long flags;
	int rc;
	struct sockaddr_in *peer_addr;

	LASSERT(!in_interrupt());

	/* cmid inherits 'context' from the corresponding listener id */
	ibdev = (kib_dev_t *)cmid->context;
	LASSERT(ibdev);

	memset(&rej, 0, sizeof(rej));
	rej.ibr_magic = IBLND_MSG_MAGIC;
	rej.ibr_why = IBLND_REJECT_FATAL;
	rej.ibr_cp.ibcp_max_msg_size = IBLND_MSG_SIZE;

	peer_addr = (struct sockaddr_in *)&cmid->route.addr.dst_addr;
	if (*kiblnd_tunables.kib_require_priv_port &&
	    ntohs(peer_addr->sin_port) >= PROT_SOCK) {
		__u32 ip = ntohl(peer_addr->sin_addr.s_addr);

		CERROR("Peer's port (%pI4h:%hu) is not privileged\n",
		       &ip, ntohs(peer_addr->sin_port));
		goto failed;
	}

	if (priv_nob < offsetof(kib_msg_t, ibm_type)) {
		CERROR("Short connection request\n");
		goto failed;
	}

	/*
	 * Future protocol version compatibility support! If the
	 * o2iblnd-specific protocol changes, or when LNET unifies
	 * protocols over all LNDs, the initial connection will
	 * negotiate a protocol version. I trap this here to avoid
	 * console errors; the reject tells the peer which protocol I
	 * speak.
	 */
	if (reqmsg->ibm_magic == LNET_PROTO_MAGIC ||
	    reqmsg->ibm_magic == __swab32(LNET_PROTO_MAGIC))
		goto failed;
	if (reqmsg->ibm_magic == IBLND_MSG_MAGIC &&
	    reqmsg->ibm_version != IBLND_MSG_VERSION &&
	    reqmsg->ibm_version != IBLND_MSG_VERSION_1)
		goto failed;
	if (reqmsg->ibm_magic == __swab32(IBLND_MSG_MAGIC) &&
	    reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION) &&
	    reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION_1))
		goto failed;

	rc = kiblnd_unpack_msg(reqmsg, priv_nob);
	if (rc) {
		CERROR("Can't parse connection request: %d\n", rc);
		goto failed;
	}

	nid = reqmsg->ibm_srcnid;
	ni = lnet_net2ni(LNET_NIDNET(reqmsg->ibm_dstnid));

	if (ni) {
		net = (kib_net_t *)ni->ni_data;
		rej.ibr_incarnation = net->ibn_incarnation;
	}

	if (!ni ||				/* no matching net */
	    ni->ni_nid != reqmsg->ibm_dstnid || /* right NET, wrong NID! */
	    net->ibn_dev != ibdev) {		/* wrong device */
		CERROR("Can't accept conn from %s on %s (%s:%d:%pI4h): bad dst nid %s\n",
		       libcfs_nid2str(nid),
		       !ni ? "NA" : libcfs_nid2str(ni->ni_nid),
		       ibdev->ibd_ifname, ibdev->ibd_nnets,
		       &ibdev->ibd_ifip,
		       libcfs_nid2str(reqmsg->ibm_dstnid));

		goto failed;
	}

	/* check time stamp as soon as possible */
	if (reqmsg->ibm_dststamp &&
	    reqmsg->ibm_dststamp != net->ibn_incarnation) {
		CWARN("Stale connection request\n");
		rej.ibr_why = IBLND_REJECT_CONN_STALE;
		goto failed;
	}

	/* I can accept peer's version */
	version = reqmsg->ibm_version;

	if (reqmsg->ibm_type != IBLND_MSG_CONNREQ) {
		CERROR("Unexpected connreq msg type: %x from %s\n",
		       reqmsg->ibm_type, libcfs_nid2str(nid));
		goto failed;
	}

	if (reqmsg->ibm_u.connparams.ibcp_queue_depth >
	    IBLND_MSG_QUEUE_SIZE(version)) {
		CERROR("Can't accept conn from %s, queue depth too large: %d (<=%d wanted)\n",
		       libcfs_nid2str(nid),
		       reqmsg->ibm_u.connparams.ibcp_queue_depth,
		       IBLND_MSG_QUEUE_SIZE(version));

		if (version == IBLND_MSG_VERSION)
			rej.ibr_why = IBLND_REJECT_MSG_QUEUE_SIZE;

		goto failed;
	}

	if (reqmsg->ibm_u.connparams.ibcp_max_frags >
	    IBLND_RDMA_FRAGS(version)) {
		CWARN("Can't accept conn from %s (version %x): max_frags %d too large (%d wanted)\n",
		      libcfs_nid2str(nid), version,
		      reqmsg->ibm_u.connparams.ibcp_max_frags,
		      IBLND_RDMA_FRAGS(version));

		if (version >= IBLND_MSG_VERSION)
			rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;

		goto failed;
	} else if (reqmsg->ibm_u.connparams.ibcp_max_frags <
		   IBLND_RDMA_FRAGS(version) && !net->ibn_fmr_ps) {
		CWARN("Can't accept conn from %s (version %x): max_frags %d incompatible without FMR pool (%d wanted)\n",
		      libcfs_nid2str(nid), version,
		      reqmsg->ibm_u.connparams.ibcp_max_frags,
		      IBLND_RDMA_FRAGS(version));

		if (version >= IBLND_MSG_VERSION)
			rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;

		goto failed;
	}

	if (reqmsg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) {
		CERROR("Can't accept %s: message size %d too big (%d max)\n",
		       libcfs_nid2str(nid),
		       reqmsg->ibm_u.connparams.ibcp_max_msg_size,
		       IBLND_MSG_SIZE);
		goto failed;
	}

	/* assume 'nid' is a new peer; create */
	rc = kiblnd_create_peer(ni, &peer, nid);
	if (rc) {
		CERROR("Can't create peer for %s\n", libcfs_nid2str(nid));
		rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
		goto failed;
	}

	/* We have validated the peer's parameters so use those */
	peer->ibp_max_frags = reqmsg->ibm_u.connparams.ibcp_max_frags;
	peer->ibp_queue_depth = reqmsg->ibm_u.connparams.ibcp_queue_depth;

	write_lock_irqsave(g_lock, flags);

	peer2 = kiblnd_find_peer_locked(nid);
	if (peer2) {
		if (!peer2->ibp_version) {
			peer2->ibp_version = version;
			peer2->ibp_incarnation = reqmsg->ibm_srcstamp;
		}

		/* not the guy I've talked with */
		if (peer2->ibp_incarnation != reqmsg->ibm_srcstamp ||
		    peer2->ibp_version != version) {
			kiblnd_close_peer_conns_locked(peer2, -ESTALE);

			if (kiblnd_peer_active(peer2)) {
				peer2->ibp_incarnation = reqmsg->ibm_srcstamp;
				peer2->ibp_version = version;
			}
			write_unlock_irqrestore(g_lock, flags);

			CWARN("Conn stale %s version %x/%x incarnation %llu/%llu\n",
			      libcfs_nid2str(nid), peer2->ibp_version, version,
			      peer2->ibp_incarnation, reqmsg->ibm_srcstamp);

			kiblnd_peer_decref(peer);
			rej.ibr_why = IBLND_REJECT_CONN_STALE;
			goto failed;
		}

		/* tie-break connection race in favour of the higher NID */
		if (peer2->ibp_connecting &&
		    nid < ni->ni_nid) {
			write_unlock_irqrestore(g_lock, flags);

			CWARN("Conn race %s\n", libcfs_nid2str(peer2->ibp_nid));

			kiblnd_peer_decref(peer);
			rej.ibr_why = IBLND_REJECT_CONN_RACE;
			goto failed;
		}

		/*
		 * passive connection is allowed even this peer is waiting for
		 * reconnection.
		 */
		peer2->ibp_reconnecting = 0;
		peer2->ibp_accepting++;
		kiblnd_peer_addref(peer2);

		/*
		 * Race with kiblnd_launch_tx (active connect) to create peer
		 * so copy validated parameters since we now know what the
		 * peer's limits are
		 */
		peer2->ibp_max_frags = peer->ibp_max_frags;
		peer2->ibp_queue_depth = peer->ibp_queue_depth;

		write_unlock_irqrestore(g_lock, flags);
		kiblnd_peer_decref(peer);
		peer = peer2;
	} else {
		/* Brand new peer */
		LASSERT(!peer->ibp_accepting);
		LASSERT(!peer->ibp_version &&
			!peer->ibp_incarnation);

		peer->ibp_accepting = 1;
		peer->ibp_version = version;
		peer->ibp_incarnation = reqmsg->ibm_srcstamp;

		/* I have a ref on ni that prevents it being shutdown */
		LASSERT(!net->ibn_shutdown);

		kiblnd_peer_addref(peer);
		list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid));

		write_unlock_irqrestore(g_lock, flags);
	}

	conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_PASSIVE_WAIT,
				  version);
	if (!conn) {
		kiblnd_peer_connect_failed(peer, 0, -ENOMEM);
		kiblnd_peer_decref(peer);
		rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
		goto failed;
	}

	/*
	 * conn now "owns" cmid, so I return success from here on to ensure the
	 * CM callback doesn't destroy cmid.
	 */
	conn->ibc_incarnation = reqmsg->ibm_srcstamp;
	conn->ibc_credits = conn->ibc_queue_depth;
	conn->ibc_reserved_credits = conn->ibc_queue_depth;
	LASSERT(conn->ibc_credits + conn->ibc_reserved_credits +
		IBLND_OOB_MSGS(version) <= IBLND_RX_MSGS(conn));

	ackmsg = &conn->ibc_connvars->cv_msg;
	memset(ackmsg, 0, sizeof(*ackmsg));

	kiblnd_init_msg(ackmsg, IBLND_MSG_CONNACK,
			sizeof(ackmsg->ibm_u.connparams));
	ackmsg->ibm_u.connparams.ibcp_queue_depth = conn->ibc_queue_depth;
	ackmsg->ibm_u.connparams.ibcp_max_frags = conn->ibc_max_frags;
	ackmsg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;

	kiblnd_pack_msg(ni, ackmsg, version, 0, nid, reqmsg->ibm_srcstamp);

	memset(&cp, 0, sizeof(cp));
	cp.private_data = ackmsg;
	cp.private_data_len = ackmsg->ibm_nob;
	cp.responder_resources = 0;	    /* No atomic ops or RDMA reads */
	cp.initiator_depth = 0;
	cp.flow_control = 1;
	cp.retry_count = *kiblnd_tunables.kib_retry_count;
	cp.rnr_retry_count = *kiblnd_tunables.kib_rnr_retry_count;

	CDEBUG(D_NET, "Accept %s\n", libcfs_nid2str(nid));

	rc = rdma_accept(cmid, &cp);
	if (rc) {
		CERROR("Can't accept %s: %d\n", libcfs_nid2str(nid), rc);
		rej.ibr_version = version;
		rej.ibr_why = IBLND_REJECT_FATAL;

		kiblnd_reject(cmid, &rej);
		kiblnd_connreq_done(conn, rc);
		kiblnd_conn_decref(conn);
	}

	lnet_ni_decref(ni);
	return 0;

 failed:
	if (ni)
		lnet_ni_decref(ni);

	rej.ibr_version = version;
	rej.ibr_cp.ibcp_queue_depth = IBLND_MSG_QUEUE_SIZE(version);
	rej.ibr_cp.ibcp_max_frags = IBLND_RDMA_FRAGS(version);
	kiblnd_reject(cmid, &rej);

	return -ECONNREFUSED;
}
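
/*
 * Decide whether a failed active connect should be retried; for parameter
 * rejections (queue depth / max frags) shrink the stored peer limits to
 * the values the peer advertised before flagging the reconnect.
 */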
static void
kiblnd_check_reconnect(kib_conn_t *conn, int version,
		       __u64 incarnation, int why, kib_connparams_t *cp)
{
	rwlock_t *glock = &kiblnd_data.kib_global_lock;
	kib_peer_t *peer = conn->ibc_peer;
	char *reason;
	int msg_size = IBLND_MSG_SIZE;
	int frag_num = -1;
	int queue_dep = -1;
	bool reconnect;
	unsigned long flags;

	LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
	LASSERT(peer->ibp_connecting > 0);	/* 'conn' at least */
	LASSERT(!peer->ibp_reconnecting);

	if (cp) {
		msg_size = cp->ibcp_max_msg_size;
		frag_num = cp->ibcp_max_frags;
		queue_dep = cp->ibcp_queue_depth;
	}

	write_lock_irqsave(glock, flags);
	/*
	 * retry connection if it's still needed and no other connection
	 * attempts (active or passive) are in progress
	 * NB: reconnect is still needed even when ibp_tx_queue is
	 * empty if ibp_version != version because reconnect may be
	 * initiated by kiblnd_query()
	 */
	reconnect = (!list_empty(&peer->ibp_tx_queue) ||
		     peer->ibp_version != version) &&
		    peer->ibp_connecting == 1 &&
		    !peer->ibp_accepting;
	if (!reconnect) {
		reason = "no need";
		goto out;
	}

	switch (why) {
	default:
		reason = "Unknown";
		break;

	case IBLND_REJECT_RDMA_FRAGS:
		if (!cp) {
			reason = "can't negotiate max frags";
			goto out;
		}
		if (!*kiblnd_tunables.kib_map_on_demand) {
			reason = "map_on_demand must be enabled";
			goto out;
		}
		if (conn->ibc_max_frags <= frag_num) {
			reason = "unsupported max frags";
			goto out;
		}

		peer->ibp_max_frags = frag_num;
		reason = "rdma fragments";
		break;

	case IBLND_REJECT_MSG_QUEUE_SIZE:
		if (!cp) {
			reason = "can't negotiate queue depth";
			goto out;
		}
		if (conn->ibc_queue_depth <= queue_dep) {
			reason = "unsupported queue depth";
			goto out;
		}

		peer->ibp_queue_depth = queue_dep;
		reason = "queue depth";
		break;

	case IBLND_REJECT_CONN_STALE:
		reason = "stale";
		break;

	case IBLND_REJECT_CONN_RACE:
		reason = "conn race";
		break;

	case IBLND_REJECT_CONN_UNCOMPAT:
		reason = "version negotiation";
		break;
	}

	conn->ibc_reconnect = 1;
	peer->ibp_reconnecting = 1;
	peer->ibp_version = version;
	if (incarnation)
		peer->ibp_incarnation = incarnation;
 out:
	write_unlock_irqrestore(glock, flags);

	CNETERR("%s: %s (%s), %x, %x, msg_size: %d, queue_depth: %d/%d, max_frags: %d/%d\n",
		libcfs_nid2str(peer->ibp_nid),
		reconnect ? "reconnect" : "don't reconnect",
		reason, IBLND_MSG_VERSION, version, msg_size,
		conn->ibc_queue_depth, queue_dep,
		conn->ibc_max_frags, frag_num);
	/*
	 * if conn::ibc_reconnect is TRUE, connd will reconnect to the peer
	 * while destroying the zombie
	 */
}
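
/*
 * Decode a CM reject on the active side. Consumer-defined rejects carry a
 * kib_rej_t which may need byte-swapping if the peer has the opposite
 * endianness.
 */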
static void
kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob)
{
	kib_peer_t *peer = conn->ibc_peer;

	LASSERT(!in_interrupt());
	LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);

	switch (reason) {
	case IB_CM_REJ_STALE_CONN:
		kiblnd_check_reconnect(conn, IBLND_MSG_VERSION, 0,
				       IBLND_REJECT_CONN_STALE, NULL);
		break;

	case IB_CM_REJ_INVALID_SERVICE_ID:
		CNETERR("%s rejected: no listener at %d\n",
			libcfs_nid2str(peer->ibp_nid),
			*kiblnd_tunables.kib_service);
		break;

	case IB_CM_REJ_CONSUMER_DEFINED:
		if (priv_nob >= offsetof(kib_rej_t, ibr_padding)) {
			kib_rej_t *rej = priv;
			kib_connparams_t *cp = NULL;
			int flip = 0;
			__u64 incarnation = -1;

			/* NB. default incarnation is -1 because:
			 * a) V1 will ignore dst incarnation in connreq.
			 * b) V2 will provide incarnation while rejecting me,
			 *    -1 will be overwrote.
			 *
			 * if I try to connect to a V1 peer with V2 protocol,
			 * it rejected me then upgrade to V2, I have no idea
			 * about the upgrading and try to reconnect with V1,
			 * in this case upgraded V2 can find out I'm trying to
			 * talk to the old guy and reject me(incarnation is -1).
			 */
			if (rej->ibr_magic == __swab32(IBLND_MSG_MAGIC) ||
			    rej->ibr_magic == __swab32(LNET_PROTO_MAGIC)) {
				__swab32s(&rej->ibr_magic);
				__swab16s(&rej->ibr_version);
				flip = 1;
			}

			if (priv_nob >= sizeof(kib_rej_t) &&
			    rej->ibr_version > IBLND_MSG_VERSION_1) {
				/*
				 * priv_nob is always 148 in current version
				 * of OFED, so we still need to check version.
				 * (define of IB_CM_REJ_PRIVATE_DATA_SIZE)
				 */
				cp = &rej->ibr_cp;

				if (flip) {
					__swab64s(&rej->ibr_incarnation);
					__swab16s(&cp->ibcp_queue_depth);
					__swab16s(&cp->ibcp_max_frags);
					__swab32s(&cp->ibcp_max_msg_size);
				}

				incarnation = rej->ibr_incarnation;
			}

			if (rej->ibr_magic != IBLND_MSG_MAGIC &&
			    rej->ibr_magic != LNET_PROTO_MAGIC) {
				CERROR("%s rejected: consumer defined fatal error\n",
				       libcfs_nid2str(peer->ibp_nid));
				break;
			}

			if (rej->ibr_version != IBLND_MSG_VERSION &&
			    rej->ibr_version != IBLND_MSG_VERSION_1) {
				CERROR("%s rejected: o2iblnd version %x error\n",
				       libcfs_nid2str(peer->ibp_nid),
				       rej->ibr_version);
				break;
			}

			if (rej->ibr_why == IBLND_REJECT_FATAL &&
			    rej->ibr_version == IBLND_MSG_VERSION_1) {
				CDEBUG(D_NET, "rejected by old version peer %s: %x\n",
				       libcfs_nid2str(peer->ibp_nid),
				       rej->ibr_version);

				if (conn->ibc_version != IBLND_MSG_VERSION_1)
					rej->ibr_why = IBLND_REJECT_CONN_UNCOMPAT;
			}

			switch (rej->ibr_why) {
			case IBLND_REJECT_CONN_RACE:
			case IBLND_REJECT_CONN_STALE:
			case IBLND_REJECT_CONN_UNCOMPAT:
			case IBLND_REJECT_MSG_QUEUE_SIZE:
			case IBLND_REJECT_RDMA_FRAGS:
				kiblnd_check_reconnect(conn, rej->ibr_version,
						       incarnation,
						       rej->ibr_why, cp);
				break;

			case IBLND_REJECT_NO_RESOURCES:
				CERROR("%s rejected: o2iblnd no resources\n",
				       libcfs_nid2str(peer->ibp_nid));
				break;

			case IBLND_REJECT_FATAL:
				CERROR("%s rejected: o2iblnd fatal error\n",
				       libcfs_nid2str(peer->ibp_nid));
				break;

			default:
				CERROR("%s rejected: o2iblnd reason %d\n",
				       libcfs_nid2str(peer->ibp_nid),
				       rej->ibr_why);
				break;
			}
			break;
		}
		/* fall through */
	default:
		CNETERR("%s rejected: reason %d, size %d\n",
			libcfs_nid2str(peer->ibp_nid), reason, priv_nob);
		break;
	}

	kiblnd_connreq_done(conn, -ECONNREFUSED);
}
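
/*
 * Validate the CONNACK from the passive side; on success adopt the
 * negotiated queue depth and max frags and complete the connection.
 */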
static void
kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob)
{
	kib_peer_t *peer = conn->ibc_peer;
	lnet_ni_t *ni = peer->ibp_ni;
	kib_net_t *net = ni->ni_data;
	kib_msg_t *msg = priv;
	int ver = conn->ibc_version;
	int rc = kiblnd_unpack_msg(msg, priv_nob);
	unsigned long flags;

	LASSERT(net);

	if (rc) {
		CERROR("Can't unpack connack from %s: %d\n",
		       libcfs_nid2str(peer->ibp_nid), rc);
		goto failed;
	}

	if (msg->ibm_type != IBLND_MSG_CONNACK) {
		CERROR("Unexpected message %d from %s\n",
		       msg->ibm_type, libcfs_nid2str(peer->ibp_nid));
		rc = -EPROTO;
		goto failed;
	}

	if (ver != msg->ibm_version) {
		CERROR("%s replied version %x is different with requested version %x\n",
		       libcfs_nid2str(peer->ibp_nid), msg->ibm_version, ver);
		rc = -EPROTO;
		goto failed;
	}

	if (msg->ibm_u.connparams.ibcp_queue_depth >
	    conn->ibc_queue_depth) {
		CERROR("%s has incompatible queue depth %d (<=%d wanted)\n",
		       libcfs_nid2str(peer->ibp_nid),
		       msg->ibm_u.connparams.ibcp_queue_depth,
		       conn->ibc_queue_depth);
		rc = -EPROTO;
		goto failed;
	}

	if (msg->ibm_u.connparams.ibcp_max_frags >
	    conn->ibc_max_frags) {
		CERROR("%s has incompatible max_frags %d (<=%d wanted)\n",
		       libcfs_nid2str(peer->ibp_nid),
		       msg->ibm_u.connparams.ibcp_max_frags,
		       conn->ibc_max_frags);
		rc = -EPROTO;
		goto failed;
	}

	if (msg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) {
		CERROR("%s max message size %d too big (%d max)\n",
		       libcfs_nid2str(peer->ibp_nid),
		       msg->ibm_u.connparams.ibcp_max_msg_size,
		       IBLND_MSG_SIZE);
		rc = -EPROTO;
		goto failed;
	}

	read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
	if (msg->ibm_dstnid == ni->ni_nid &&
	    msg->ibm_dststamp == net->ibn_incarnation)
		rc = 0;
	else
		rc = -ESTALE;
	read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

	if (rc) {
		CERROR("Bad connection reply from %s, rc = %d, version: %x max_frags: %d\n",
		       libcfs_nid2str(peer->ibp_nid), rc,
		       msg->ibm_version, msg->ibm_u.connparams.ibcp_max_frags);
		goto failed;
	}

	conn->ibc_incarnation = msg->ibm_srcstamp;
	conn->ibc_credits = msg->ibm_u.connparams.ibcp_queue_depth;
	conn->ibc_reserved_credits = msg->ibm_u.connparams.ibcp_queue_depth;
	conn->ibc_queue_depth = msg->ibm_u.connparams.ibcp_queue_depth;
	conn->ibc_max_frags = msg->ibm_u.connparams.ibcp_max_frags;
	LASSERT(conn->ibc_credits + conn->ibc_reserved_credits +
		IBLND_OOB_MSGS(ver) <= IBLND_RX_MSGS(conn));

	kiblnd_connreq_done(conn, 0);
	return;

 failed:
	/*
	 * NB My QP has already established itself, so I handle anything going
	 * wrong here by setting ibc_comms_error.
	 * kiblnd_connreq_done(0) moves the conn state to ESTABLISHED, but then
	 * immediately tears it down.
	 */
	conn->ibc_comms_error = rc;
	kiblnd_connreq_done(conn, 0);
}
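
/*
 * Start the active side of a connection: build a CONNREQ in the conn's
 * connvars and pass it as private data of rdma_connect().
 */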
static int
kiblnd_active_connect(struct rdma_cm_id *cmid)
{
	kib_peer_t *peer = (kib_peer_t *)cmid->context;
	kib_conn_t *conn;
	kib_msg_t *msg;
	struct rdma_conn_param cp;
	int version;
	__u64 incarnation;
	unsigned long flags;
	int rc;

	read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

	incarnation = peer->ibp_incarnation;
	version = !peer->ibp_version ? IBLND_MSG_VERSION :
				       peer->ibp_version;

	read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

	conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_ACTIVE_CONNECT,
				  version);
	if (!conn) {
		kiblnd_peer_connect_failed(peer, 1, -ENOMEM);
		kiblnd_peer_decref(peer); /* lose cmid's ref */
		return -ENOMEM;
	}

	/*
	 * conn "owns" cmid now, so I return success from here on to ensure the
	 * CM callback doesn't destroy cmid. conn also takes over cmid's ref
	 * on 'peer'.
	 */
	msg = &conn->ibc_connvars->cv_msg;

	memset(msg, 0, sizeof(*msg));
	kiblnd_init_msg(msg, IBLND_MSG_CONNREQ, sizeof(msg->ibm_u.connparams));
	msg->ibm_u.connparams.ibcp_queue_depth = conn->ibc_queue_depth;
	msg->ibm_u.connparams.ibcp_max_frags = conn->ibc_max_frags;
	msg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;

	kiblnd_pack_msg(peer->ibp_ni, msg, version,
			0, peer->ibp_nid, incarnation);

	memset(&cp, 0, sizeof(cp));
	cp.private_data = msg;
	cp.private_data_len = msg->ibm_nob;
	cp.responder_resources = 0;	    /* No atomic ops or RDMA reads */
	cp.initiator_depth = 0;
	cp.flow_control = 1;
	cp.retry_count = *kiblnd_tunables.kib_retry_count;
	cp.rnr_retry_count = *kiblnd_tunables.kib_rnr_retry_count;

	LASSERT(cmid->context == (void *)conn);
	LASSERT(conn->ibc_cmid == cmid);

	rc = rdma_connect(cmid, &cp);
	if (rc) {
		CERROR("Can't connect to %s: %d\n",
		       libcfs_nid2str(peer->ibp_nid), rc);
		kiblnd_connreq_done(conn, rc);
		kiblnd_conn_decref(conn);
	}

	return 0;
}
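
/*
 * CM event dispatcher. NB cmid->context is the listening device for
 * CONNECT_REQUEST, a peer while resolving address/route, and a conn
 * thereafter; returning non-zero tells the CM to destroy cmid.
 */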
int
kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
{
	kib_peer_t *peer;
	kib_conn_t *conn;
	int rc;

	switch (event->event) {
	default:
		CERROR("Unexpected event: %d, status: %d\n",
		       event->event, event->status);
		LBUG();

	case RDMA_CM_EVENT_CONNECT_REQUEST:
		/* destroy cmid on failure */
		rc = kiblnd_passive_connect(cmid,
					    (void *)KIBLND_CONN_PARAM(event),
					    KIBLND_CONN_PARAM_LEN(event));
		CDEBUG(D_NET, "connreq: %d\n", rc);
		return rc;

	case RDMA_CM_EVENT_ADDR_ERROR:
		peer = (kib_peer_t *)cmid->context;
		CNETERR("%s: ADDR ERROR %d\n",
			libcfs_nid2str(peer->ibp_nid), event->status);
		kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH);
		kiblnd_peer_decref(peer);
		return -EHOSTUNREACH;      /* rc destroys cmid */

	case RDMA_CM_EVENT_ADDR_RESOLVED:
		peer = (kib_peer_t *)cmid->context;

		CDEBUG(D_NET, "%s Addr resolved: %d\n",
		       libcfs_nid2str(peer->ibp_nid), event->status);

		if (event->status) {
			CNETERR("Can't resolve address for %s: %d\n",
				libcfs_nid2str(peer->ibp_nid), event->status);
			rc = event->status;
		} else {
			rc = rdma_resolve_route(
				cmid, *kiblnd_tunables.kib_timeout * 1000);
			if (!rc)
				return 0;
			/* Can't initiate route resolution */
			CERROR("Can't resolve route for %s: %d\n",
			       libcfs_nid2str(peer->ibp_nid), rc);
		}
		kiblnd_peer_connect_failed(peer, 1, rc);
		kiblnd_peer_decref(peer);
		return rc;		      /* rc destroys cmid */

	case RDMA_CM_EVENT_ROUTE_ERROR:
		peer = (kib_peer_t *)cmid->context;
		CNETERR("%s: ROUTE ERROR %d\n",
			libcfs_nid2str(peer->ibp_nid), event->status);
		kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH);
		kiblnd_peer_decref(peer);
		return -EHOSTUNREACH;	   /* rc destroys cmid */

	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		peer = (kib_peer_t *)cmid->context;
		CDEBUG(D_NET, "%s Route resolved: %d\n",
		       libcfs_nid2str(peer->ibp_nid), event->status);

		if (!event->status)
			return kiblnd_active_connect(cmid);

		CNETERR("Can't resolve route for %s: %d\n",
			libcfs_nid2str(peer->ibp_nid), event->status);
		kiblnd_peer_connect_failed(peer, 1, event->status);
		kiblnd_peer_decref(peer);
		return event->status;	   /* rc destroys cmid */

	case RDMA_CM_EVENT_UNREACHABLE:
		conn = (kib_conn_t *)cmid->context;
		LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
			conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
		CNETERR("%s: UNREACHABLE %d\n",
			libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
		kiblnd_connreq_done(conn, -ENETDOWN);
		kiblnd_conn_decref(conn);
		return 0;

	case RDMA_CM_EVENT_CONNECT_ERROR:
		conn = (kib_conn_t *)cmid->context;
		LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
			conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
		CNETERR("%s: CONNECT ERROR %d\n",
			libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
		kiblnd_connreq_done(conn, -ENOTCONN);
		kiblnd_conn_decref(conn);
		return 0;

	case RDMA_CM_EVENT_REJECTED:
		conn = (kib_conn_t *)cmid->context;
		switch (conn->ibc_state) {
		default:
			LBUG();

		case IBLND_CONN_PASSIVE_WAIT:
			CERROR("%s: REJECTED %d\n",
			       libcfs_nid2str(conn->ibc_peer->ibp_nid),
			       event->status);
			kiblnd_connreq_done(conn, -ECONNRESET);
			break;

		case IBLND_CONN_ACTIVE_CONNECT:
			kiblnd_rejected(conn, event->status,
					(void *)KIBLND_CONN_PARAM(event),
					KIBLND_CONN_PARAM_LEN(event));
			break;
		}
		kiblnd_conn_decref(conn);
		return 0;

	case RDMA_CM_EVENT_ESTABLISHED:
		conn = (kib_conn_t *)cmid->context;
		switch (conn->ibc_state) {
		default:
			LBUG();

		case IBLND_CONN_PASSIVE_WAIT:
			CDEBUG(D_NET, "ESTABLISHED (passive): %s\n",
			       libcfs_nid2str(conn->ibc_peer->ibp_nid));
			kiblnd_connreq_done(conn, 0);
			break;

		case IBLND_CONN_ACTIVE_CONNECT:
			CDEBUG(D_NET, "ESTABLISHED(active): %s\n",
			       libcfs_nid2str(conn->ibc_peer->ibp_nid));
			kiblnd_check_connreply(conn,
					       (void *)KIBLND_CONN_PARAM(event),
					       KIBLND_CONN_PARAM_LEN(event));
			break;
		}
		/* net keeps its ref on conn! */
		return 0;

	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		CDEBUG(D_NET, "Ignore TIMEWAIT_EXIT event\n");
		return 0;

	case RDMA_CM_EVENT_DISCONNECTED:
		conn = (kib_conn_t *)cmid->context;
		if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
			CERROR("%s DISCONNECTED\n",
			       libcfs_nid2str(conn->ibc_peer->ibp_nid));
			kiblnd_connreq_done(conn, -ECONNRESET);
		} else {
			kiblnd_close_conn(conn, 0);
		}
		kiblnd_conn_decref(conn);
		cmid->context = NULL;
		return 0;

	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		LCONSOLE_ERROR_MSG(0x131,
				   "Received notification of device removal\nPlease shutdown LNET to allow this to proceed\n");
		/*
		 * Can't remove network from underneath LNET for now, so I have
		 * to ignore this
		 */
		return 0;

	case RDMA_CM_EVENT_ADDR_CHANGE:
		LCONSOLE_INFO("Physical link changed (eg hca/port)\n");
		return 0;
	}
}
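
/*
 * Return non-zero if any tx on 'txs' has passed its deadline; called with
 * ibc_lock held.
 */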
static int
kiblnd_check_txs_locked(kib_conn_t *conn, struct list_head *txs)
{
	kib_tx_t *tx;
	struct list_head *ttmp;

	list_for_each(ttmp, txs) {
		tx = list_entry(ttmp, kib_tx_t, tx_list);

		if (txs != &conn->ibc_active_txs) {
			LASSERT(tx->tx_queued);
		} else {
			LASSERT(!tx->tx_queued);
			LASSERT(tx->tx_waiting || tx->tx_sending);
		}

		if (cfs_time_aftereq(jiffies, tx->tx_deadline)) {
			CERROR("Timed out tx: %s, %lu seconds\n",
			       kiblnd_queue2str(conn, txs),
			       cfs_duration_sec(jiffies - tx->tx_deadline));
			return 1;
		}
	}

	return 0;
}
static int
kiblnd_conn_timed_out_locked(kib_conn_t *conn)
{
	return  kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue) ||
		kiblnd_check_txs_locked(conn, &conn->ibc_tx_noops) ||
		kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue_rsrvd) ||
		kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue_nocred) ||
		kiblnd_check_txs_locked(conn, &conn->ibc_active_txs);
}
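
/*
 * Scan one peer hash bucket for connections that have timed out or that
 * owe the peer a NOOP; matches are moved onto private lists and handled
 * only after the global lock has been dropped.
 */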
static void
kiblnd_check_conns(int idx)
{
	LIST_HEAD(closes);
	LIST_HEAD(checksends);
	struct list_head *peers = &kiblnd_data.kib_peers[idx];
	struct list_head *ptmp;
	kib_peer_t *peer;
	kib_conn_t *conn;
	kib_conn_t *tmp;
	kib_conn_t *temp;
	struct list_head *ctmp;
	unsigned long flags;

	/*
	 * NB. We expect to have a look at all the peers and not find any
	 * RDMAs to time out, so we just use a shared lock while we
	 * take a look...
	 */
	read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

	list_for_each(ptmp, peers) {
		peer = list_entry(ptmp, kib_peer_t, ibp_list);

		list_for_each(ctmp, &peer->ibp_conns) {
			int timedout;
			int sendnoop;

			conn = list_entry(ctmp, kib_conn_t, ibc_list);

			LASSERT(conn->ibc_state == IBLND_CONN_ESTABLISHED);

			spin_lock(&conn->ibc_lock);

			sendnoop = kiblnd_need_noop(conn);
			timedout = kiblnd_conn_timed_out_locked(conn);
			if (!sendnoop && !timedout) {
				spin_unlock(&conn->ibc_lock);
				continue;
			}

			if (timedout) {
				CERROR("Timed out RDMA with %s (%lu): c: %u, oc: %u, rc: %u\n",
				       libcfs_nid2str(peer->ibp_nid),
				       cfs_duration_sec(cfs_time_current() -
							peer->ibp_last_alive),
				       conn->ibc_credits,
				       conn->ibc_outstanding_credits,
				       conn->ibc_reserved_credits);
				list_add(&conn->ibc_connd_list, &closes);
			} else {
				list_add(&conn->ibc_connd_list, &checksends);
			}
			/* +ref for 'closes' or 'checksends' */
			kiblnd_conn_addref(conn);

			spin_unlock(&conn->ibc_lock);
		}
	}

	read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

	/*
	 * Handle timeout by closing the whole
	 * connection. We can only be sure RDMA activity
	 * has ceased once the QP has been modified.
	 */
	list_for_each_entry_safe(conn, tmp, &closes, ibc_connd_list) {
		list_del(&conn->ibc_connd_list);
		kiblnd_close_conn(conn, -ETIMEDOUT);
		kiblnd_conn_decref(conn);
	}

	/*
	 * In case we have enough credits to return via a
	 * NOOP, but there were no non-blocking tx descs
	 * free to do it last time...
	 */
	list_for_each_entry_safe(conn, temp, &checksends, ibc_connd_list) {
		list_del(&conn->ibc_connd_list);
		kiblnd_check_sends(conn);
		kiblnd_conn_decref(conn);
	}
}
static void
kiblnd_disconnect_conn(kib_conn_t *conn)
{
	LASSERT(!in_interrupt());
	LASSERT(current == kiblnd_data.kib_connd);
	LASSERT(conn->ibc_state == IBLND_CONN_CLOSING);

	rdma_disconnect(conn->ibc_cmid);
	kiblnd_finalise_conn(conn);

	kiblnd_peer_notify(conn->ibc_peer);
}
/*
 * High-water for reconnection to the same peer, reconnection attempt should
 * be delayed after trying more than KIB_RECONN_HIGH_RACE.
 */
#define KIB_RECONN_HIGH_RACE 10
/*
 * Allow connd to take a break and handle other things after consecutive
 * reconnection attempts.
 */
#define KIB_RECONN_BREAK 100
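
/*
 * Connection daemon: reaps zombie conns (re-queueing them for reconnect
 * when ibc_reconnect is set), runs disconnects, and paces RDMA timeout
 * checks across the peer table roughly once per second.
 */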
int
kiblnd_connd(void *arg)
{
	spinlock_t *lock = &kiblnd_data.kib_connd_lock;
	wait_queue_t wait;
	unsigned long flags;
	kib_conn_t *conn;
	int timeout;
	int i;
	int dropped_lock;
	int peer_index = 0;
	unsigned long deadline = jiffies;

	cfs_block_allsigs();

	init_waitqueue_entry(&wait, current);
	kiblnd_data.kib_connd = current;

	spin_lock_irqsave(lock, flags);

	while (!kiblnd_data.kib_shutdown) {
		int reconn = 0;

		dropped_lock = 0;
		if (!list_empty(&kiblnd_data.kib_connd_zombies)) {
			kib_peer_t *peer = NULL;

			conn = list_entry(kiblnd_data.kib_connd_zombies.next,
					  kib_conn_t, ibc_list);
			list_del(&conn->ibc_list);
			if (conn->ibc_reconnect) {
				peer = conn->ibc_peer;
				kiblnd_peer_addref(peer);
			}

			spin_unlock_irqrestore(lock, flags);
			dropped_lock = 1;

			kiblnd_destroy_conn(conn, !peer);

			spin_lock_irqsave(lock, flags);
			if (!peer)
				continue;

			conn->ibc_peer = peer;
			if (peer->ibp_reconnected < KIB_RECONN_HIGH_RACE)
				list_add_tail(&conn->ibc_list,
					      &kiblnd_data.kib_reconn_list);
			else
				list_add_tail(&conn->ibc_list,
					      &kiblnd_data.kib_reconn_wait);
		}

		if (!list_empty(&kiblnd_data.kib_connd_conns)) {
			conn = list_entry(kiblnd_data.kib_connd_conns.next,
					  kib_conn_t, ibc_list);
			list_del(&conn->ibc_list);

			spin_unlock_irqrestore(lock, flags);
			dropped_lock = 1;

			kiblnd_disconnect_conn(conn);
			kiblnd_conn_decref(conn);

			spin_lock_irqsave(lock, flags);
		}

		while (reconn < KIB_RECONN_BREAK) {
			if (kiblnd_data.kib_reconn_sec !=
			    ktime_get_real_seconds()) {
				kiblnd_data.kib_reconn_sec = ktime_get_real_seconds();
				list_splice_init(&kiblnd_data.kib_reconn_wait,
						 &kiblnd_data.kib_reconn_list);
			}

			if (list_empty(&kiblnd_data.kib_reconn_list))
				break;

			conn = list_entry(kiblnd_data.kib_reconn_list.next,
					  kib_conn_t, ibc_list);
			list_del(&conn->ibc_list);

			spin_unlock_irqrestore(lock, flags);
			dropped_lock = 1;

			reconn += kiblnd_reconnect_peer(conn->ibc_peer);
			kiblnd_peer_decref(conn->ibc_peer);
			LIBCFS_FREE(conn, sizeof(*conn));

			spin_lock_irqsave(lock, flags);
		}

		/* careful with the jiffy wrap... */
		timeout = (int)(deadline - jiffies);
		if (timeout <= 0) {
			const int n = 4;
			const int p = 1;
			int chunk = kiblnd_data.kib_peer_hash_size;

			spin_unlock_irqrestore(lock, flags);
			dropped_lock = 1;

			/*
			 * Time to check for RDMA timeouts on a few more
			 * peers: I do checks every 'p' seconds on a
			 * proportion of the peer table and I need to check
			 * every connection 'n' times within a timeout
			 * interval, to ensure I detect a timeout on any
			 * connection within (n+1)/n times the timeout
			 * interval.
			 */
			if (*kiblnd_tunables.kib_timeout > n * p)
				chunk = (chunk * n * p) /
					*kiblnd_tunables.kib_timeout;
			if (!chunk)
				chunk = 1;

			for (i = 0; i < chunk; i++) {
				kiblnd_check_conns(peer_index);
				peer_index = (peer_index + 1) %
					     kiblnd_data.kib_peer_hash_size;
			}

			deadline += msecs_to_jiffies(p * MSEC_PER_SEC);
			spin_lock_irqsave(lock, flags);
		}

		if (dropped_lock)
			continue;

		/* Nothing to do for 'timeout' */
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
		spin_unlock_irqrestore(lock, flags);

		schedule_timeout(timeout);

		remove_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
		spin_lock_irqsave(lock, flags);
	}

	spin_unlock_irqrestore(lock, flags);

	kiblnd_thread_fini();
	return 0;
}
void
kiblnd_qp_event(struct ib_event *event, void *arg)
{
	kib_conn_t *conn = arg;

	switch (event->event) {
	case IB_EVENT_COMM_EST:
		CDEBUG(D_NET, "%s established\n",
		       libcfs_nid2str(conn->ibc_peer->ibp_nid));
		return;

	default:
		CERROR("%s: Async QP event type %d\n",
		       libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
		return;
	}
}
static void
kiblnd_complete(struct ib_wc *wc)
{
	switch (kiblnd_wreqid2type(wc->wr_id)) {
	default:
		LBUG();

	case IBLND_WID_RDMA:
		/*
		 * We only get RDMA completion notification if it fails. All
		 * subsequent work items, including the final SEND will fail
		 * too. However we can't print out any more info about the
		 * failing RDMA because 'tx' might be back on the idle list or
		 * even reused already if we didn't manage to post all our work
		 * items
		 */
		CNETERR("RDMA (tx: %p) failed: %d\n",
			kiblnd_wreqid2ptr(wc->wr_id), wc->status);
		return;

	case IBLND_WID_TX:
		kiblnd_tx_complete(kiblnd_wreqid2ptr(wc->wr_id), wc->status);
		return;

	case IBLND_WID_RX:
		kiblnd_rx_complete(kiblnd_wreqid2ptr(wc->wr_id), wc->status,
				   wc->byte_len);
		return;
	}
}
void
kiblnd_cq_completion(struct ib_cq *cq, void *arg)
{
	/*
	 * NB I'm not allowed to schedule this conn once its refcount has
	 * reached 0. Since fundamentally I'm racing with scheduler threads
	 * consuming my CQ I could be called after all completions have
	 * occurred. But in this case, !ibc_nrx && !ibc_nsends_posted
	 * and this CQ is about to be destroyed so I NOOP.
	 */
	kib_conn_t *conn = arg;
	struct kib_sched_info *sched = conn->ibc_sched;
	unsigned long flags;

	LASSERT(cq == conn->ibc_cq);

	spin_lock_irqsave(&sched->ibs_lock, flags);

	conn->ibc_ready = 1;

	if (!conn->ibc_scheduled &&
	    (conn->ibc_nrx > 0 ||
	     conn->ibc_nsends_posted > 0)) {
		kiblnd_conn_addref(conn); /* +1 ref for sched_conns */
		conn->ibc_scheduled = 1;
		list_add_tail(&conn->ibc_sched_list, &sched->ibs_conns);

		if (waitqueue_active(&sched->ibs_waitq))
			wake_up(&sched->ibs_waitq);
	}

	spin_unlock_irqrestore(&sched->ibs_lock, flags);
}
void
kiblnd_cq_event(struct ib_event *event, void *arg)
{
	kib_conn_t *conn = arg;

	CERROR("%s: async CQ event type %d\n",
	       libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
}
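
/*
 * Per-CPT scheduler thread: drains one CQE at a time from each scheduled
 * conn, re-arming the CQ with ib_req_notify_cq() when polling runs dry,
 * and requeues the conn so other schedulers can share the work.
 */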
int
kiblnd_scheduler(void *arg)
{
	long id = (long)arg;
	struct kib_sched_info *sched;
	kib_conn_t *conn;
	wait_queue_t wait;
	unsigned long flags;
	struct ib_wc wc;
	int did_something;
	int busy_loops = 0;
	int rc;

	cfs_block_allsigs();

	init_waitqueue_entry(&wait, current);

	sched = kiblnd_data.kib_scheds[KIB_THREAD_CPT(id)];

	rc = cfs_cpt_bind(lnet_cpt_table(), sched->ibs_cpt);
	if (rc) {
		CWARN("Failed to bind on CPT %d, please verify whether all CPUs are healthy and reload modules if necessary, otherwise your system might under risk of low performance\n",
		      sched->ibs_cpt);
	}

	spin_lock_irqsave(&sched->ibs_lock, flags);

	while (!kiblnd_data.kib_shutdown) {
		if (busy_loops++ >= IBLND_RESCHED) {
			spin_unlock_irqrestore(&sched->ibs_lock, flags);

			cond_resched();
			busy_loops = 0;

			spin_lock_irqsave(&sched->ibs_lock, flags);
		}

		did_something = 0;

		if (!list_empty(&sched->ibs_conns)) {
			conn = list_entry(sched->ibs_conns.next, kib_conn_t,
					  ibc_sched_list);
			/* take over kib_sched_conns' ref on conn... */
			LASSERT(conn->ibc_scheduled);
			list_del(&conn->ibc_sched_list);
			conn->ibc_ready = 0;

			spin_unlock_irqrestore(&sched->ibs_lock, flags);

			wc.wr_id = IBLND_WID_INVAL;

			rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
			if (!rc) {
				rc = ib_req_notify_cq(conn->ibc_cq,
						      IB_CQ_NEXT_COMP);
				if (rc < 0) {
					CWARN("%s: ib_req_notify_cq failed: %d, closing connection\n",
					      libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
					kiblnd_close_conn(conn, -EIO);
					kiblnd_conn_decref(conn);
					spin_lock_irqsave(&sched->ibs_lock,
							  flags);
					continue;
				}

				rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
			}

			if (unlikely(rc > 0 && wc.wr_id == IBLND_WID_INVAL)) {
				LCONSOLE_ERROR("ib_poll_cq (rc: %d) returned invalid wr_id, opcode %d, status: %d, vendor_err: %d, conn: %s status: %d\nplease upgrade firmware and OFED or contact vendor.\n",
					       rc, wc.opcode, wc.status,
					       wc.vendor_err,
					       libcfs_nid2str(conn->ibc_peer->ibp_nid),
					       conn->ibc_state);
				rc = -EINVAL;
			}

			if (rc < 0) {
				CWARN("%s: ib_poll_cq failed: %d, closing connection\n",
				      libcfs_nid2str(conn->ibc_peer->ibp_nid),
				      rc);
				kiblnd_close_conn(conn, -EIO);
				kiblnd_conn_decref(conn);
				spin_lock_irqsave(&sched->ibs_lock, flags);
				continue;
			}

			spin_lock_irqsave(&sched->ibs_lock, flags);

			if (rc || conn->ibc_ready) {
				/*
				 * There may be another completion waiting; get
				 * another scheduler to check while I handle
				 * this one...
				 */
				/* +1 ref for sched_conns */
				kiblnd_conn_addref(conn);
				list_add_tail(&conn->ibc_sched_list,
					      &sched->ibs_conns);
				if (waitqueue_active(&sched->ibs_waitq))
					wake_up(&sched->ibs_waitq);
			} else {
				conn->ibc_scheduled = 0;
			}

			if (rc) {
				spin_unlock_irqrestore(&sched->ibs_lock, flags);
				kiblnd_complete(&wc);

				spin_lock_irqsave(&sched->ibs_lock, flags);
			}

			kiblnd_conn_decref(conn); /* ...drop my ref from above */
			did_something = 1;
		}

		if (did_something)
			continue;

		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue_exclusive(&sched->ibs_waitq, &wait);
		spin_unlock_irqrestore(&sched->ibs_lock, flags);

		schedule();
		busy_loops = 0;

		remove_wait_queue(&sched->ibs_waitq, &wait);
		spin_lock_irqsave(&sched->ibs_lock, flags);
	}

	spin_unlock_irqrestore(&sched->ibs_lock, flags);

	kiblnd_thread_fini();
	return 0;
}
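
/*
 * Device failover thread: retries devices on kib_failed_devs and, after a
 * long sleep, re-checks every device in case a bonding failover left us
 * listening on the wrong HCA (see the comment at the end of the loop).
 */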
int
kiblnd_failover_thread(void *arg)
{
	rwlock_t *glock = &kiblnd_data.kib_global_lock;
	kib_dev_t *dev;
	wait_queue_t wait;
	unsigned long flags;
	int rc;

	LASSERT(*kiblnd_tunables.kib_dev_failover);

	cfs_block_allsigs();

	init_waitqueue_entry(&wait, current);
	write_lock_irqsave(glock, flags);

	while (!kiblnd_data.kib_shutdown) {
		int do_failover = 0;
		int long_sleep;

		list_for_each_entry(dev, &kiblnd_data.kib_failed_devs,
				    ibd_fail_list) {
			if (time_before(cfs_time_current(),
					dev->ibd_next_failover))
				continue;
			do_failover = 1;
			break;
		}

		if (do_failover) {
			list_del_init(&dev->ibd_fail_list);
			dev->ibd_failover = 1;
			write_unlock_irqrestore(glock, flags);

			rc = kiblnd_dev_failover(dev);

			write_lock_irqsave(glock, flags);

			LASSERT(dev->ibd_failover);
			dev->ibd_failover = 0;
			if (rc >= 0) { /* Device is OK or failover succeed */
				dev->ibd_next_failover = cfs_time_shift(3);
				continue;
			}

			/* failed to failover, retry later */
			dev->ibd_next_failover =
				cfs_time_shift(min(dev->ibd_failed_failover, 10));
			if (kiblnd_dev_can_failover(dev)) {
				list_add_tail(&dev->ibd_fail_list,
					      &kiblnd_data.kib_failed_devs);
			}

			continue;
		}

		/* long sleep if no more pending failover */
		long_sleep = list_empty(&kiblnd_data.kib_failed_devs);

		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
		write_unlock_irqrestore(glock, flags);

		rc = schedule_timeout(long_sleep ? cfs_time_seconds(10) :
						   cfs_time_seconds(1));
		remove_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
		write_lock_irqsave(glock, flags);

		if (!long_sleep || rc)
			continue;

		/*
		 * have a long sleep, routine check all active devices,
		 * we need checking like this because if there is not active
		 * connection on the dev and no SEND from local, we may listen
		 * on wrong HCA for ever while there is a bonding failover
		 */
		list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
			if (kiblnd_dev_can_failover(dev)) {
				list_add_tail(&dev->ibd_fail_list,
					      &kiblnd_data.kib_failed_devs);
			}
		}
	}

	write_unlock_irqrestore(glock, flags);

	kiblnd_thread_fini();
	return 0;
}