/*
 * Copyright (c) 2005-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct sockaddr *sa, int salen,
					int flags);
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
static void svc_rdma_release_rqst(struct svc_rqst *);
static void dto_tasklet_func(unsigned long data);
static void svc_rdma_detach(struct svc_xprt *xprt);
static void svc_rdma_free(struct svc_xprt *xprt);
static int svc_rdma_has_wspace(struct svc_xprt *xprt);
static void rq_cq_reap(struct svcxprt_rdma *xprt);
static void sq_cq_reap(struct svcxprt_rdma *xprt);

DECLARE_TASKLET(dto_tasklet, dto_tasklet_func, 0UL);
static DEFINE_SPINLOCK(dto_lock);
static LIST_HEAD(dto_xprt_q);
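
/*
 * Method table for the RDMA transport. svc_rdma_class below hands
 * these entry points to the generic svc_xprt code, which invokes them
 * for connection setup (xpo_create/xpo_accept), I/O
 * (xpo_recvfrom/xpo_sendto), and teardown (xpo_detach/xpo_free).
 */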
static struct svc_xprt_ops svc_rdma_ops = {
	.xpo_create = svc_rdma_create,
	.xpo_recvfrom = svc_rdma_recvfrom,
	.xpo_sendto = svc_rdma_sendto,
	.xpo_release_rqst = svc_rdma_release_rqst,
	.xpo_detach = svc_rdma_detach,
	.xpo_free = svc_rdma_free,
	.xpo_prep_reply_hdr = svc_rdma_prep_reply_hdr,
	.xpo_has_wspace = svc_rdma_has_wspace,
	.xpo_accept = svc_rdma_accept,
};
struct svc_xprt_class svc_rdma_class = {
	.xcl_name = "rdma",
	.xcl_owner = THIS_MODULE,
	.xcl_ops = &svc_rdma_ops,
	.xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
};
/* WR context cache. Created in svc_rdma.c  */
extern struct kmem_cache *svc_rdma_ctxt_cachep;

struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_op_ctxt *ctxt;

	while (1) {
		ctxt = kmem_cache_alloc(svc_rdma_ctxt_cachep, GFP_KERNEL);
		if (ctxt)
			break;
		/* If the allocation fails, wait briefly and retry. */
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
	}
	ctxt->xprt = xprt;
	INIT_LIST_HEAD(&ctxt->dto_q);
	ctxt->count = 0;
	atomic_inc(&xprt->sc_ctxt_used);
	return ctxt;
}
static void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
{
	struct svcxprt_rdma *xprt = ctxt->xprt;
	int i;

	for (i = 0; i < ctxt->count && ctxt->sge[i].length; i++) {
		atomic_dec(&xprt->sc_dma_used);
		ib_dma_unmap_single(xprt->sc_cm_id->device,
				    ctxt->sge[i].addr,
				    ctxt->sge[i].length,
				    ctxt->direction);
	}
}
void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
{
	struct svcxprt_rdma *xprt;
	int i;

	BUG_ON(!ctxt);
	xprt = ctxt->xprt;
	if (free_pages)
		for (i = 0; i < ctxt->count; i++)
			put_page(ctxt->pages[i]);

	kmem_cache_free(svc_rdma_ctxt_cachep, ctxt);
	atomic_dec(&xprt->sc_ctxt_used);
}
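
/*
 * Illustrative lifecycle of an op context: allocate it with
 * svc_rdma_get_context() before posting a WR, stash the pointer in
 * wr_id, recover it from wc.wr_id at completion time, unmap with
 * svc_rdma_unmap_dma(), and release it with svc_rdma_put_context().
 * Pass free_pages=1 only when the pages attached to the context
 * should be released as well.
 */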
/* Temporary NFS request map cache. Created in svc_rdma.c  */
extern struct kmem_cache *svc_rdma_map_cachep;

/*
 * Temporary NFS req mappings are shared across all transport
 * instances. These are short lived and should be bounded by the number
 * of concurrent server threads * depth of the SQ.
 */
struct svc_rdma_req_map *svc_rdma_get_req_map(void)
{
	struct svc_rdma_req_map *map;

	while (1) {
		map = kmem_cache_alloc(svc_rdma_map_cachep, GFP_KERNEL);
		if (map)
			break;
		/* If the allocation fails, wait briefly and retry. */
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
	}
	map->count = 0;
	return map;
}

void svc_rdma_put_req_map(struct svc_rdma_req_map *map)
{
	kmem_cache_free(svc_rdma_map_cachep, map);
}
/* ib_cq event handler */
static void cq_event_handler(struct ib_event *event, void *context)
{
	struct svc_xprt *xprt = context;

	dprintk("svcrdma: received CQ event id=%d, context=%p\n",
		event->event, context);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
}
/* QP event handler */
static void qp_event_handler(struct ib_event *event, void *context)
{
	struct svc_xprt *xprt = context;

	switch (event->event) {
	/* These are considered benign events */
	case IB_EVENT_PATH_MIG:
	case IB_EVENT_COMM_EST:
	case IB_EVENT_SQ_DRAINED:
	case IB_EVENT_QP_LAST_WQE_REACHED:
		dprintk("svcrdma: QP event %d received for QP=%p\n",
			event->event, event->element.qp);
		break;
	/* These are considered fatal events */
	case IB_EVENT_PATH_MIG_ERR:
	case IB_EVENT_QP_FATAL:
	case IB_EVENT_QP_REQ_ERR:
	case IB_EVENT_QP_ACCESS_ERR:
	case IB_EVENT_DEVICE_FATAL:
	default:
		dprintk("svcrdma: QP ERROR event %d received for QP=%p, "
			"closing transport\n",
			event->event, event->element.qp);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		break;
	}
}
/*
 * Data Transfer Operation Tasklet
 *
 * Walks a list of transports with I/O pending, removing entries as
 * they are added to the server's I/O pending list. Two bits indicate
 * if SQ, RQ, or both have I/O pending. The dto_lock is an irqsave
 * spinlock that serializes access to the transport list with the RQ
 * and SQ interrupt handlers.
 */
static void dto_tasklet_func(unsigned long data)
{
	struct svcxprt_rdma *xprt;
	unsigned long flags;

	spin_lock_irqsave(&dto_lock, flags);
	while (!list_empty(&dto_xprt_q)) {
		xprt = list_entry(dto_xprt_q.next,
				  struct svcxprt_rdma, sc_dto_q);
		list_del_init(&xprt->sc_dto_q);
		spin_unlock_irqrestore(&dto_lock, flags);

		rq_cq_reap(xprt);
		sq_cq_reap(xprt);

		svc_xprt_put(&xprt->sc_xprt);
		spin_lock_irqsave(&dto_lock, flags);
	}
	spin_unlock_irqrestore(&dto_lock, flags);
}
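
/*
 * In short: the RQ/SQ completion handlers below run in interrupt
 * context, so they only set RDMAXPRT_RQ_PENDING/RDMAXPRT_SQ_PENDING,
 * queue the transport on dto_xprt_q, and schedule the tasklet above,
 * which then drains both CQs via rq_cq_reap() and sq_cq_reap() from
 * softirq context.
 */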
/*
 * Receive Queue Completion Handler
 *
 * Since an RQ completion handler is called on interrupt context, we
 * need to defer the handling of the I/O to a tasklet
 */
static void rq_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct svcxprt_rdma *xprt = cq_context;
	unsigned long flags;

	/* Guard against unconditional flush call for destroyed QP */
	if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
		return;

	/*
	 * Set the bit regardless of whether or not it's on the list
	 * because it may be on the list already due to an SQ
	 * completion.
	 */
	set_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags);

	/*
	 * If this transport is not already on the DTO transport queue,
	 * add it
	 */
	spin_lock_irqsave(&dto_lock, flags);
	if (list_empty(&xprt->sc_dto_q)) {
		svc_xprt_get(&xprt->sc_xprt);
		list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
	}
	spin_unlock_irqrestore(&dto_lock, flags);

	/* Tasklet does all the work to avoid irqsave locks. */
	tasklet_schedule(&dto_tasklet);
}
/*
 * rq_cq_reap - Process the RQ CQ.
 *
 * Take all completing WC off the CQE and enqueue the associated DTO
 * context on the dto_q for the transport.
 *
 * Note that caller must hold a transport reference.
 */
static void rq_cq_reap(struct svcxprt_rdma *xprt)
{
	int ret;
	struct ib_wc wc;
	struct svc_rdma_op_ctxt *ctxt = NULL;

	if (!test_and_clear_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags))
		return;

	ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
	atomic_inc(&rdma_stat_rq_poll);

	while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
		ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
		ctxt->wc_status = wc.status;
		ctxt->byte_len = wc.byte_len;
		svc_rdma_unmap_dma(ctxt);
		if (wc.status != IB_WC_SUCCESS) {
			/* Close the transport */
			dprintk("svcrdma: transport closing putting ctxt %p\n",
				ctxt);
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
			svc_rdma_put_context(ctxt, 1);
			svc_xprt_put(&xprt->sc_xprt);
			continue;
		}
		spin_lock_bh(&xprt->sc_rq_dto_lock);
		list_add_tail(&ctxt->dto_q, &xprt->sc_rq_dto_q);
		spin_unlock_bh(&xprt->sc_rq_dto_lock);
		svc_xprt_put(&xprt->sc_xprt);
	}

	if (ctxt)
		atomic_inc(&rdma_stat_rq_prod);

	set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
	/*
	 * If data arrived before established event,
	 * don't enqueue. This defers RPC I/O until the
	 * RDMA connection is complete.
	 */
	if (!test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
		svc_xprt_enqueue(&xprt->sc_xprt);
}
/*
 * Send Queue Completion Handler - potentially called on interrupt context.
 *
 * Note that caller must hold a transport reference.
 */
static void sq_cq_reap(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_op_ctxt *ctxt = NULL;
	struct ib_wc wc;
	struct ib_cq *cq = xprt->sc_sq_cq;
	int ret;

	if (!test_and_clear_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags))
		return;

	ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
	atomic_inc(&rdma_stat_sq_poll);
	while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
		ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
		xprt = ctxt->xprt;

		svc_rdma_unmap_dma(ctxt);
		if (wc.status != IB_WC_SUCCESS)
			/* Close the transport */
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);

		/* Decrement used SQ WR count */
		atomic_dec(&xprt->sc_sq_count);
		wake_up(&xprt->sc_send_wait);

		switch (ctxt->wr_op) {
		case IB_WR_SEND:
			svc_rdma_put_context(ctxt, 1);
			break;

		case IB_WR_RDMA_WRITE:
			svc_rdma_put_context(ctxt, 0);
			break;

		case IB_WR_RDMA_READ:
			if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) {
				struct svc_rdma_op_ctxt *read_hdr = ctxt->read_hdr;
				BUG_ON(!read_hdr);
				spin_lock_bh(&xprt->sc_rq_dto_lock);
				set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
				list_add_tail(&read_hdr->dto_q,
					      &xprt->sc_read_complete_q);
				spin_unlock_bh(&xprt->sc_rq_dto_lock);
				svc_xprt_enqueue(&xprt->sc_xprt);
			}
			svc_rdma_put_context(ctxt, 0);
			break;

		default:
			printk(KERN_ERR "svcrdma: unexpected completion type, "
			       "opcode=%d, status=%d\n",
			       wc.opcode, wc.status);
			break;
		}
		svc_xprt_put(&xprt->sc_xprt);
	}

	if (ctxt)
		atomic_inc(&rdma_stat_sq_prod);
}
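
/*
 * Note on the IB_WR_RDMA_READ case above: the contexts for the read
 * WRs themselves are simply released. Only when the last read of an
 * RPC completes (RDMACTXT_F_LAST_CTXT) is the saved receive context
 * (read_hdr) moved to sc_read_complete_q, where svc_rdma_recvfrom()
 * picks it up to finish assembling the request.
 */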
static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct svcxprt_rdma *xprt = cq_context;
	unsigned long flags;

	/* Guard against unconditional flush call for destroyed QP */
	if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
		return;

	/*
	 * Set the bit regardless of whether or not it's on the list
	 * because it may be on the list already due to an RQ
	 * completion.
	 */
	set_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags);

	/*
	 * If this transport is not already on the DTO transport queue,
	 * add it
	 */
	spin_lock_irqsave(&dto_lock, flags);
	if (list_empty(&xprt->sc_dto_q)) {
		svc_xprt_get(&xprt->sc_xprt);
		list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
	}
	spin_unlock_irqrestore(&dto_lock, flags);

	/* Tasklet does all the work to avoid irqsave locks. */
	tasklet_schedule(&dto_tasklet);
}
static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
					     int listener)
{
	struct svcxprt_rdma *cma_xprt = kzalloc(sizeof *cma_xprt, GFP_KERNEL);

	if (!cma_xprt)
		return NULL;
	svc_xprt_init(&svc_rdma_class, &cma_xprt->sc_xprt, serv);
	INIT_LIST_HEAD(&cma_xprt->sc_accept_q);
	INIT_LIST_HEAD(&cma_xprt->sc_dto_q);
	INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
	INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q);
	init_waitqueue_head(&cma_xprt->sc_send_wait);

	spin_lock_init(&cma_xprt->sc_lock);
	spin_lock_init(&cma_xprt->sc_rq_dto_lock);

	cma_xprt->sc_ord = svcrdma_ord;

	cma_xprt->sc_max_req_size = svcrdma_max_req_size;
	cma_xprt->sc_max_requests = svcrdma_max_requests;
	cma_xprt->sc_sq_depth = svcrdma_max_requests * RPCRDMA_SQ_DEPTH_MULT;
	atomic_set(&cma_xprt->sc_sq_count, 0);
	atomic_set(&cma_xprt->sc_ctxt_used, 0);

	if (listener)
		set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);

	return cma_xprt;
}
struct page *svc_rdma_get_page(void)
{
	struct page *page;

	while ((page = alloc_page(GFP_KERNEL)) == NULL) {
		/* If we can't get memory, wait a bit and try again */
		printk(KERN_INFO "svcrdma: out of memory...retrying in 1000 "
		       "ms.\n");
		schedule_timeout_uninterruptible(msecs_to_jiffies(1000));
	}
	return page;
}
int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
{
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct svc_rdma_op_ctxt *ctxt;
	struct page *page;
	unsigned long pa;
	int sge_no;
	int buflen;
	int ret;

	ctxt = svc_rdma_get_context(xprt);
	buflen = 0;
	ctxt->direction = DMA_FROM_DEVICE;
	for (sge_no = 0; buflen < xprt->sc_max_req_size; sge_no++) {
		BUG_ON(sge_no >= xprt->sc_max_sge);
		page = svc_rdma_get_page();
		ctxt->pages[sge_no] = page;
		atomic_inc(&xprt->sc_dma_used);
		pa = ib_dma_map_page(xprt->sc_cm_id->device,
				     page, 0, PAGE_SIZE,
				     DMA_FROM_DEVICE);
		ctxt->sge[sge_no].addr = pa;
		ctxt->sge[sge_no].length = PAGE_SIZE;
		ctxt->sge[sge_no].lkey = xprt->sc_phys_mr->lkey;
		buflen += PAGE_SIZE;
	}
	ctxt->count = sge_no;
	recv_wr.next = NULL;
	recv_wr.sg_list = &ctxt->sge[0];
	recv_wr.num_sge = ctxt->count;
	recv_wr.wr_id = (u64)(unsigned long)ctxt;

	svc_xprt_get(&xprt->sc_xprt);
	ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr);
	if (ret) {
		svc_xprt_put(&xprt->sc_xprt);
		svc_rdma_put_context(ctxt, 1);
	}
	return ret;
}
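
/*
 * Receive buffer sizing, by way of example: each posted WR covers
 * sc_max_req_size bytes in PAGE_SIZE chunks, so a 16KB maximum
 * request size with 4KB pages yields four SGEs per receive WR.
 */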
/*
 * This function handles the CONNECT_REQUEST event on a listening
 * endpoint. It is passed the cma_id for the _new_ connection. The context in
 * this cma_id is inherited from the listening cma_id and is the svc_xprt
 * structure for the listening endpoint.
 *
 * This function creates a new xprt for the new connection and enqueues it on
 * the accept queue for the listen xprt. When the listen thread is kicked, it
 * will call the recvfrom method on the listen xprt which will accept the new
 * connection.
 */
static void handle_connect_req(struct rdma_cm_id *new_cma_id, size_t client_ird)
{
	struct svcxprt_rdma *listen_xprt = new_cma_id->context;
	struct svcxprt_rdma *newxprt;
	struct sockaddr *sa;

	/* Create a new transport */
	newxprt = rdma_create_xprt(listen_xprt->sc_xprt.xpt_server, 0);
	if (!newxprt) {
		dprintk("svcrdma: failed to create new transport\n");
		return;
	}
	newxprt->sc_cm_id = new_cma_id;
	new_cma_id->context = newxprt;
	dprintk("svcrdma: Creating newxprt=%p, cm_id=%p, listenxprt=%p\n",
		newxprt, newxprt->sc_cm_id, listen_xprt);

	/* Save client advertised inbound read limit for use later in accept. */
	newxprt->sc_ord = client_ird;

	/* Set the local and remote addresses in the transport */
	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
	svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa));
	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa));

	/*
	 * Enqueue the new transport on the accept queue of the listening
	 * transport
	 */
	spin_lock_bh(&listen_xprt->sc_lock);
	list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q);
	spin_unlock_bh(&listen_xprt->sc_lock);

	/*
	 * Can't use svc_xprt_received here because we are not on a
	 * rqstp thread
	 */
	set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags);
	svc_xprt_enqueue(&listen_xprt->sc_xprt);
}
/*
 * Handles events generated on the listening endpoint. These events will
 * either be incoming connect requests or adapter removal events.
 */
static int rdma_listen_handler(struct rdma_cm_id *cma_id,
			       struct rdma_cm_event *event)
{
	struct svcxprt_rdma *xprt = cma_id->context;
	int ret = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, "
			"event=%d\n", cma_id, cma_id->context, event->event);
		handle_connect_req(cma_id,
				   event->param.conn.responder_resources);
		break;

	case RDMA_CM_EVENT_ESTABLISHED:
		/* Accept complete */
		dprintk("svcrdma: Connection completed on LISTEN xprt=%p, "
			"cm_id=%p\n", xprt, cma_id);
		break;

	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		dprintk("svcrdma: Device removal xprt=%p, cm_id=%p\n",
			xprt, cma_id);
		set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
		break;

	default:
		dprintk("svcrdma: Unexpected event on listening endpoint %p, "
			"event=%d\n", cma_id, event->event);
		break;
	}

	return ret;
}
static int rdma_cma_handler(struct rdma_cm_id *cma_id,
			    struct rdma_cm_event *event)
{
	struct svc_xprt *xprt = cma_id->context;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	switch (event->event) {
	case RDMA_CM_EVENT_ESTABLISHED:
		/* Accept complete */
		svc_xprt_get(xprt);
		dprintk("svcrdma: Connection completed on DTO xprt=%p, "
			"cm_id=%p\n", xprt, cma_id);
		clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags);
		svc_xprt_enqueue(xprt);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
		dprintk("svcrdma: Disconnect on DTO xprt=%p, cm_id=%p\n",
			xprt, cma_id);
		if (xprt) {
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_enqueue(xprt);
			svc_xprt_put(xprt);
		}
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		dprintk("svcrdma: Device removal cma_id=%p, xprt = %p, "
			"event=%d\n", cma_id, xprt, event->event);
		if (xprt) {
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_enqueue(xprt);
		}
		break;
	default:
		dprintk("svcrdma: Unexpected event on DTO endpoint %p, "
			"event=%d\n", cma_id, event->event);
		break;
	}
	return 0;
}
/*
 * Create a listening RDMA service endpoint.
 */
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct sockaddr *sa, int salen,
					int flags)
{
	struct rdma_cm_id *listen_id;
	struct svcxprt_rdma *cma_xprt;
	struct svc_xprt *xprt;
	int ret;

	dprintk("svcrdma: Creating RDMA socket\n");

	cma_xprt = rdma_create_xprt(serv, 1);
	if (!cma_xprt)
		return ERR_PTR(-ENOMEM);
	xprt = &cma_xprt->sc_xprt;

	listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP);
	if (IS_ERR(listen_id)) {
		ret = PTR_ERR(listen_id);
		dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
		goto err0;
	}

	ret = rdma_bind_addr(listen_id, sa);
	if (ret) {
		dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret);
		goto err1;
	}
	cma_xprt->sc_cm_id = listen_id;

	ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
	if (ret) {
		dprintk("svcrdma: rdma_listen failed = %d\n", ret);
		goto err1;
	}

	/*
	 * We need to use the address from the cm_id in case the
	 * caller specified 0 for the port number.
	 */
	sa = (struct sockaddr *)&cma_xprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&cma_xprt->sc_xprt, sa, salen);

	return &cma_xprt->sc_xprt;

 err1:
	rdma_destroy_id(listen_id);
 err0:
	kfree(cma_xprt);
	return ERR_PTR(ret);
}
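
/*
 * A listener transport never gets a QP, CQs, PD, or MR; those are
 * allocated only for the per-connection transports created by
 * handle_connect_req() and completed in svc_rdma_accept(). This is
 * why __svc_rdma_free() checks each resource pointer before
 * destroying it.
 */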
/*
 * This is the xpo_recvfrom function for listening endpoints. Its
 * purpose is to accept incoming connections. The CMA callback handler
 * has already created a new transport and attached it to the new CMA
 * ID.
 *
 * There is a queue of pending connections hung on the listening
 * transport. This queue contains the new svc_xprt structure. This
 * function takes svc_xprt structures off the accept_q and completes
 * the connection.
 */
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *listen_rdma;
	struct svcxprt_rdma *newxprt = NULL;
	struct rdma_conn_param conn_param;
	struct ib_qp_init_attr qp_attr;
	struct ib_device_attr devattr;
	int ret;
	int i;

	listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
	clear_bit(XPT_CONN, &xprt->xpt_flags);
	/* Get the next entry off the accept list */
	spin_lock_bh(&listen_rdma->sc_lock);
	if (!list_empty(&listen_rdma->sc_accept_q)) {
		newxprt = list_entry(listen_rdma->sc_accept_q.next,
				     struct svcxprt_rdma, sc_accept_q);
		list_del_init(&newxprt->sc_accept_q);
	}
	if (!list_empty(&listen_rdma->sc_accept_q))
		set_bit(XPT_CONN, &listen_rdma->sc_xprt.xpt_flags);
	spin_unlock_bh(&listen_rdma->sc_lock);
	if (!newxprt)
		return NULL;

	dprintk("svcrdma: newxprt from accept queue = %p, cm_id=%p\n",
		newxprt, newxprt->sc_cm_id);
	ret = ib_query_device(newxprt->sc_cm_id->device, &devattr);
	if (ret) {
		dprintk("svcrdma: could not query device attributes on "
			"device %p, rc=%d\n", newxprt->sc_cm_id->device, ret);
		goto errout;
	}

	/* Qualify the transport resource defaults with the
	 * capabilities of this particular device */
	newxprt->sc_max_sge = min((size_t)devattr.max_sge,
				  (size_t)RPCSVC_MAXPAGES);
	newxprt->sc_max_requests = min((size_t)devattr.max_qp_wr,
				       (size_t)svcrdma_max_requests);
	newxprt->sc_sq_depth = RPCRDMA_SQ_DEPTH_MULT * newxprt->sc_max_requests;

	/*
	 * Limit ORD based on client limit, local device limit, and
	 * configured svcrdma limit.
	 */
	newxprt->sc_ord = min_t(size_t, devattr.max_qp_rd_atom, newxprt->sc_ord);
	newxprt->sc_ord = min_t(size_t, svcrdma_ord, newxprt->sc_ord);
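
	/*
	 * At this point sc_ord is the minimum of the client-advertised
	 * responder resources, the device's max_qp_rd_atom, and the
	 * svcrdma_ord setting. For example, a client offering 16, a
	 * device supporting 8, and svcrdma_ord of 32 yield an ORD of 8.
	 */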
	newxprt->sc_pd = ib_alloc_pd(newxprt->sc_cm_id->device);
	if (IS_ERR(newxprt->sc_pd)) {
		dprintk("svcrdma: error creating PD for connect request\n");
		goto errout;
	}
	newxprt->sc_sq_cq = ib_create_cq(newxprt->sc_cm_id->device,
					 sq_comp_handler,
					 cq_event_handler,
					 newxprt,
					 newxprt->sc_sq_depth,
					 0);
	if (IS_ERR(newxprt->sc_sq_cq)) {
		dprintk("svcrdma: error creating SQ CQ for connect request\n");
		goto errout;
	}
	newxprt->sc_rq_cq = ib_create_cq(newxprt->sc_cm_id->device,
					 rq_comp_handler,
					 cq_event_handler,
					 newxprt,
					 newxprt->sc_max_requests,
					 0);
	if (IS_ERR(newxprt->sc_rq_cq)) {
		dprintk("svcrdma: error creating RQ CQ for connect request\n");
		goto errout;
	}

	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.event_handler = qp_event_handler;
	qp_attr.qp_context = &newxprt->sc_xprt;
	qp_attr.cap.max_send_wr = newxprt->sc_sq_depth;
	qp_attr.cap.max_recv_wr = newxprt->sc_max_requests;
	qp_attr.cap.max_send_sge = newxprt->sc_max_sge;
	qp_attr.cap.max_recv_sge = newxprt->sc_max_sge;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.send_cq = newxprt->sc_sq_cq;
	qp_attr.recv_cq = newxprt->sc_rq_cq;
	dprintk("svcrdma: newxprt->sc_cm_id=%p, newxprt->sc_pd=%p\n"
		"    cm_id->device=%p, sc_pd->device=%p\n"
		"    cap.max_send_wr = %d\n"
		"    cap.max_recv_wr = %d\n"
		"    cap.max_send_sge = %d\n"
		"    cap.max_recv_sge = %d\n",
		newxprt->sc_cm_id, newxprt->sc_pd,
		newxprt->sc_cm_id->device, newxprt->sc_pd->device,
		qp_attr.cap.max_send_wr,
		qp_attr.cap.max_recv_wr,
		qp_attr.cap.max_send_sge,
		qp_attr.cap.max_recv_sge);
	ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr);
	if (ret) {
		/*
		 * XXX: This is a hack. We need an xx_request_qp interface
		 * that will adjust the qp_attr's with a best-effort
		 * number
		 */
		qp_attr.cap.max_send_sge -= 2;
		qp_attr.cap.max_recv_sge -= 2;
		ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd,
				     &qp_attr);
		if (ret) {
			dprintk("svcrdma: failed to create QP, ret=%d\n", ret);
			goto errout;
		}
		newxprt->sc_max_sge = qp_attr.cap.max_send_sge;
		newxprt->sc_max_sge = qp_attr.cap.max_recv_sge;
		newxprt->sc_sq_depth = qp_attr.cap.max_send_wr;
		newxprt->sc_max_requests = qp_attr.cap.max_recv_wr;
	}
	newxprt->sc_qp = newxprt->sc_cm_id->qp;
	/* Register all of physical memory */
	newxprt->sc_phys_mr = ib_get_dma_mr(newxprt->sc_pd,
					    IB_ACCESS_LOCAL_WRITE |
					    IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(newxprt->sc_phys_mr)) {
		dprintk("svcrdma: Failed to create DMA MR ret=%d\n", ret);
		goto errout;
	}
	/* Post receive buffers */
	for (i = 0; i < newxprt->sc_max_requests; i++) {
		ret = svc_rdma_post_recv(newxprt);
		if (ret) {
			dprintk("svcrdma: failure posting receive buffers\n");
			goto errout;
		}
	}

	/* Swap out the handler */
	newxprt->sc_cm_id->event_handler = rdma_cma_handler;

	/*
	 * Arm the CQs for the SQ and RQ before accepting so we can't
	 * miss the first message
	 */
	ib_req_notify_cq(newxprt->sc_sq_cq, IB_CQ_NEXT_COMP);
	ib_req_notify_cq(newxprt->sc_rq_cq, IB_CQ_NEXT_COMP);
	/* Accept Connection */
	set_bit(RDMAXPRT_CONN_PENDING, &newxprt->sc_flags);
	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = 0;
	conn_param.initiator_depth = newxprt->sc_ord;
	ret = rdma_accept(newxprt->sc_cm_id, &conn_param);
	if (ret) {
		dprintk("svcrdma: failed to accept new connection, ret=%d\n",
			ret);
		goto errout;
	}

	dprintk("svcrdma: new connection %p accepted with the following "
		"attributes:\n"
		"    local_ip        : %d.%d.%d.%d\n"
		"    local_port      : %d\n"
		"    remote_ip       : %d.%d.%d.%d\n"
		"    remote_port     : %d\n"
		"    max_sge         : %d\n"
		"    sq_depth        : %d\n"
		"    max_requests    : %d\n"
		"    ord             : %d\n",
		newxprt,
		NIPQUAD(((struct sockaddr_in *)&newxprt->sc_cm_id->
			 route.addr.src_addr)->sin_addr.s_addr),
		ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
		       route.addr.src_addr)->sin_port),
		NIPQUAD(((struct sockaddr_in *)&newxprt->sc_cm_id->
			 route.addr.dst_addr)->sin_addr.s_addr),
		ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
		       route.addr.dst_addr)->sin_port),
		newxprt->sc_max_sge,
		newxprt->sc_sq_depth,
		newxprt->sc_max_requests,
		newxprt->sc_ord);

	return &newxprt->sc_xprt;
 errout:
	dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret);
	/* Take a reference in case the DTO handler runs */
	svc_xprt_get(&newxprt->sc_xprt);
	if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp))
		ib_destroy_qp(newxprt->sc_qp);
	rdma_destroy_id(newxprt->sc_cm_id);
	/* This call to put will destroy the transport */
	svc_xprt_put(&newxprt->sc_xprt);
	return NULL;
}
static void svc_rdma_release_rqst(struct svc_rqst *rqstp)
{
}
/*
 * When connected, an svc_xprt has at least two references:
 *
 * - A reference held by the cm_id between the ESTABLISHED and
 *   DISCONNECTED events. If the remote peer disconnected first, this
 *   reference could be gone.
 *
 * - A reference held by the svc_recv code that called this function
 *   as part of close processing.
 *
 * At a minimum, one reference should still be held.
 */
static void svc_rdma_detach(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	dprintk("svc: svc_rdma_detach(%p)\n", xprt);

	/* Disconnect and flush posted WQE */
	rdma_disconnect(rdma->sc_cm_id);
}
static void __svc_rdma_free(struct work_struct *work)
{
	struct svcxprt_rdma *rdma =
		container_of(work, struct svcxprt_rdma, sc_work);
	dprintk("svcrdma: svc_rdma_free(%p)\n", rdma);

	/* We should only be called from kref_put */
	BUG_ON(atomic_read(&rdma->sc_xprt.xpt_ref.refcount) != 0);

	/*
	 * Destroy queued, but not processed read completions. Note
	 * that this cleanup has to be done before destroying the
	 * cm_id because the device ptr is needed to unmap the dma in
	 * svc_rdma_put_context.
	 */
	while (!list_empty(&rdma->sc_read_complete_q)) {
		struct svc_rdma_op_ctxt *ctxt;
		ctxt = list_entry(rdma->sc_read_complete_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
		svc_rdma_put_context(ctxt, 1);
	}

	/* Destroy queued, but not processed recv completions */
	while (!list_empty(&rdma->sc_rq_dto_q)) {
		struct svc_rdma_op_ctxt *ctxt;
		ctxt = list_entry(rdma->sc_rq_dto_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
		svc_rdma_put_context(ctxt, 1);
	}

	/* Warn if we leaked a resource or under-referenced */
	WARN_ON(atomic_read(&rdma->sc_ctxt_used) != 0);
	WARN_ON(atomic_read(&rdma->sc_dma_used) != 0);

	/* Destroy the QP if present (not a listener) */
	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
		ib_destroy_qp(rdma->sc_qp);

	if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
		ib_destroy_cq(rdma->sc_sq_cq);

	if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq))
		ib_destroy_cq(rdma->sc_rq_cq);

	if (rdma->sc_phys_mr && !IS_ERR(rdma->sc_phys_mr))
		ib_dereg_mr(rdma->sc_phys_mr);

	if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
		ib_dealloc_pd(rdma->sc_pd);

	/* Destroy the CM ID */
	rdma_destroy_id(rdma->sc_cm_id);

	kfree(rdma);
}
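
/*
 * Teardown is deferred to a workqueue because the final svc_xprt_put
 * can occur in softirq context (e.g. from the DTO tasklet), while the
 * verbs destroy calls above may sleep; __svc_rdma_free therefore runs
 * from process context.
 */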
static void svc_rdma_free(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	INIT_WORK(&rdma->sc_work, __svc_rdma_free);
	schedule_work(&rdma->sc_work);
}
static int svc_rdma_has_wspace(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	/*
	 * If there are fewer SQ WR available than required to send a
	 * simple response, return false.
	 */
	if ((rdma->sc_sq_depth - atomic_read(&rdma->sc_sq_count) < 3))
		return 0;

	/*
	 * ...or there are already waiters on the SQ,
	 * return false.
	 */
	if (waitqueue_active(&rdma->sc_send_wait))
		return 0;

	/* Otherwise return true. */
	return 1;
}
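
/*
 * The threshold of three WRs above is a rough budget for a simple
 * reply: the SEND carrying the RPC reply plus an RDMA_WRITE or two
 * for any reply chunks.
 */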
int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
{
	struct ib_send_wr *bad_wr;
	int ret;

	if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
		return -ENOTCONN;

	BUG_ON(wr->send_flags != IB_SEND_SIGNALED);
	BUG_ON(((struct svc_rdma_op_ctxt *)(unsigned long)wr->wr_id)->wr_op !=
	       wr->opcode);
	/* If the SQ is full, wait until an SQ entry is available */
	while (1) {
		spin_lock_bh(&xprt->sc_lock);
		if (xprt->sc_sq_depth == atomic_read(&xprt->sc_sq_count)) {
			spin_unlock_bh(&xprt->sc_lock);
			atomic_inc(&rdma_stat_sq_starve);

			/* See if we can opportunistically reap SQ WR to make room */
			sq_cq_reap(xprt);

			/* Wait until SQ WR available if SQ still full */
			wait_event(xprt->sc_send_wait,
				   atomic_read(&xprt->sc_sq_count) <
				   xprt->sc_sq_depth);
			if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
				return 0;
			continue;
		}
		/* Bumped used SQ WR count and post */
		svc_xprt_get(&xprt->sc_xprt);
		ret = ib_post_send(xprt->sc_qp, wr, &bad_wr);
		if (!ret)
			atomic_inc(&xprt->sc_sq_count);
		else {
			svc_xprt_put(&xprt->sc_xprt);
			dprintk("svcrdma: failed to post SQ WR rc=%d, "
				"sc_sq_count=%d, sc_sq_depth=%d\n",
				ret, atomic_read(&xprt->sc_sq_count),
				xprt->sc_sq_depth);
		}
		spin_unlock_bh(&xprt->sc_lock);
		break;
	}
	return ret;
}
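
/*
 * SQ accounting invariant: sc_sq_count is incremented here for each
 * successfully posted WR and decremented in sq_cq_reap() when the
 * completion is reaped. Both svc_rdma_has_wspace() and the wait above
 * key off this counter.
 */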
void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
			 enum rpcrdma_errcode err)
{
	struct ib_send_wr err_wr;
	struct ib_sge sge;
	struct page *p;
	struct svc_rdma_op_ctxt *ctxt;
	u32 *va;
	int length;
	int ret;

	p = svc_rdma_get_page();
	va = page_address(p);

	/* XDR encode error */
	length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);

	/* Prepare SGE for local address */
	atomic_inc(&xprt->sc_dma_used);
	sge.addr = ib_dma_map_page(xprt->sc_cm_id->device,
				   p, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	sge.lkey = xprt->sc_phys_mr->lkey;
	sge.length = length;

	ctxt = svc_rdma_get_context(xprt);
	ctxt->count = 1;
	ctxt->pages[0] = p;

	/* Prepare SEND WR */
	memset(&err_wr, 0, sizeof err_wr);
	ctxt->wr_op = IB_WR_SEND;
	err_wr.wr_id = (unsigned long)ctxt;
	err_wr.sg_list = &sge;
	err_wr.num_sge = 1;
	err_wr.opcode = IB_WR_SEND;
	err_wr.send_flags = IB_SEND_SIGNALED;

	/* Post It */
	ret = svc_rdma_send(xprt, &err_wr);
	if (ret) {
		dprintk("svcrdma: Error %d posting send for protocol error\n",
			ret);
		svc_rdma_put_context(ctxt, 1);
	}
}