/*
 * Copyright (c) 2005-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
                                        struct sockaddr *sa, int salen,
                                        int flags);
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
static void svc_rdma_release_rqst(struct svc_rqst *);
static void dto_tasklet_func(unsigned long data);
static void svc_rdma_detach(struct svc_xprt *xprt);
static void svc_rdma_free(struct svc_xprt *xprt);
static int svc_rdma_has_wspace(struct svc_xprt *xprt);
static void rq_cq_reap(struct svcxprt_rdma *xprt);
static void sq_cq_reap(struct svcxprt_rdma *xprt);

DECLARE_TASKLET(dto_tasklet, dto_tasklet_func, 0UL);
static DEFINE_SPINLOCK(dto_lock);
static LIST_HEAD(dto_xprt_q);
static struct svc_xprt_ops svc_rdma_ops = {
        .xpo_create = svc_rdma_create,
        .xpo_recvfrom = svc_rdma_recvfrom,
        .xpo_sendto = svc_rdma_sendto,
        .xpo_release_rqst = svc_rdma_release_rqst,
        .xpo_detach = svc_rdma_detach,
        .xpo_free = svc_rdma_free,
        .xpo_prep_reply_hdr = svc_rdma_prep_reply_hdr,
        .xpo_has_wspace = svc_rdma_has_wspace,
        .xpo_accept = svc_rdma_accept,
};

struct svc_xprt_class svc_rdma_class = {
        .xcl_name = "rdma",
        .xcl_owner = THIS_MODULE,
        .xcl_ops = &svc_rdma_ops,
        .xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
};
/* WR context cache. Created in svc_rdma.c  */
extern struct kmem_cache *svc_rdma_ctxt_cachep;
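
/*
 * Allocate a WR context from the context cache, retrying until memory
 * becomes available rather than failing. The context is charged to the
 * transport via sc_ctxt_used.
 */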
struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
{
        struct svc_rdma_op_ctxt *ctxt;

        while (1) {
                ctxt = kmem_cache_alloc(svc_rdma_ctxt_cachep, GFP_KERNEL);
                if (ctxt)
                        break;
                schedule_timeout_uninterruptible(msecs_to_jiffies(500));
        }
        ctxt->xprt = xprt;
        INIT_LIST_HEAD(&ctxt->dto_q);
        ctxt->count = 0;
        ctxt->frmr = NULL;
        atomic_inc(&xprt->sc_ctxt_used);
        return ctxt;
}
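
/* Unmap any SGEs in the context that were mapped with the local DMA lkey. */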
static void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
{
        struct svcxprt_rdma *xprt = ctxt->xprt;
        int i;

        for (i = 0; i < ctxt->count && ctxt->sge[i].length; i++) {
                /*
                 * Unmap the DMA addr in the SGE if the lkey matches
                 * the sc_dma_lkey, otherwise, ignore it since it is
                 * an FRMR lkey and will be unmapped later when the
                 * last WR that uses it completes.
                 */
                if (ctxt->sge[i].lkey == xprt->sc_dma_lkey) {
                        atomic_dec(&xprt->sc_dma_used);
                        ib_dma_unmap_single(xprt->sc_cm_id->device,
                                            ctxt->sge[i].addr,
                                            ctxt->sge[i].length,
                                            ctxt->direction);
                }
        }
}
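
/*
 * Return a WR context to the cache. If free_pages is set, release the
 * pages attached to the context as well.
 */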
void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
{
        struct svcxprt_rdma *xprt;
        int i;

        xprt = ctxt->xprt;
        if (free_pages)
                for (i = 0; i < ctxt->count; i++)
                        put_page(ctxt->pages[i]);

        kmem_cache_free(svc_rdma_ctxt_cachep, ctxt);
        atomic_dec(&xprt->sc_ctxt_used);
}
/* Temporary NFS request map cache. Created in svc_rdma.c  */
extern struct kmem_cache *svc_rdma_map_cachep;

/*
 * Temporary NFS req mappings are shared across all transport
 * instances. These are short lived and should be bounded by the number
 * of concurrent server threads * depth of the SQ.
 */
struct svc_rdma_req_map *svc_rdma_get_req_map(void)
{
        struct svc_rdma_req_map *map;

        while (1) {
                map = kmem_cache_alloc(svc_rdma_map_cachep, GFP_KERNEL);
                if (map)
                        break;
                schedule_timeout_uninterruptible(msecs_to_jiffies(500));
        }
        map->count = 0;
        map->frmr = NULL;
        return map;
}

void svc_rdma_put_req_map(struct svc_rdma_req_map *map)
{
        kmem_cache_free(svc_rdma_map_cachep, map);
}
/* ib_cq event handler */
static void cq_event_handler(struct ib_event *event, void *context)
{
        struct svc_xprt *xprt = context;

        dprintk("svcrdma: received CQ event id=%d, context=%p\n",
                event->event, context);
        set_bit(XPT_CLOSE, &xprt->xpt_flags);
}
/* QP event handler */
static void qp_event_handler(struct ib_event *event, void *context)
{
        struct svc_xprt *xprt = context;

        switch (event->event) {
        /* These are considered benign events */
        case IB_EVENT_PATH_MIG:
        case IB_EVENT_COMM_EST:
        case IB_EVENT_SQ_DRAINED:
        case IB_EVENT_QP_LAST_WQE_REACHED:
                dprintk("svcrdma: QP event %d received for QP=%p\n",
                        event->event, event->element.qp);
                break;
        /* These are considered fatal events */
        case IB_EVENT_PATH_MIG_ERR:
        case IB_EVENT_QP_FATAL:
        case IB_EVENT_QP_REQ_ERR:
        case IB_EVENT_QP_ACCESS_ERR:
        case IB_EVENT_DEVICE_FATAL:
        default:
                dprintk("svcrdma: QP ERROR event %d received for QP=%p, "
                        "closing transport\n",
                        event->event, event->element.qp);
                set_bit(XPT_CLOSE, &xprt->xpt_flags);
                break;
        }
}
/*
 * Data Transfer Operation Tasklet
 *
 * Walks a list of transports with I/O pending, removing entries as
 * they are added to the server's I/O pending list. Two bits indicate
 * if SQ, RQ, or both have I/O pending. The dto_lock is an irqsave
 * spinlock that serializes access to the transport list with the RQ
 * and SQ interrupt handlers.
 */
static void dto_tasklet_func(unsigned long data)
{
        struct svcxprt_rdma *xprt;
        unsigned long flags;

        spin_lock_irqsave(&dto_lock, flags);
        while (!list_empty(&dto_xprt_q)) {
                xprt = list_entry(dto_xprt_q.next,
                                  struct svcxprt_rdma, sc_dto_q);
                list_del_init(&xprt->sc_dto_q);
                spin_unlock_irqrestore(&dto_lock, flags);

                rq_cq_reap(xprt);
                sq_cq_reap(xprt);

                svc_xprt_put(&xprt->sc_xprt);
                spin_lock_irqsave(&dto_lock, flags);
        }
        spin_unlock_irqrestore(&dto_lock, flags);
}
/*
 * Receive Queue Completion Handler
 *
 * Since an RQ completion handler is called on interrupt context, we
 * need to defer the handling of the I/O to a tasklet
 */
static void rq_comp_handler(struct ib_cq *cq, void *cq_context)
{
        struct svcxprt_rdma *xprt = cq_context;
        unsigned long flags;

        /* Guard against unconditional flush call for destroyed QP */
        if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
                return;

        /*
         * Set the bit regardless of whether or not it's on the list
         * because it may be on the list already due to an SQ
         * completion.
         */
        set_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags);

        /*
         * If this transport is not already on the DTO transport queue,
         * add it
         */
        spin_lock_irqsave(&dto_lock, flags);
        if (list_empty(&xprt->sc_dto_q)) {
                svc_xprt_get(&xprt->sc_xprt);
                list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
        }
        spin_unlock_irqrestore(&dto_lock, flags);

        /* Tasklet does all the work to avoid irqsave locks. */
        tasklet_schedule(&dto_tasklet);
}
/*
 * rq_cq_reap - Process the RQ CQ.
 *
 * Take all completing WC off the CQE and enqueue the associated DTO
 * context on the dto_q for the transport.
 *
 * Note that caller must hold a transport reference.
 */
static void rq_cq_reap(struct svcxprt_rdma *xprt)
{
        int ret;
        struct ib_wc wc;
        struct svc_rdma_op_ctxt *ctxt = NULL;

        if (!test_and_clear_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags))
                return;

        ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
        atomic_inc(&rdma_stat_rq_poll);

        while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
                ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
                ctxt->wc_status = wc.status;
                ctxt->byte_len = wc.byte_len;
                svc_rdma_unmap_dma(ctxt);
                if (wc.status != IB_WC_SUCCESS) {
                        /* Close the transport */
                        dprintk("svcrdma: transport closing putting ctxt %p\n", ctxt);
                        set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
                        svc_rdma_put_context(ctxt, 1);
                        svc_xprt_put(&xprt->sc_xprt);
                        continue;
                }
                spin_lock_bh(&xprt->sc_rq_dto_lock);
                list_add_tail(&ctxt->dto_q, &xprt->sc_rq_dto_q);
                spin_unlock_bh(&xprt->sc_rq_dto_lock);
                svc_xprt_put(&xprt->sc_xprt);
        }

        if (ctxt)
                atomic_inc(&rdma_stat_rq_prod);

        set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
        /*
         * If data arrived before established event,
         * don't enqueue. This defers RPC I/O until the
         * RDMA connection is complete.
         */
        if (!test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
                svc_xprt_enqueue(&xprt->sc_xprt);
}
/*
 * Send Queue Completion Handler - potentially called on interrupt context.
 *
 * Note that caller must hold a transport reference.
 */
static void sq_cq_reap(struct svcxprt_rdma *xprt)
{
        struct svc_rdma_op_ctxt *ctxt = NULL;
        struct ib_wc wc;
        struct ib_cq *cq = xprt->sc_sq_cq;
        int ret;

        if (!test_and_clear_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags))
                return;

        ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
        atomic_inc(&rdma_stat_sq_poll);
        while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
                ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;

                svc_rdma_unmap_dma(ctxt);
                if (wc.status != IB_WC_SUCCESS)
                        /* Close the transport */
                        set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);

                /* Decrement used SQ WR count */
                atomic_dec(&xprt->sc_sq_count);
                wake_up(&xprt->sc_send_wait);

                switch (ctxt->wr_op) {
                case IB_WR_SEND:
                        svc_rdma_put_context(ctxt, 1);
                        break;

                case IB_WR_RDMA_WRITE:
                        svc_rdma_put_context(ctxt, 0);
                        break;

                case IB_WR_RDMA_READ:
                        if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) {
                                struct svc_rdma_op_ctxt *read_hdr = ctxt->read_hdr;

                                spin_lock_bh(&xprt->sc_rq_dto_lock);
                                set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
                                list_add_tail(&read_hdr->dto_q,
                                              &xprt->sc_read_complete_q);
                                spin_unlock_bh(&xprt->sc_rq_dto_lock);
                                svc_xprt_enqueue(&xprt->sc_xprt);
                        }
                        svc_rdma_put_context(ctxt, 0);
                        break;

                default:
                        printk(KERN_ERR "svcrdma: unexpected completion type, "
                               "opcode=%d, status=%d\n",
                               wc.opcode, wc.status);
                        break;
                }
                svc_xprt_put(&xprt->sc_xprt);
        }

        if (ctxt)
                atomic_inc(&rdma_stat_sq_prod);
}
static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
{
        struct svcxprt_rdma *xprt = cq_context;
        unsigned long flags;

        /* Guard against unconditional flush call for destroyed QP */
        if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
                return;

        /*
         * Set the bit regardless of whether or not it's on the list
         * because it may be on the list already due to an RQ
         * completion.
         */
        set_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags);

        /*
         * If this transport is not already on the DTO transport queue,
         * add it
         */
        spin_lock_irqsave(&dto_lock, flags);
        if (list_empty(&xprt->sc_dto_q)) {
                svc_xprt_get(&xprt->sc_xprt);
                list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
        }
        spin_unlock_irqrestore(&dto_lock, flags);

        /* Tasklet does all the work to avoid irqsave locks. */
        tasklet_schedule(&dto_tasklet);
}
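
/*
 * Allocate and initialize an svcxprt_rdma: its lists, locks, wait queue,
 * and default transport limits. The 'listener' flag marks the transport
 * as a listening endpoint.
 */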
static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
                                             int listener)
{
        struct svcxprt_rdma *cma_xprt = kzalloc(sizeof *cma_xprt, GFP_KERNEL);

        if (!cma_xprt)
                return NULL;

        svc_xprt_init(&svc_rdma_class, &cma_xprt->sc_xprt, serv);
        INIT_LIST_HEAD(&cma_xprt->sc_accept_q);
        INIT_LIST_HEAD(&cma_xprt->sc_dto_q);
        INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
        INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q);
        INIT_LIST_HEAD(&cma_xprt->sc_frmr_q);
        init_waitqueue_head(&cma_xprt->sc_send_wait);

        spin_lock_init(&cma_xprt->sc_lock);
        spin_lock_init(&cma_xprt->sc_rq_dto_lock);
        spin_lock_init(&cma_xprt->sc_frmr_q_lock);

        cma_xprt->sc_ord = svcrdma_ord;

        cma_xprt->sc_max_req_size = svcrdma_max_req_size;
        cma_xprt->sc_max_requests = svcrdma_max_requests;
        cma_xprt->sc_sq_depth = svcrdma_max_requests * RPCRDMA_SQ_DEPTH_MULT;
        atomic_set(&cma_xprt->sc_sq_count, 0);
        atomic_set(&cma_xprt->sc_ctxt_used, 0);

        if (listener)
                set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);

        return cma_xprt;
}
struct page *svc_rdma_get_page(void)
{
        struct page *page;

        while ((page = alloc_page(GFP_KERNEL)) == NULL) {
                /* If we can't get memory, wait a bit and try again */
                printk(KERN_INFO "svcrdma: out of memory...retrying in 1000 "
                       "jiffies.\n");
                schedule_timeout_uninterruptible(msecs_to_jiffies(1000));
        }
        return page;
}
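
/*
 * Build a receive WR covering sc_max_req_size bytes of page-sized,
 * DMA-mapped SGEs and post it on the receive queue.
 */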
int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
{
        struct ib_recv_wr recv_wr, *bad_recv_wr;
        struct svc_rdma_op_ctxt *ctxt;
        struct page *page;
        unsigned long pa;
        int sge_no;
        int buflen;
        int ret;

        ctxt = svc_rdma_get_context(xprt);
        buflen = 0;
        ctxt->direction = DMA_FROM_DEVICE;
        for (sge_no = 0; buflen < xprt->sc_max_req_size; sge_no++) {
                BUG_ON(sge_no >= xprt->sc_max_sge);
                page = svc_rdma_get_page();
                ctxt->pages[sge_no] = page;
                atomic_inc(&xprt->sc_dma_used);
                pa = ib_dma_map_page(xprt->sc_cm_id->device,
                                     page, 0, PAGE_SIZE,
                                     DMA_FROM_DEVICE);
                ctxt->sge[sge_no].addr = pa;
                ctxt->sge[sge_no].length = PAGE_SIZE;
                ctxt->sge[sge_no].lkey = xprt->sc_phys_mr->lkey;
                buflen += PAGE_SIZE;
        }
        ctxt->count = sge_no;
        recv_wr.next = NULL;
        recv_wr.sg_list = &ctxt->sge[0];
        recv_wr.num_sge = ctxt->count;
        recv_wr.wr_id = (u64)(unsigned long)ctxt;

        svc_xprt_get(&xprt->sc_xprt);
        ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr);
        if (ret) {
                svc_xprt_put(&xprt->sc_xprt);
                svc_rdma_put_context(ctxt, 1);
        }
        return ret;
}
/*
 * This function handles the CONNECT_REQUEST event on a listening
 * endpoint. It is passed the cma_id for the _new_ connection. The context in
 * this cma_id is inherited from the listening cma_id and is the svc_xprt
 * structure for the listening endpoint.
 *
 * This function creates a new xprt for the new connection and enqueues it on
 * the accept queue for the listening xprt. When the listen thread is kicked,
 * it will call the recvfrom method on the listen xprt which will accept the
 * new connection.
 */
static void handle_connect_req(struct rdma_cm_id *new_cma_id, size_t client_ird)
{
        struct svcxprt_rdma *listen_xprt = new_cma_id->context;
        struct svcxprt_rdma *newxprt;
        struct sockaddr *sa;

        /* Create a new transport */
        newxprt = rdma_create_xprt(listen_xprt->sc_xprt.xpt_server, 0);
        if (!newxprt) {
                dprintk("svcrdma: failed to create new transport\n");
                return;
        }
        newxprt->sc_cm_id = new_cma_id;
        new_cma_id->context = newxprt;
        dprintk("svcrdma: Creating newxprt=%p, cm_id=%p, listenxprt=%p\n",
                newxprt, newxprt->sc_cm_id, listen_xprt);

        /* Save client advertised inbound read limit for use later in accept. */
        newxprt->sc_ord = client_ird;

        /* Set the local and remote addresses in the transport */
        sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
        svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa));
        sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
        svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa));

        /*
         * Enqueue the new transport on the accept queue of the listening
         * transport
         */
        spin_lock_bh(&listen_xprt->sc_lock);
        list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q);
        spin_unlock_bh(&listen_xprt->sc_lock);

        /*
         * Can't use svc_xprt_received here because we are not on a
         * rqstp thread
         */
        set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags);
        svc_xprt_enqueue(&listen_xprt->sc_xprt);
}
/*
 * Handles events generated on the listening endpoint. These events will
 * either be incoming connect requests or adapter removal events.
 */
static int rdma_listen_handler(struct rdma_cm_id *cma_id,
                               struct rdma_cm_event *event)
{
        struct svcxprt_rdma *xprt = cma_id->context;
        int ret = 0;

        switch (event->event) {
        case RDMA_CM_EVENT_CONNECT_REQUEST:
                dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, "
                        "event=%d\n", cma_id, cma_id->context, event->event);
                handle_connect_req(cma_id,
                                   event->param.conn.responder_resources);
                break;

        case RDMA_CM_EVENT_ESTABLISHED:
                /* Accept complete */
                dprintk("svcrdma: Connection completed on LISTEN xprt=%p, "
                        "cm_id=%p\n", xprt, cma_id);
                break;

        case RDMA_CM_EVENT_DEVICE_REMOVAL:
                dprintk("svcrdma: Device removal xprt=%p, cm_id=%p\n",
                        xprt, cma_id);
                if (xprt)
                        set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
                break;

        default:
                dprintk("svcrdma: Unexpected event on listening endpoint %p, "
                        "event=%d\n", cma_id, event->event);
                break;
        }

        return ret;
}
static int rdma_cma_handler(struct rdma_cm_id *cma_id,
                            struct rdma_cm_event *event)
{
        struct svc_xprt *xprt = cma_id->context;
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);

        switch (event->event) {
        case RDMA_CM_EVENT_ESTABLISHED:
                /* Accept complete */
                svc_xprt_get(xprt);
                dprintk("svcrdma: Connection completed on DTO xprt=%p, "
                        "cm_id=%p\n", xprt, cma_id);
                clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags);
                svc_xprt_enqueue(xprt);
                break;
        case RDMA_CM_EVENT_DISCONNECTED:
                dprintk("svcrdma: Disconnect on DTO xprt=%p, cm_id=%p\n",
                        xprt, cma_id);
                if (xprt) {
                        set_bit(XPT_CLOSE, &xprt->xpt_flags);
                        svc_xprt_enqueue(xprt);
                        svc_xprt_put(xprt);
                }
                break;
        case RDMA_CM_EVENT_DEVICE_REMOVAL:
                dprintk("svcrdma: Device removal cma_id=%p, xprt = %p, "
                        "event=%d\n", cma_id, xprt, event->event);
                if (xprt) {
                        set_bit(XPT_CLOSE, &xprt->xpt_flags);
                        svc_xprt_enqueue(xprt);
                }
                break;
        default:
                dprintk("svcrdma: Unexpected event on DTO endpoint %p, "
                        "event=%d\n", cma_id, event->event);
                break;
        }
        return 0;
}
/*
 * Create a listening RDMA service endpoint.
 */
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
                                        struct sockaddr *sa, int salen,
                                        int flags)
{
        struct rdma_cm_id *listen_id;
        struct svcxprt_rdma *cma_xprt;
        struct svc_xprt *xprt;
        int ret;

        dprintk("svcrdma: Creating RDMA socket\n");

        cma_xprt = rdma_create_xprt(serv, 1);
        if (!cma_xprt)
                return ERR_PTR(-ENOMEM);
        xprt = &cma_xprt->sc_xprt;

        listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP);
        if (IS_ERR(listen_id)) {
                ret = PTR_ERR(listen_id);
                dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
                goto err0;
        }

        ret = rdma_bind_addr(listen_id, sa);
        if (ret) {
                dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret);
                goto err1;
        }
        cma_xprt->sc_cm_id = listen_id;

        ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
        if (ret) {
                dprintk("svcrdma: rdma_listen failed = %d\n", ret);
                goto err1;
        }

        /*
         * We need to use the address from the cm_id in case the
         * caller specified 0 for the port number.
         */
        sa = (struct sockaddr *)&cma_xprt->sc_cm_id->route.addr.src_addr;
        svc_xprt_set_local(&cma_xprt->sc_xprt, sa, salen);

        return &cma_xprt->sc_xprt;

 err1:
        rdma_destroy_id(listen_id);
 err0:
        kfree(cma_xprt);
        return ERR_PTR(ret);
}
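
/* Allocate a fast-register MR and its page list for this transport. */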
static struct svc_rdma_fastreg_mr *rdma_alloc_frmr(struct svcxprt_rdma *xprt)
{
        struct ib_mr *mr;
        struct ib_fast_reg_page_list *pl;
        struct svc_rdma_fastreg_mr *frmr;

        frmr = kmalloc(sizeof(*frmr), GFP_KERNEL);
        if (!frmr)
                goto err;

        mr = ib_alloc_fast_reg_mr(xprt->sc_pd, RPCSVC_MAXPAGES);
        if (IS_ERR(mr))
                goto err_free_frmr;

        pl = ib_alloc_fast_reg_page_list(xprt->sc_cm_id->device,
                                         RPCSVC_MAXPAGES);
        if (IS_ERR(pl))
                goto err_free_mr;

        frmr->mr = mr;
        frmr->page_list = pl;
        INIT_LIST_HEAD(&frmr->frmr_list);
        return frmr;

 err_free_mr:
        ib_dereg_mr(mr);
 err_free_frmr:
        kfree(frmr);
 err:
        return ERR_PTR(-ENOMEM);
}
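
/* Release all fast-register MRs queued on the transport's free list. */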
static void rdma_dealloc_frmr_q(struct svcxprt_rdma *xprt)
{
        struct svc_rdma_fastreg_mr *frmr;

        while (!list_empty(&xprt->sc_frmr_q)) {
                frmr = list_entry(xprt->sc_frmr_q.next,
                                  struct svc_rdma_fastreg_mr, frmr_list);
                list_del_init(&frmr->frmr_list);
                ib_dereg_mr(frmr->mr);
                ib_free_fast_reg_page_list(frmr->page_list);
                kfree(frmr);
        }
}
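
/*
 * Take an FRMR from the transport's free list, or allocate a new one
 * if the list is empty.
 */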
struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *rdma)
{
        struct svc_rdma_fastreg_mr *frmr = NULL;

        spin_lock_bh(&rdma->sc_frmr_q_lock);
        if (!list_empty(&rdma->sc_frmr_q)) {
                frmr = list_entry(rdma->sc_frmr_q.next,
                                  struct svc_rdma_fastreg_mr, frmr_list);
                list_del_init(&frmr->frmr_list);
                frmr->map_len = 0;
                frmr->page_list_len = 0;
        }
        spin_unlock_bh(&rdma->sc_frmr_q_lock);
        if (frmr)
                return frmr;

        return rdma_alloc_frmr(rdma);
}
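
/* Unmap the pages currently mapped behind an FRMR. */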
static void frmr_unmap_dma(struct svcxprt_rdma *xprt,
                           struct svc_rdma_fastreg_mr *frmr)
{
        int page_no;

        for (page_no = 0; page_no < frmr->page_list_len; page_no++) {
                dma_addr_t addr = frmr->page_list->page_list[page_no];
                if (ib_dma_mapping_error(frmr->mr->device, addr))
                        continue;
                atomic_dec(&xprt->sc_dma_used);
                ib_dma_unmap_single(frmr->mr->device, addr, PAGE_SIZE,
                                    frmr->direction);
        }
}
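
/* Unmap an FRMR's pages and return it to the transport's free list. */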
void svc_rdma_put_frmr(struct svcxprt_rdma *rdma,
                       struct svc_rdma_fastreg_mr *frmr)
{
        if (frmr) {
                frmr_unmap_dma(rdma, frmr);
                spin_lock_bh(&rdma->sc_frmr_q_lock);
                BUG_ON(!list_empty(&frmr->frmr_list));
                list_add(&frmr->frmr_list, &rdma->sc_frmr_q);
                spin_unlock_bh(&rdma->sc_frmr_q_lock);
        }
}
/*
 * This is the xpo_recvfrom function for listening endpoints. Its
 * purpose is to accept incoming connections. The CMA callback handler
 * has already created a new transport and attached it to the new CMA
 * ID.
 *
 * There is a queue of pending connections hung on the listening
 * transport. This queue contains the new svc_xprt structure. This
 * function takes svc_xprt structures off the accept_q and completes
 * the connection.
 */
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
{
        struct svcxprt_rdma *listen_rdma;
        struct svcxprt_rdma *newxprt = NULL;
        struct rdma_conn_param conn_param;
        struct ib_qp_init_attr qp_attr;
        struct ib_device_attr devattr;
        int dma_mr_acc = 0;
        int need_dma_mr;
        int ret;
        int i;

        listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
        clear_bit(XPT_CONN, &xprt->xpt_flags);
        /* Get the next entry off the accept list */
        spin_lock_bh(&listen_rdma->sc_lock);
        if (!list_empty(&listen_rdma->sc_accept_q)) {
                newxprt = list_entry(listen_rdma->sc_accept_q.next,
                                     struct svcxprt_rdma, sc_accept_q);
                list_del_init(&newxprt->sc_accept_q);
        }
        if (!list_empty(&listen_rdma->sc_accept_q))
                set_bit(XPT_CONN, &listen_rdma->sc_xprt.xpt_flags);
        spin_unlock_bh(&listen_rdma->sc_lock);
        if (!newxprt)
                return NULL;

        dprintk("svcrdma: newxprt from accept queue = %p, cm_id=%p\n",
                newxprt, newxprt->sc_cm_id);

        ret = ib_query_device(newxprt->sc_cm_id->device, &devattr);
        if (ret) {
                dprintk("svcrdma: could not query device attributes on "
                        "device %p, rc=%d\n", newxprt->sc_cm_id->device, ret);
                goto errout;
        }
        /* Qualify the transport resource defaults with the
         * capabilities of this particular device */
        newxprt->sc_max_sge = min((size_t)devattr.max_sge,
                                  (size_t)RPCSVC_MAXPAGES);
        newxprt->sc_max_requests = min((size_t)devattr.max_qp_wr,
                                       (size_t)svcrdma_max_requests);
        newxprt->sc_sq_depth = RPCRDMA_SQ_DEPTH_MULT * newxprt->sc_max_requests;

        /*
         * Limit ORD based on client limit, local device limit, and
         * configured svcrdma limit.
         */
        newxprt->sc_ord = min_t(size_t, devattr.max_qp_rd_atom, newxprt->sc_ord);
        newxprt->sc_ord = min_t(size_t, svcrdma_ord, newxprt->sc_ord);

        newxprt->sc_pd = ib_alloc_pd(newxprt->sc_cm_id->device);
        if (IS_ERR(newxprt->sc_pd)) {
                dprintk("svcrdma: error creating PD for connect request\n");
                goto errout;
        }
        newxprt->sc_sq_cq = ib_create_cq(newxprt->sc_cm_id->device,
                                         sq_comp_handler,
                                         cq_event_handler,
                                         newxprt,
                                         newxprt->sc_sq_depth,
                                         0);
        if (IS_ERR(newxprt->sc_sq_cq)) {
                dprintk("svcrdma: error creating SQ CQ for connect request\n");
                goto errout;
        }
        newxprt->sc_rq_cq = ib_create_cq(newxprt->sc_cm_id->device,
                                         rq_comp_handler,
                                         cq_event_handler,
                                         newxprt,
                                         newxprt->sc_max_requests,
                                         0);
        if (IS_ERR(newxprt->sc_rq_cq)) {
                dprintk("svcrdma: error creating RQ CQ for connect request\n");
                goto errout;
        }

        memset(&qp_attr, 0, sizeof qp_attr);
        qp_attr.event_handler = qp_event_handler;
        qp_attr.qp_context = &newxprt->sc_xprt;
        qp_attr.cap.max_send_wr = newxprt->sc_sq_depth;
        qp_attr.cap.max_recv_wr = newxprt->sc_max_requests;
        qp_attr.cap.max_send_sge = newxprt->sc_max_sge;
        qp_attr.cap.max_recv_sge = newxprt->sc_max_sge;
        qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
        qp_attr.qp_type = IB_QPT_RC;
        qp_attr.send_cq = newxprt->sc_sq_cq;
        qp_attr.recv_cq = newxprt->sc_rq_cq;
        dprintk("svcrdma: newxprt->sc_cm_id=%p, newxprt->sc_pd=%p\n"
                "    cm_id->device=%p, sc_pd->device=%p\n"
                "    cap.max_send_wr = %d\n"
                "    cap.max_recv_wr = %d\n"
                "    cap.max_send_sge = %d\n"
                "    cap.max_recv_sge = %d\n",
                newxprt->sc_cm_id, newxprt->sc_pd,
                newxprt->sc_cm_id->device, newxprt->sc_pd->device,
                qp_attr.cap.max_send_wr,
                qp_attr.cap.max_recv_wr,
                qp_attr.cap.max_send_sge,
                qp_attr.cap.max_recv_sge);

        ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr);
        if (ret) {
                /*
                 * XXX: This is a hack. We need a xx_request_qp interface
                 * that will adjust the qp_attr's with a best-effort
                 * number
                 */
                qp_attr.cap.max_send_sge -= 2;
                qp_attr.cap.max_recv_sge -= 2;
                ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd,
                                     &qp_attr);
                if (ret) {
                        dprintk("svcrdma: failed to create QP, ret=%d\n", ret);
                        goto errout;
                }
                newxprt->sc_max_sge = qp_attr.cap.max_send_sge;
                newxprt->sc_max_sge = qp_attr.cap.max_recv_sge;
                newxprt->sc_sq_depth = qp_attr.cap.max_send_wr;
                newxprt->sc_max_requests = qp_attr.cap.max_recv_wr;
        }
        newxprt->sc_qp = newxprt->sc_cm_id->qp;
        /*
         * Use the most secure set of MR resources based on the
         * transport type and available memory management features in
         * the device. Here's the table implemented below:
         *
         *              Fast    Global  DMA     Remote WR
         *              Reg     LKEY    MR      Access
         *              Sup'd   Sup'd   Needed  Needed
         *
         * IWARP        N       N       Y       Y
         *              N       Y       Y       Y
         *              Y       N       Y       N
         *              Y       Y       N       -
         *
         * IB           N       N       Y       N
         *              N       Y       N       -
         *              Y       N       Y       N
         *              Y       Y       N       -
         *
         * NB:  iWARP requires remote write access for the data sink
         *      of an RDMA_READ. IB does not.
         */
        if (devattr.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
                newxprt->sc_frmr_pg_list_len =
                        devattr.max_fast_reg_page_list_len;
                newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_FAST_REG;
        }

        /*
         * Determine if a DMA MR is required and if so, what privs are required
         */
        switch (rdma_node_get_transport(newxprt->sc_cm_id->device->node_type)) {
        case RDMA_TRANSPORT_IWARP:
                newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_READ_W_INV;
                if (!(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG)) {
                        need_dma_mr = 1;
                        dma_mr_acc =
                                (IB_ACCESS_LOCAL_WRITE |
                                 IB_ACCESS_REMOTE_WRITE);
                } else if (!(devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) {
                        need_dma_mr = 1;
                        dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
                } else
                        need_dma_mr = 0;
                break;
        case RDMA_TRANSPORT_IB:
                if (!(devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) {
                        need_dma_mr = 1;
                        dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
                } else
                        need_dma_mr = 0;
                break;
        default:
                goto errout;
        }

        /* Create the DMA MR if needed, otherwise, use the DMA LKEY */
        if (need_dma_mr) {
                /* Register all of physical memory */
                newxprt->sc_phys_mr =
                        ib_get_dma_mr(newxprt->sc_pd, dma_mr_acc);
                if (IS_ERR(newxprt->sc_phys_mr)) {
                        dprintk("svcrdma: Failed to create DMA MR ret=%d\n",
                                ret);
                        goto errout;
                }
                newxprt->sc_dma_lkey = newxprt->sc_phys_mr->lkey;
        } else
                newxprt->sc_dma_lkey =
                        newxprt->sc_cm_id->device->local_dma_lkey;
        /* Post receive buffers */
        for (i = 0; i < newxprt->sc_max_requests; i++) {
                ret = svc_rdma_post_recv(newxprt);
                if (ret) {
                        dprintk("svcrdma: failure posting receive buffers\n");
                        goto errout;
                }
        }

        /* Swap out the handler */
        newxprt->sc_cm_id->event_handler = rdma_cma_handler;

        /*
         * Arm the CQs for the SQ and RQ before accepting so we can't
         * miss the first message
         */
        ib_req_notify_cq(newxprt->sc_sq_cq, IB_CQ_NEXT_COMP);
        ib_req_notify_cq(newxprt->sc_rq_cq, IB_CQ_NEXT_COMP);

        /* Accept Connection */
        set_bit(RDMAXPRT_CONN_PENDING, &newxprt->sc_flags);
        memset(&conn_param, 0, sizeof conn_param);
        conn_param.responder_resources = 0;
        conn_param.initiator_depth = newxprt->sc_ord;
        ret = rdma_accept(newxprt->sc_cm_id, &conn_param);
        if (ret) {
                dprintk("svcrdma: failed to accept new connection, ret=%d\n",
                        ret);
                goto errout;
        }
1029 dprintk("svcrdma: new connection %p accepted with the following "
1031 " local_ip : %d.%d.%d.%d\n"
1032 " local_port : %d\n"
1033 " remote_ip : %d.%d.%d.%d\n"
1034 " remote_port : %d\n"
1037 " max_requests : %d\n"
1040 NIPQUAD(((struct sockaddr_in
*)&newxprt
->sc_cm_id
->
1041 route
.addr
.src_addr
)->sin_addr
.s_addr
),
1042 ntohs(((struct sockaddr_in
*)&newxprt
->sc_cm_id
->
1043 route
.addr
.src_addr
)->sin_port
),
1044 NIPQUAD(((struct sockaddr_in
*)&newxprt
->sc_cm_id
->
1045 route
.addr
.dst_addr
)->sin_addr
.s_addr
),
1046 ntohs(((struct sockaddr_in
*)&newxprt
->sc_cm_id
->
1047 route
.addr
.dst_addr
)->sin_port
),
1048 newxprt
->sc_max_sge
,
1049 newxprt
->sc_sq_depth
,
1050 newxprt
->sc_max_requests
,
1053 return &newxprt
->sc_xprt
;
1056 dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret
);
1057 /* Take a reference in case the DTO handler runs */
1058 svc_xprt_get(&newxprt
->sc_xprt
);
1059 if (newxprt
->sc_qp
&& !IS_ERR(newxprt
->sc_qp
))
1060 ib_destroy_qp(newxprt
->sc_qp
);
1061 rdma_destroy_id(newxprt
->sc_cm_id
);
1062 /* This call to put will destroy the transport */
1063 svc_xprt_put(&newxprt
->sc_xprt
);
static void svc_rdma_release_rqst(struct svc_rqst *rqstp)
{
}

/*
 * When connected, an svc_xprt has at least two references:
 *
 * - A reference held by the cm_id between the ESTABLISHED and
 *   DISCONNECTED events. If the remote peer disconnected first, this
 *   reference could be gone.
 *
 * - A reference held by the svc_recv code that called this function
 *   as part of close processing.
 *
 * At a minimum one reference should still be held.
 */
static void svc_rdma_detach(struct svc_xprt *xprt)
{
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);
        dprintk("svc: svc_rdma_detach(%p)\n", xprt);

        /* Disconnect and flush posted WQE */
        rdma_disconnect(rdma->sc_cm_id);
}
static void __svc_rdma_free(struct work_struct *work)
{
        struct svcxprt_rdma *rdma =
                container_of(work, struct svcxprt_rdma, sc_work);
        dprintk("svcrdma: svc_rdma_free(%p)\n", rdma);

        /* We should only be called from kref_put */
        BUG_ON(atomic_read(&rdma->sc_xprt.xpt_ref.refcount) != 0);

        /*
         * Destroy queued, but not processed read completions. Note
         * that this cleanup has to be done before destroying the
         * cm_id because the device ptr is needed to unmap the dma in
         * svc_rdma_put_context.
         */
        while (!list_empty(&rdma->sc_read_complete_q)) {
                struct svc_rdma_op_ctxt *ctxt;
                ctxt = list_entry(rdma->sc_read_complete_q.next,
                                  struct svc_rdma_op_ctxt,
                                  dto_q);
                list_del_init(&ctxt->dto_q);
                svc_rdma_put_context(ctxt, 1);
        }

        /* Destroy queued, but not processed recv completions */
        while (!list_empty(&rdma->sc_rq_dto_q)) {
                struct svc_rdma_op_ctxt *ctxt;
                ctxt = list_entry(rdma->sc_rq_dto_q.next,
                                  struct svc_rdma_op_ctxt,
                                  dto_q);
                list_del_init(&ctxt->dto_q);
                svc_rdma_put_context(ctxt, 1);
        }

        /* Warn if we leaked a resource or under-referenced */
        WARN_ON(atomic_read(&rdma->sc_ctxt_used) != 0);
        WARN_ON(atomic_read(&rdma->sc_dma_used) != 0);

        /* De-allocate fastreg mr */
        rdma_dealloc_frmr_q(rdma);

        /* Destroy the QP if present (not a listener) */
        if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
                ib_destroy_qp(rdma->sc_qp);

        if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
                ib_destroy_cq(rdma->sc_sq_cq);

        if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq))
                ib_destroy_cq(rdma->sc_rq_cq);

        if (rdma->sc_phys_mr && !IS_ERR(rdma->sc_phys_mr))
                ib_dereg_mr(rdma->sc_phys_mr);

        if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
                ib_dealloc_pd(rdma->sc_pd);

        /* Destroy the CM ID */
        rdma_destroy_id(rdma->sc_cm_id);

        kfree(rdma);
}
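
/*
 * Tearing down the transport requires calls that may sleep, so defer
 * the actual destruction to a work queue instead of doing it directly
 * in the xpo_free callback.
 */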
static void svc_rdma_free(struct svc_xprt *xprt)
{
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);
        INIT_WORK(&rdma->sc_work, __svc_rdma_free);
        schedule_work(&rdma->sc_work);
}
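
/* Report whether there is room on the SQ to send a simple response. */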
static int svc_rdma_has_wspace(struct svc_xprt *xprt)
{
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);

        /*
         * If there are fewer SQ WR available than required to send a
         * simple response, return false.
         */
        if ((rdma->sc_sq_depth - atomic_read(&rdma->sc_sq_count) < 3))
                return 0;

        /*
         * ...or there are already waiters on the SQ,
         * return false.
         */
        if (waitqueue_active(&rdma->sc_send_wait))
                return 0;

        /* Otherwise return true. */
        return 1;
}
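
/*
 * Post a send WR on the SQ, waiting for space if the SQ is full and
 * accounting for the WR in sc_sq_count.
 */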
int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
{
        struct ib_send_wr *bad_wr;
        int ret;

        if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
                return -ENOTCONN;

        BUG_ON(wr->send_flags != IB_SEND_SIGNALED);
        BUG_ON(((struct svc_rdma_op_ctxt *)(unsigned long)wr->wr_id)->wr_op !=
                wr->opcode);
        /* If the SQ is full, wait until an SQ entry is available */
        while (1) {
                spin_lock_bh(&xprt->sc_lock);
                if (xprt->sc_sq_depth == atomic_read(&xprt->sc_sq_count)) {
                        spin_unlock_bh(&xprt->sc_lock);
                        atomic_inc(&rdma_stat_sq_starve);

                        /* See if we can opportunistically reap SQ WR to make room */
                        sq_cq_reap(xprt);

                        /* Wait until SQ WR available if SQ still full */
                        wait_event(xprt->sc_send_wait,
                                   atomic_read(&xprt->sc_sq_count) <
                                   xprt->sc_sq_depth);
                        if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
                                return 0;
                        continue;
                }
                /* Bump the used SQ WR count and post */
                svc_xprt_get(&xprt->sc_xprt);
                ret = ib_post_send(xprt->sc_qp, wr, &bad_wr);
                if (!ret)
                        atomic_inc(&xprt->sc_sq_count);
                else {
                        svc_xprt_put(&xprt->sc_xprt);
                        dprintk("svcrdma: failed to post SQ WR rc=%d, "
                                "sc_sq_count=%d, sc_sq_depth=%d\n",
                                ret, atomic_read(&xprt->sc_sq_count),
                                xprt->sc_sq_depth);
                }
                spin_unlock_bh(&xprt->sc_lock);
                break;
        }
        return ret;
}
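
/*
 * XDR encode an RPC/RDMA error reply and post it as a single-SGE SEND
 * to report a protocol problem with the received request.
 */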
void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
                         enum rpcrdma_errcode err)
{
        struct ib_send_wr err_wr;
        struct ib_sge sge;
        struct page *p;
        struct svc_rdma_op_ctxt *ctxt;
        u32 *va;
        int length;
        int ret;

        p = svc_rdma_get_page();
        va = page_address(p);

        /* XDR encode error */
        length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);

        /* Prepare SGE for local address */
        atomic_inc(&xprt->sc_dma_used);
        sge.addr = ib_dma_map_page(xprt->sc_cm_id->device,
                                   p, 0, PAGE_SIZE, DMA_FROM_DEVICE);
        sge.lkey = xprt->sc_phys_mr->lkey;
        sge.length = length;

        ctxt = svc_rdma_get_context(xprt);
        ctxt->count = 1;
        ctxt->pages[0] = p;

        /* Prepare SEND WR */
        memset(&err_wr, 0, sizeof err_wr);
        ctxt->wr_op = IB_WR_SEND;
        err_wr.wr_id = (unsigned long)ctxt;
        err_wr.sg_list = &sge;
        err_wr.num_sge = 1;
        err_wr.opcode = IB_WR_SEND;
        err_wr.send_flags = IB_SEND_SIGNALED;

        /* Post It */
        ret = svc_rdma_send(xprt, &err_wr);
        if (ret) {
                dprintk("svcrdma: Error %d posting send for protocol error\n",
                        ret);
                svc_rdma_put_context(ctxt, 1);
        }
}