/*
 * Copyright (c) 2015 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc.  All rights reserved.
 */
/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR). Also referred to sometimes as FRMR mode.
 *
 * FRWR features ordered asynchronous registration and deregistration
 * of arbitrarily sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */
/* Normal operation
 *
 * A Memory Region is prepared for RDMA READ or WRITE using a FAST_REG
 * Work Request (frwr_op_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request
 * (frwr_op_unmap).
 *
 * Typically these Work Requests are not signaled, and neither are RDMA
 * SEND Work Requests (with the exception of signaling occasionally to
 * prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
 *
 * As an optimization, frwr_op_unmap marks MRs INVALID before the
 * LOCAL_INV WR is posted. If posting succeeds, the MR is placed on
 * rb_mws immediately so that no work (like managing a linked list
 * under a spinlock) is needed in the completion upcall.
 *
 * But this means that frwr_op_map() can occasionally encounter an MR
 * that is INVALID but the LOCAL_INV WR has not completed. Work Queue
 * ordering prevents a subsequent FAST_REG WR from executing against
 * that MR while it is still being invalidated.
 */
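/* As a sketch of the normal per-RPC flow described above (a summary
 * drawn from the comments in this file, not additional guarantees):
 *
 *	frwr_op_map:	post FAST_REG WR; fr_state becomes VALID
 *	(remote peer performs RDMA READ or WRITE using the rkey)
 *	frwr_op_unmap:	mark fr_state INVALID, post LOCAL_INV WR,
 *			and return the MR to rb_mws immediately
 */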
/* Transport recovery
 *
 * ->op_map and the transport connect worker cannot run at the same
 * time, but ->op_unmap can fire while the transport connect worker
 * is running. Thus MR recovery is handled in ->op_map, to guarantee
 * that recovered MRs are owned by a sending RPC, and not one where
 * ->op_unmap could fire at the same time transport reconnect is
 * being done.
 *
 * When the underlying transport disconnects, MRs are left in one of
 * three states:
 *
 * INVALID:	The MR was not in use before the QP entered ERROR state.
 *		(Or, the LOCAL_INV WR has not completed or flushed yet).
 *
 * STALE:	The MR was being registered or unregistered when the QP
 *		entered ERROR state, and the pending WR was flushed.
 *
 * VALID:	The MR was registered before the QP entered ERROR state.
 *
 * When frwr_op_map encounters STALE and VALID MRs, they are recovered
 * with ib_dereg_mr and then are re-initialized. Because MR recovery
 * allocates fresh resources, it is deferred to a workqueue, and the
 * recovered MRs are placed back on the rb_mws list when recovery is
 * complete. frwr_op_map allocates another MR for the current RPC while
 * the broken MR is reset.
 *
 * To ensure that frwr_op_map doesn't encounter an MR that is marked
 * INVALID but that is about to be flushed due to a previous transport
 * disconnect, the transport connect worker attempts to drain all
 * pending send queue WRs before the transport is reconnected.
 */
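/* Taken together, the comments above imply this fr_state machine
 * (a summary only; the states are the FRMR_IS_* constants used
 * throughout this file):
 *
 *	FRMR_IS_INVALID --[FAST_REG posted]--> FRMR_IS_VALID
 *	FRMR_IS_VALID --[LOCAL_INV posted]--> FRMR_IS_INVALID
 *	MR with a pending WR --[WR flushed at disconnect]--> FRMR_IS_STALE
 *
 * frwr_op_map recovers STALE and VALID MRs by replacing the
 * underlying ib_mr, which returns them to FRMR_IS_INVALID.
 */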
#include "xprt_rdma.h"
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif
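/* frwr_is_supported - Check whether a device can do FRWR.
 *
 * FRWR requires the MEM_MGT_EXTENSIONS device capability and a
 * non-zero fast-register page list limit.
 */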
bool
frwr_is_supported(struct rpcrdma_ia *ia)
{
	struct ib_device_attr *attrs = &ia->ri_device->attrs;

	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
		goto out_not_supported;
	if (attrs->max_fast_reg_page_list_len == 0)
		goto out_not_supported;
	return true;

out_not_supported:
	pr_info("rpcrdma: 'frwr' mode is not supported by device %s\n",
		ia->ri_device->name);
	return false;
}
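/* frwr_op_init_mr - Allocate and initialize one rpcrdma_mw.
 *
 * Allocates the underlying ib_mr and a scatterlist sized to the
 * device's maximum fast-register page list depth.
 */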
static int
frwr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
{
	unsigned int depth = ia->ri_max_frmr_depth;
	struct rpcrdma_frmr *f = &r->frmr;
	int rc;

	f->fr_mr = ib_alloc_mr(ia->ri_pd, IB_MR_TYPE_MEM_REG, depth);
	if (IS_ERR(f->fr_mr))
		goto out_mr_err;

	r->mw_sg = kcalloc(depth, sizeof(*r->mw_sg), GFP_KERNEL);
	if (!r->mw_sg)
		goto out_list_err;

	sg_init_table(r->mw_sg, depth);
	init_completion(&f->fr_linv_done);
	return 0;

out_mr_err:
	rc = PTR_ERR(f->fr_mr);
	dprintk("RPC: %s: ib_alloc_mr status %i\n",
		__func__, rc);
	return rc;

out_list_err:
	rc = -ENOMEM;
	dprintk("RPC: %s: sg allocation failure\n",
		__func__);
	ib_dereg_mr(f->fr_mr);
	return rc;
}
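/* frwr_op_release_mr - Free all resources held by one rpcrdma_mw.
 */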
static void
frwr_op_release_mr(struct rpcrdma_mw *r)
{
	int rc;

	rc = ib_dereg_mr(r->frmr.fr_mr);
	if (rc)
		pr_err("rpcrdma: final ib_dereg_mr for %p returned %i\n",
		       r, rc);
	kfree(r->mw_sg);
	kfree(r);
}
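/* __frwr_reset_mr - Replace an MR's underlying ib_mr.
 *
 * Deregistering and re-allocating the ib_mr produces a fresh rkey,
 * which is how a broken MR is made safe to reuse.
 */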
static int
__frwr_reset_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
{
	struct rpcrdma_frmr *f = &r->frmr;
	int rc;

	rc = ib_dereg_mr(f->fr_mr);
	if (rc) {
		pr_warn("rpcrdma: ib_dereg_mr status %d, frwr %p orphaned\n",
			rc, r);
		return rc;
	}

	f->fr_mr = ib_alloc_mr(ia->ri_pd, IB_MR_TYPE_MEM_REG,
			       ia->ri_max_frmr_depth);
	if (IS_ERR(f->fr_mr)) {
		pr_warn("rpcrdma: ib_alloc_mr status %ld, frwr %p orphaned\n",
			PTR_ERR(f->fr_mr), r);
		return PTR_ERR(f->fr_mr);
	}

	dprintk("RPC: %s: recovered FRMR %p\n", __func__, r);
	f->fr_state = FRMR_IS_INVALID;
	return 0;
}
/* Reset of a single FRMR. Generate a fresh rkey by replacing the MR.
 *
 * There's no recovery if this fails. The FRMR is abandoned, but
 * remains in rb_all. It will be cleaned up when the transport is
 * destroyed.
 */
static void
frwr_op_recover_mr(struct rpcrdma_mw *mw)
{
	struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	int rc;

	rc = __frwr_reset_mr(ia, mw);
	ib_dma_unmap_sg(ia->ri_device, mw->mw_sg, mw->mw_nents, mw->mw_dir);
	if (rc) {
		pr_err("rpcrdma: FRMR reset status %d, %p orphaned\n",
		       rc, mw);
		r_xprt->rx_stats.mrs_orphaned++;
		return;
	}

	rpcrdma_put_mw(r_xprt, mw);
	r_xprt->rx_stats.mrs_recovered++;
}
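/* frwr_op_open - Size the Send Queue for FRWR Work Requests.
 *
 * Scales max_send_wr by the number of WRs each RPC can post, and
 * shrinks the credit limit (cdata->max_requests) if the device
 * cannot support that many send WRs on one QP.
 */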
static int
frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
	     struct rpcrdma_create_data_internal *cdata)
{
	int depth, delta;

	ia->ri_max_frmr_depth =
			min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
			      ia->ri_device->attrs.max_fast_reg_page_list_len);
	dprintk("RPC: %s: device's max FR page list len = %u\n",
		__func__, ia->ri_max_frmr_depth);

	/* Add room for frmr register and invalidate WRs.
	 * 1. FRMR reg WR for head
	 * 2. FRMR invalidate WR for head
	 * 3. N FRMR reg WRs for pagelist
	 * 4. N FRMR invalidate WRs for pagelist
	 * 5. FRMR reg WR for tail
	 * 6. FRMR invalidate WR for tail
	 * 7. The RDMA_SEND WR
	 */
	depth = 7;
	/* Calculate N if the device max FRMR depth is smaller than
	 * RPCRDMA_MAX_DATA_SEGS.
	 */
	if (ia->ri_max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) {
		delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frmr_depth;
		do {
			depth += 2; /* FRMR reg + invalidate */
			delta -= ia->ri_max_frmr_depth;
		} while (delta > 0);
	}
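	/* For example (hypothetical numbers, not a claim about any
	 * particular device): if RPCRDMA_MAX_DATA_SEGS were 256 and
	 * the device depth were 100, delta starts at 156; the loop
	 * runs twice (156 -> 56 -> -44), adding two reg+invalidate
	 * pairs, so depth becomes 7 + 4 = 11 WRs per RPC.
	 */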
	ep->rep_attr.cap.max_send_wr *= depth;
	if (ep->rep_attr.cap.max_send_wr > ia->ri_device->attrs.max_qp_wr) {
		cdata->max_requests = ia->ri_device->attrs.max_qp_wr / depth;
		if (!cdata->max_requests)
			return -EINVAL;
		ep->rep_attr.cap.max_send_wr = cdata->max_requests *
					       depth;
	}

	rpcrdma_set_max_header_sizes(ia, cdata, max_t(unsigned int, 1,
						      RPCRDMA_MAX_DATA_SEGS /
						      ia->ri_max_frmr_depth));
	return 0;
}
/* FRWR mode conveys a list of pages per chunk segment. The
 * maximum length of that list is the FRWR page list depth.
 */
static size_t
frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     RPCRDMA_MAX_HDR_SEGS * ia->ri_max_frmr_depth);
}
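/* __frwr_sendcompletion_flush - Common completion-error handling.
 *
 * Mark the MR STALE and, unless the WR was simply flushed by a
 * disconnect, log the failure. @wr names the WR type for the log.
 */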
static void
__frwr_sendcompletion_flush(struct ib_wc *wc, struct rpcrdma_frmr *frmr,
			    const char *wr)
{
	frmr->fr_state = FRMR_IS_STALE;
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: %s: %s (%u/0x%x)\n",
		       wr, ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
}
/**
 * frwr_wc_fastreg - Invoked by RDMA provider for each polled FastReg WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void
frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
	struct rpcrdma_frmr *frmr;
	struct ib_cqe *cqe;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		cqe = wc->wr_cqe;
		frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
		__frwr_sendcompletion_flush(wc, frmr, "fastreg");
	}
}
/**
 * frwr_wc_localinv - Invoked by RDMA provider for each polled LocalInv WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void
frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
	struct rpcrdma_frmr *frmr;
	struct ib_cqe *cqe;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		cqe = wc->wr_cqe;
		frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
		__frwr_sendcompletion_flush(wc, frmr, "localinv");
	}
}
/**
 * frwr_wc_localinv_wake - Invoked by RDMA provider for each polled LocalInv WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 * Awaken anyone waiting for an MR to finish being fenced.
 */
static void
frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
	struct rpcrdma_frmr *frmr;
	struct ib_cqe *cqe;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	cqe = wc->wr_cqe;
	frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
	if (wc->status != IB_WC_SUCCESS)
		__frwr_sendcompletion_flush(wc, frmr, "localinv");
	complete_all(&frmr->fr_linv_done);
}
/* Post a REG_MR Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
static int
frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
	    int nsegs, bool writing)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_mr_seg *seg1 = seg;
	struct rpcrdma_mw *mw;
	struct rpcrdma_frmr *frmr;
	struct ib_mr *mr;
	struct ib_reg_wr *reg_wr;
	struct ib_send_wr *bad_wr;
	int rc, i, n, dma_nents;
	u8 key;

	mw = seg1->rl_mw;
	seg1->rl_mw = NULL;
	do {
		if (mw)
			rpcrdma_defer_mr_recovery(mw);
		mw = rpcrdma_get_mw(r_xprt);
		if (!mw)
			return -ENOMEM;
	} while (mw->frmr.fr_state != FRMR_IS_INVALID);
	frmr = &mw->frmr;
	frmr->fr_state = FRMR_IS_VALID;
	mr = frmr->fr_mr;
	reg_wr = &frmr->fr_regwr;

	if (nsegs > ia->ri_max_frmr_depth)
		nsegs = ia->ri_max_frmr_depth;
	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&mw->mw_sg[i],
				    seg->mr_page,
				    seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&mw->mw_sg[i], seg->mr_offset,
				   seg->mr_len);

		++seg;
		++i;

		/* Check for holes */
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	mw->mw_nents = i;
	mw->mw_dir = rpcrdma_data_dir(writing);
	if (i == 0)
		goto out_dmamap_err;

	dma_nents = ib_dma_map_sg(ia->ri_device,
				  mw->mw_sg, mw->mw_nents, mw->mw_dir);
	if (!dma_nents)
		goto out_dmamap_err;

	n = ib_map_mr_sg(mr, mw->mw_sg, mw->mw_nents, NULL, PAGE_SIZE);
	if (unlikely(n != mw->mw_nents))
		goto out_mapmr_err;

	dprintk("RPC: %s: Using frmr %p to map %u segments (%u bytes)\n",
		__func__, mw, mw->mw_nents, mr->length);
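	/* Bump the key portion of the rkey so that a stale rkey from
	 * a previous registration of this MR cannot be used by a peer.
	 */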
	key = (u8)(mr->rkey & 0x000000FF);
	ib_update_fast_reg_key(mr, ++key);
	reg_wr->wr.next = NULL;
	reg_wr->wr.opcode = IB_WR_REG_MR;
	frmr->fr_cqe.done = frwr_wc_fastreg;
	reg_wr->wr.wr_cqe = &frmr->fr_cqe;
	reg_wr->wr.num_sge = 0;
	reg_wr->wr.send_flags = 0;
	reg_wr->mr = mr;
	reg_wr->key = mr->rkey;
	reg_wr->access = writing ?
			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
			 IB_ACCESS_REMOTE_READ;

	DECR_CQCOUNT(&r_xprt->rx_ep);
	rc = ib_post_send(ia->ri_id->qp, &reg_wr->wr, &bad_wr);
	if (rc)
		goto out_senderr;
	seg1->rl_mw = mw;
	seg1->mr_rkey = mr->rkey;
	seg1->mr_base = mr->iova;
	seg1->mr_nsegs = mw->mw_nents;
	seg1->mr_len = mr->length;

	return mw->mw_nents;
out_dmamap_err:
	pr_err("rpcrdma: failed to dma map sg %p sg_nents %u\n",
	       mw->mw_sg, mw->mw_nents);
	rpcrdma_defer_mr_recovery(mw);
	return -ENOMEM;

out_mapmr_err:
	pr_err("rpcrdma: failed to map mr %p (%u/%u)\n",
	       frmr->fr_mr, n, mw->mw_nents);
	rpcrdma_defer_mr_recovery(mw);
	return -EIO;

out_senderr:
	pr_err("rpcrdma: FRMR registration ib_post_send returned %i\n", rc);
	rpcrdma_defer_mr_recovery(mw);
	return rc;
}
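/* __frwr_prepare_linv_wr - Build one LOCAL_INV WR for an MR.
 *
 * The WR is returned to the caller so that a chain of them can be
 * posted with a single ib_post_send call (see frwr_op_unmap_sync).
 */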
static struct ib_send_wr *
__frwr_prepare_linv_wr(struct rpcrdma_mr_seg *seg)
{
	struct rpcrdma_mw *mw = seg->rl_mw;
	struct rpcrdma_frmr *f = &mw->frmr;
	struct ib_send_wr *invalidate_wr;

	f->fr_state = FRMR_IS_INVALID;
	invalidate_wr = &f->fr_invwr;

	memset(invalidate_wr, 0, sizeof(*invalidate_wr));
	f->fr_cqe.done = frwr_wc_localinv;
	invalidate_wr->wr_cqe = &f->fr_cqe;
	invalidate_wr->opcode = IB_WR_LOCAL_INV;
	invalidate_wr->ex.invalidate_rkey = f->fr_mr->rkey;

	return invalidate_wr;
}
/* Invalidate all memory regions that were registered for "req".
 *
 * Sleeps until it is safe for the host CPU to access the
 * previously mapped memory regions.
 */
static void
frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *invalidate_wrs, *pos, *prev, *bad_wr;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_mr_seg *seg;
	unsigned int i, nchunks;
	struct rpcrdma_frmr *f;
	struct rpcrdma_mw *mw;
	int rc;

	dprintk("RPC: %s: req %p\n", __func__, req);

	/* ORDER: Invalidate all of the req's MRs first
	 *
	 * Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	invalidate_wrs = pos = prev = NULL;
	seg = NULL;
	for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
		seg = &req->rl_segments[i];

		pos = __frwr_prepare_linv_wr(seg);

		if (!invalidate_wrs)
			invalidate_wrs = pos;
		else
			prev->next = pos;
		prev = pos;

		i += seg->mr_nsegs;
	}
	f = &seg->rl_mw->frmr;

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete.
	 */
	f->fr_invwr.send_flags = IB_SEND_SIGNALED;
	f->fr_cqe.done = frwr_wc_localinv_wake;
	reinit_completion(&f->fr_linv_done);
	INIT_CQCOUNT(&r_xprt->rx_ep);

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless ri_id->qp is a valid pointer.
	 */
	rc = ib_post_send(ia->ri_id->qp, invalidate_wrs, &bad_wr);
	if (rc)
		goto reset_mrs;
	wait_for_completion(&f->fr_linv_done);

	/* ORDER: Now DMA unmap all of the req's MRs, and return
	 * them to the free MW list.
	 */
unmap:
	for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
		seg = &req->rl_segments[i];
		mw = seg->rl_mw;
		seg->rl_mw = NULL;

		ib_dma_unmap_sg(ia->ri_device,
				mw->mw_sg, mw->mw_nents, mw->mw_dir);
		rpcrdma_put_mw(r_xprt, mw);

		i += seg->mr_nsegs;
		seg->mr_nsegs = 0;
	}

	req->rl_nchunks = 0;
	return;

reset_mrs:
	pr_err("rpcrdma: FRMR invalidate ib_post_send returned %i\n", rc);
	rdma_disconnect(ia->ri_id);

	/* Find and reset the MRs in the LOCAL_INV WRs that did not
	 * get posted. This is synchronous, and slow.
	 */
	for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
		seg = &req->rl_segments[i];
		mw = seg->rl_mw;

		if (mw->frmr.fr_mr->rkey == bad_wr->ex.invalidate_rkey) {
			__frwr_reset_mr(ia, mw);
			bad_wr = bad_wr->next;
		}

		i += seg->mr_nsegs;
	}
	goto unmap;
}
/* Use a slow, safe mechanism to invalidate all memory regions
 * that were registered for "req".
 */
static void
frwr_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
		   bool sync)
{
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mw *mw;
	unsigned int i;

	for (i = 0; req->rl_nchunks; req->rl_nchunks--) {
		seg = &req->rl_segments[i];
		mw = seg->rl_mw;

		if (sync)
			frwr_op_recover_mr(mw);
		else
			rpcrdma_defer_mr_recovery(mw);

		i += seg->mr_nsegs;
		seg->mr_nsegs = 0;
		seg->rl_mw = NULL;
	}
}
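/* Plug FRWR registration into the generic rpcrdma memory
 * registration API.
 */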
const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
	.ro_map				= frwr_op_map,
	.ro_unmap_sync			= frwr_op_unmap_sync,
	.ro_unmap_safe			= frwr_op_unmap_safe,
	.ro_recover_mr			= frwr_op_recover_mr,
	.ro_open			= frwr_op_open,
	.ro_maxpages			= frwr_op_maxpages,
	.ro_init_mr			= frwr_op_init_mr,
	.ro_release_mr			= frwr_op_release_mr,
	.ro_displayname			= "frwr",
};