/*
 * Copyright (c) 2015 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc.  All rights reserved.
 */

/* No-op chunk preparation. All client memory is pre-registered.
 * Sometimes referred to as ALLPHYSICAL mode.
 *
 * Physical registration is simple because all client memory is
 * pre-registered and never deregistered. This mode is good for
 * adapter bring up, but is considered not safe: the server is
 * trusted not to abuse its access to client memory not involved
 * in RDMA I/O.
 */
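
/* The functions below are static and are published only through the
 * rpcrdma_memreg_ops vector at the bottom of this file; the transport
 * invokes each memory registration operation through that vector.
 */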

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

static int
physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
		 struct rpcrdma_create_data_internal *cdata)
{
	struct ib_mr *mr;

	/* Obtain an rkey to use for RPC data payloads.
	 */
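	/* A single DMA MR covers every DMA-mapped buffer on this
	 * connection; REMOTE_WRITE and REMOTE_READ allow the server
	 * to use that one rkey for both Read and Write chunks.
	 */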
	mr = ib_get_dma_mr(ia->ri_pd,
			   IB_ACCESS_LOCAL_WRITE |
			   IB_ACCESS_REMOTE_WRITE |
			   IB_ACCESS_REMOTE_READ);
	if (IS_ERR(mr)) {
		pr_err("%s: ib_get_dma_mr failed with %lX\n",
		       __func__, PTR_ERR(mr));
		return -ENOMEM;
	}
	ia->ri_dma_mr = mr;
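
	/* The segment count passed here is capped at the smaller of the
	 * transport's data segment maximum and the header segment
	 * maximum, and is used to size the RPC-over-RDMA headers.
	 */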
	rpcrdma_set_max_header_sizes(ia, cdata, min_t(unsigned int,
						      RPCRDMA_MAX_DATA_SEGS,
						      RPCRDMA_MAX_HDR_SEGS));
	return 0;
}

/* PHYSICAL memory registration conveys one page per chunk segment.
 */
static size_t
physical_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     RPCRDMA_MAX_HDR_SEGS);
}
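
/* Nothing to set up: all client memory is covered by the DMA MR
 * obtained in ->ro_open, so no per-transport MR state is needed.
 */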
static int
physical_op_init(struct rpcrdma_xprt *r_xprt)
{
	return 0;
}

/* The client's physical memory is already exposed for
 * remote access via RDMA READ or RDMA WRITE.
 */
static int
physical_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
		int nsegs, bool writing)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
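
	/* No registration work is needed here: the segment is simply
	 * DMA mapped and then described with the connection-wide DMA
	 * rkey. Exactly one segment is consumed per call.
	 */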
	rpcrdma_map_one(ia->ri_device, seg, rpcrdma_data_dir(writing));
	seg->mr_rkey = ia->ri_dma_mr->rkey;
	seg->mr_base = seg->mr_dma;
	return 1;
}

/* DMA unmap all memory regions that were mapped for "req".
 */
static void
physical_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_device *device = r_xprt->rx_ia.ri_device;
	unsigned int i;
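
	/* Unmap each segment that ->ro_map prepared for this request,
	 * counting rl_nchunks down to zero as the segments are handled.
	 */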
	for (i = 0; req->rl_nchunks; --req->rl_nchunks)
		rpcrdma_unmap_one(device, &req->rl_segments[i++]);
}

/* Use a slow, safe mechanism to invalidate all memory regions
 * that were registered for "req".
 *
 * For physical memory registration, there is no good way to
 * fence a single MR that has been advertised to the server. The
 * client has already handed the server an R_key that cannot be
 * invalidated and is shared by all MRs on this connection.
 * Tearing down the PD might be the only safe choice, but it's
 * not clear that a freshly acquired DMA R_key would be different
 * than the one used by the PD that was just destroyed.
 */
static void
physical_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
		       bool sync)
{
	physical_op_unmap_sync(r_xprt, req);
}
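
/* ->ro_init created no MRs for this strategy, so there is nothing
 * to release when the buffer pool is destroyed.
 */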
static void
physical_op_destroy(struct rpcrdma_buffer *buf)
{
}

const struct rpcrdma_memreg_ops rpcrdma_physical_memreg_ops = {
	.ro_map			= physical_op_map,
	.ro_unmap_sync		= physical_op_unmap_sync,
	.ro_unmap_safe		= physical_op_unmap_safe,
	.ro_open		= physical_op_open,
	.ro_maxpages		= physical_op_maxpages,
	.ro_init		= physical_op_init,
	.ro_destroy		= physical_op_destroy,
	.ro_displayname		= "physical",
};