3750596cc432038ca524604e43479eb0af2012c5
net/sunrpc/xprtrdma/physical_ops.c (deliverable/linux.git)
/*
 * Copyright (c) 2015 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc.  All rights reserved.
 */

/* No-op chunk preparation. All client memory is pre-registered.
 * Sometimes referred to as ALLPHYSICAL mode.
 *
 * Physical registration is simple because all client memory is
 * pre-registered and never deregistered. This mode is good for
 * adapter bring-up, but is considered not safe: the server is
 * trusted not to abuse its access to client memory not involved
 * in RDMA I/O.
 */

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY        RPCDBG_TRANS
#endif

static int
physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
                 struct rpcrdma_create_data_internal *cdata)
{
        struct ib_mr *mr;

        /* Obtain an rkey to use for RPC data payloads.
         */
        mr = ib_get_dma_mr(ia->ri_pd,
                           IB_ACCESS_LOCAL_WRITE |
                           IB_ACCESS_REMOTE_WRITE |
                           IB_ACCESS_REMOTE_READ);
        if (IS_ERR(mr)) {
                pr_err("%s: ib_get_dma_mr failed with %lX\n",
                       __func__, PTR_ERR(mr));
                return -ENOMEM;
        }
        ia->ri_dma_mr = mr;

        rpcrdma_set_max_header_sizes(ia, cdata, min_t(unsigned int,
                                                      RPCRDMA_MAX_DATA_SEGS,
                                                      RPCRDMA_MAX_HDR_SEGS));
        return 0;
}

/* PHYSICAL memory registration conveys one page per chunk segment.
 */
static size_t
physical_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
        return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
                     RPCRDMA_MAX_HDR_SEGS);
}

/* All client memory is pre-registered, so there is no per-transport
 * registration state to set up.
 */
static int
physical_op_init(struct rpcrdma_xprt *r_xprt)
{
        return 0;
}

/* The client's physical memory is already exposed for
 * remote access via RDMA READ or RDMA WRITE.
 */
static int
physical_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
                int nsegs, bool writing)
{
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;

        rpcrdma_map_one(ia->ri_device, seg, rpcrdma_data_dir(writing));
        seg->mr_rkey = ia->ri_dma_mr->rkey;
        seg->mr_base = seg->mr_dma;
        return 1;
}
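
/* Call-site sketch (illustrative, not part of this file): the chunk
 * marshaling code in rpc_rdma.c invokes ->ro_map() repeatedly while
 * building a chunk list, consuming however many segments each call
 * reports as registered.  The loop below is paraphrased from memory
 * and should be read as an approximation, not a quote:
 *
 *	do {
 *		n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs, writing);
 *		if (n <= 0)
 *			goto out;
 *		seg += n;
 *		nsegs -= n;
 *	} while (nsegs);
 *
 * For ALLPHYSICAL, physical_op_map() handles exactly one page-sized
 * segment and returns 1, so the loop runs once per segment.
 */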

/* DMA unmap all memory regions that were mapped for "req".
 */
static void
physical_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
        struct ib_device *device = r_xprt->rx_ia.ri_device;
        unsigned int i;

        /* rl_nchunks counts down as each mapped segment is unmapped,
         * leaving the request with zero mapped chunks when the loop
         * completes.
         */
        for (i = 0; req->rl_nchunks; --req->rl_nchunks)
                rpcrdma_unmap_one(device, &req->rl_segments[i++]);
}

/* Use a slow, safe mechanism to invalidate all memory regions
 * that were registered for "req".
 *
 * For physical memory registration, there is no good way to
 * fence a single MR that has been advertised to the server. The
 * client has already handed the server an R_key that cannot be
 * invalidated and is shared by all MRs on this connection.
 * Tearing down the PD might be the only safe choice, but it's
 * not clear that a freshly acquired DMA R_key would be different
 * from the one used by the PD that was just destroyed.
 * FIXME.
 */
static void
physical_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
                       bool sync)
{
        physical_op_unmap_sync(r_xprt, req);
}

/* Nothing was allocated by physical_op_init, so there is nothing
 * to release here.
 */
static void
physical_op_destroy(struct rpcrdma_buffer *buf)
{
}

const struct rpcrdma_memreg_ops rpcrdma_physical_memreg_ops = {
        .ro_map                 = physical_op_map,
        .ro_unmap_sync          = physical_op_unmap_sync,
        .ro_unmap_safe          = physical_op_unmap_safe,
        .ro_open                = physical_op_open,
        .ro_maxpages            = physical_op_maxpages,
        .ro_init                = physical_op_init,
        .ro_destroy             = physical_op_destroy,
        .ro_displayname         = "physical",
};
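
/* Selection sketch (illustrative, not part of this file): transport
 * setup chooses a memreg vtable based on the configured registration
 * strategy.  The other vtable names and memreg enum values below come
 * from elsewhere in the xprtrdma code and are cited from memory; treat
 * the switch as a paraphrase rather than a quote:
 *
 *	switch (memreg) {
 *	case RPCRDMA_ALLPHYSICAL:
 *		ia->ri_ops = &rpcrdma_physical_memreg_ops;
 *		break;
 *	case RPCRDMA_FRMR:
 *		ia->ri_ops = &rpcrdma_frwr_memreg_ops;
 *		break;
 *	case RPCRDMA_MTHCAFMR:
 *		ia->ri_ops = &rpcrdma_fmr_memreg_ops;
 *		break;
 *	}
 */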