/* net/sunrpc/xprtrdma/physical_ops.c */
/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* No-op chunk preparation. All client memory is pre-registered.
 * Sometimes referred to as ALLPHYSICAL mode.
 *
 * Physical registration is simple because all client memory is
 * pre-registered and never deregistered. This mode is good for
 * adapter bring up, but is considered not safe: the server is
 * trusted not to abuse its access to client memory not involved
 * in RDMA I/O.
 */

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

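/* Illustrative sketch, not part of this file: a registration strategy
 * like this one is selected at transport setup time by pointing the
 * interface adapter's ops pointer at one of the rpcrdma_memreg_ops
 * tables. The switch below is an assumption about how verbs.c wires
 * this up; the other ops-table names belong to the FRWR and FMR modes:
 *
 *	switch (memreg) {
 *	case RPCRDMA_FRMR:
 *		ia->ri_ops = &rpcrdma_frwr_memreg_ops;
 *		break;
 *	case RPCRDMA_ALLPHYSICAL:
 *		ia->ri_ops = &rpcrdma_physical_memreg_ops;
 *		break;
 *	case RPCRDMA_MTHCAFMR:
 *		ia->ri_ops = &rpcrdma_fmr_memreg_ops;
 *		break;
 *	}
 */
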
static int
physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
		 struct rpcrdma_create_data_internal *cdata)
{
	struct ib_mr *mr;

	/* Obtain an rkey to use for RPC data payloads.
	 */
	mr = ib_get_dma_mr(ia->ri_pd,
			   IB_ACCESS_LOCAL_WRITE |
			   IB_ACCESS_REMOTE_WRITE |
			   IB_ACCESS_REMOTE_READ);
	if (IS_ERR(mr)) {
		pr_err("%s: ib_get_dma_mr failed with %lX\n",
		       __func__, PTR_ERR(mr));
		return -ENOMEM;
	}

	ia->ri_dma_mr = mr;
	return 0;
}

/* PHYSICAL memory registration conveys one page per chunk segment.
 */
static size_t
physical_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     RPCRDMA_MAX_HDR_SEGS);
}

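/* Illustrative sketch, not part of this file: the value returned by
 * ->ro_maxpages is a page count, which transport setup is assumed to
 * convert into the advertised maximum RPC payload in bytes, roughly:
 *
 *	xprt->max_payload = ia->ri_ops->ro_maxpages(r_xprt);
 *	xprt->max_payload <<= PAGE_SHIFT;
 *
 * Because this mode conveys exactly one page per chunk segment, the
 * payload cap is bounded not only by RPCRDMA_MAX_DATA_SEGS but also by
 * how many segments fit in the RPC-over-RDMA header
 * (RPCRDMA_MAX_HDR_SEGS).
 */
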
static int
physical_op_init(struct rpcrdma_xprt *r_xprt)
{
	return 0;
}

/* The client's physical memory is already exposed for
 * remote access via RDMA READ or RDMA WRITE.
 */
static int
physical_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
		int nsegs, bool writing)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	rpcrdma_map_one(ia->ri_device, seg, rpcrdma_data_dir(writing));
	seg->mr_rkey = ia->ri_dma_mr->rkey;
	seg->mr_base = seg->mr_dma;
	return 1;
}

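/* Illustrative sketch, not part of this file: the marshaling code is
 * assumed to consume the fields filled in above when it XDR-encodes
 * one RDMA segment (handle, length, offset) into the RPC-over-RDMA
 * header, along the lines of:
 *
 *	*iptr++ = cpu_to_be32(seg->mr_rkey);
 *	*iptr++ = cpu_to_be32(seg->mr_len);
 *	iptr = xdr_encode_hyper(iptr, seg->mr_base);
 *
 * With ALLPHYSICAL, mr_rkey is always the single rkey obtained in
 * physical_op_open(), and mr_base is the page's DMA address.
 */
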
/* Unmap a memory region, but leave it registered.
 */
static int
physical_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	rpcrdma_unmap_one(ia->ri_device, seg);
	return 1;
}

/* DMA unmap all memory regions that were mapped for "req".
 */
static void
physical_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_device *device = r_xprt->rx_ia.ri_device;
	unsigned int i;

	for (i = 0; req->rl_nchunks; --req->rl_nchunks)
		rpcrdma_unmap_one(device, &req->rl_segments[i++]);
}

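/* The loop above uses rl_nchunks (the count of still-mapped segments)
 * as its termination condition while i walks rl_segments[]. An
 * equivalent, perhaps more familiar formulation would be:
 *
 *	while (req->rl_nchunks--)
 *		rpcrdma_unmap_one(device, &req->rl_segments[i++]);
 */
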
static void
physical_op_destroy(struct rpcrdma_buffer *buf)
{
}

const struct rpcrdma_memreg_ops rpcrdma_physical_memreg_ops = {
	.ro_map				= physical_op_map,
	.ro_unmap_sync			= physical_op_unmap_sync,
	.ro_unmap			= physical_op_unmap,
	.ro_open			= physical_op_open,
	.ro_maxpages			= physical_op_maxpages,
	.ro_init			= physical_op_init,
	.ro_destroy			= physical_op_destroy,
	.ro_displayname			= "physical",
};
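
/* Illustrative sketch, not part of this file: callers never invoke the
 * physical_op_* functions directly; they dispatch through the ops
 * table. A hypothetical call site in the chunk-marshaling path might
 * look like:
 *
 *	n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs, writing);
 *	if (n <= 0)
 *		goto out_map_err;
 *
 * which lands in physical_op_map() when "physical" registration is in
 * effect.
 */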