/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * transport.c
 *
 * This file contains the top-level implementation of an RPC RDMA
 * transport.
 *
 * Naming convention: functions beginning with xprt_ are part of the
 * transport switch. All others are RPC RDMA internal.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/sunrpc/addr.h>

#include "xprt_rdma.h"

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

MODULE_LICENSE("Dual BSD/GPL");

MODULE_DESCRIPTION("RPC/RDMA Transport for Linux kernel NFS");
MODULE_AUTHOR("Network Appliance, Inc.");

/*
 * tunables
 */

static unsigned int xprt_rdma_slot_table_entries = RPCRDMA_DEF_SLOT_TABLE;
static unsigned int xprt_rdma_max_inline_read = RPCRDMA_DEF_INLINE;
static unsigned int xprt_rdma_max_inline_write = RPCRDMA_DEF_INLINE;
static unsigned int xprt_rdma_inline_write_padding;
static unsigned int xprt_rdma_memreg_strategy = RPCRDMA_FRMR;
int xprt_rdma_pad_optimize = 0;

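/*
 * When RPC_DEBUG is enabled, the tunables above are exposed as sysctls
 * under /proc/sys/sunrpc (rdma_slot_table_entries, rdma_max_inline_read,
 * rdma_memreg_strategy, and so on) via the tables registered below.
 */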
#ifdef RPC_DEBUG

static unsigned int min_slot_table_size = RPCRDMA_MIN_SLOT_TABLE;
static unsigned int max_slot_table_size = RPCRDMA_MAX_SLOT_TABLE;
static unsigned int zero;
static unsigned int max_padding = PAGE_SIZE;
static unsigned int min_memreg = RPCRDMA_BOUNCEBUFFERS;
static unsigned int max_memreg = RPCRDMA_LAST - 1;

static struct ctl_table_header *sunrpc_table_header;

static ctl_table xr_tunables_table[] = {
	{
		.procname	= "rdma_slot_table_entries",
		.data		= &xprt_rdma_slot_table_entries,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_slot_table_size,
		.extra2		= &max_slot_table_size
	},
	{
		.procname	= "rdma_max_inline_read",
		.data		= &xprt_rdma_max_inline_read,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "rdma_max_inline_write",
		.data		= &xprt_rdma_max_inline_write,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "rdma_inline_write_padding",
		.data		= &xprt_rdma_inline_write_padding,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
		.extra2		= &max_padding,
	},
	{
		.procname	= "rdma_memreg_strategy",
		.data		= &xprt_rdma_memreg_strategy,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_memreg,
		.extra2		= &max_memreg,
	},
	{
		.procname	= "rdma_pad_optimize",
		.data		= &xprt_rdma_pad_optimize,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ },
};

static ctl_table sunrpc_table[] = {
	{
		.procname	= "sunrpc",
		.mode		= 0555,
		.child		= xr_tunables_table
	},
	{ },
};

#endif

static struct rpc_xprt_ops xprt_rdma_procs;	/* forward reference */

static void
xprt_rdma_format_addresses(struct rpc_xprt *xprt)
{
	struct sockaddr *sap = (struct sockaddr *)
					&rpcx_to_rdmad(xprt).addr;
	struct sockaddr_in *sin = (struct sockaddr_in *)sap;
	char buf[64];

	(void)rpc_ntop(sap, buf, sizeof(buf));
	xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL);

	snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
	xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);

	xprt->address_strings[RPC_DISPLAY_PROTO] = "rdma";

	snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
	xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);

	snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
	xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);

	/* netid */
	xprt->address_strings[RPC_DISPLAY_NETID] = "rdma";
}

static void
xprt_rdma_free_addresses(struct rpc_xprt *xprt)
{
	unsigned int i;

	for (i = 0; i < RPC_DISPLAY_MAX; i++)
		switch (i) {
		case RPC_DISPLAY_PROTO:
		case RPC_DISPLAY_NETID:
			continue;
		default:
			kfree(xprt->address_strings[i]);
		}
}

static void
xprt_rdma_connect_worker(struct work_struct *work)
{
	struct rpcrdma_xprt *r_xprt =
		container_of(work, struct rpcrdma_xprt, rdma_connect.work);
	struct rpc_xprt *xprt = &r_xprt->xprt;
	int rc = 0;

	current->flags |= PF_FSTRANS;
	xprt_clear_connected(xprt);

	dprintk("RPC: %s: %sconnect\n", __func__,
		r_xprt->rx_ep.rep_connected != 0 ? "re" : "");
	rc = rpcrdma_ep_connect(&r_xprt->rx_ep, &r_xprt->rx_ia);
	if (rc)
		xprt_wake_pending_tasks(xprt, rc);

	dprintk("RPC: %s: exit\n", __func__);
	xprt_clear_connecting(xprt);
	current->flags &= ~PF_FSTRANS;
}

/*
 * xprt_rdma_destroy
 *
 * Destroy the xprt.
 * Free all memory associated with the object, including its own.
 * NOTE: none of the *destroy methods free memory for their top-level
 * objects, even though they may have allocated it (they do free
 * private memory). It's up to the caller to handle it. In this
 * case (RDMA transport), all structure memory is inlined with the
 * struct rpcrdma_xprt.
 */
static void
xprt_rdma_destroy(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	int rc;

	dprintk("RPC: %s: called\n", __func__);

	cancel_delayed_work_sync(&r_xprt->rdma_connect);

	xprt_clear_connected(xprt);

	rpcrdma_buffer_destroy(&r_xprt->rx_buf);
	rc = rpcrdma_ep_destroy(&r_xprt->rx_ep, &r_xprt->rx_ia);
	if (rc)
		dprintk("RPC: %s: rpcrdma_ep_destroy returned %i\n",
			__func__, rc);
	rpcrdma_ia_close(&r_xprt->rx_ia);

	xprt_rdma_free_addresses(xprt);

	xprt_free(xprt);

	dprintk("RPC: %s: returning\n", __func__);

	module_put(THIS_MODULE);
}

static const struct rpc_timeout xprt_rdma_default_timeout = {
	.to_initval = 60 * HZ,
	.to_maxval = 60 * HZ,
};

/**
 * xprt_setup_rdma - Set up transport to use RDMA
 *
 * @args: rpc transport arguments
 */
static struct rpc_xprt *
xprt_setup_rdma(struct xprt_create *args)
{
	struct rpcrdma_create_data_internal cdata;
	struct rpc_xprt *xprt;
	struct rpcrdma_xprt *new_xprt;
	struct rpcrdma_ep *new_ep;
	struct sockaddr_in *sin;
	int rc;

	if (args->addrlen > sizeof(xprt->addr)) {
		dprintk("RPC: %s: address too large\n", __func__);
		return ERR_PTR(-EBADF);
	}

	xprt = xprt_alloc(args->net, sizeof(struct rpcrdma_xprt),
			xprt_rdma_slot_table_entries,
			xprt_rdma_slot_table_entries);
	if (xprt == NULL) {
		dprintk("RPC: %s: couldn't allocate rpcrdma_xprt\n",
			__func__);
		return ERR_PTR(-ENOMEM);
	}

	/* 60 second timeout, no retries */
	xprt->timeout = &xprt_rdma_default_timeout;
	xprt->bind_timeout = (60U * HZ);
	xprt->reestablish_timeout = (5U * HZ);
	xprt->idle_timeout = (5U * 60 * HZ);

	xprt->resvport = 0;		/* privileged port not needed */
	xprt->tsh_size = 0;		/* RPC-RDMA handles framing */
	xprt->max_payload = RPCRDMA_MAX_DATA_SEGS * PAGE_SIZE;
	xprt->ops = &xprt_rdma_procs;

	/*
	 * Set up RDMA-specific connect data.
	 */

	/* Put server RDMA address in local cdata */
	memcpy(&cdata.addr, args->dstaddr, args->addrlen);

	/* Ensure xprt->addr holds valid server TCP (not RDMA)
	 * address, for any side protocols which peek at it */
	xprt->prot = IPPROTO_TCP;
	xprt->addrlen = args->addrlen;
	memcpy(&xprt->addr, &cdata.addr, xprt->addrlen);

	sin = (struct sockaddr_in *)&cdata.addr;
	if (ntohs(sin->sin_port) != 0)
		xprt_set_bound(xprt);

	dprintk("RPC: %s: %pI4:%u\n",
		__func__, &sin->sin_addr.s_addr, ntohs(sin->sin_port));

	/* Set max requests */
	cdata.max_requests = xprt->max_reqs;

	/* Set some length limits */
	cdata.rsize = RPCRDMA_MAX_SEGS * PAGE_SIZE; /* RDMA write max */
	cdata.wsize = RPCRDMA_MAX_SEGS * PAGE_SIZE; /* RDMA read max */

	cdata.inline_wsize = xprt_rdma_max_inline_write;
	if (cdata.inline_wsize > cdata.wsize)
		cdata.inline_wsize = cdata.wsize;

	cdata.inline_rsize = xprt_rdma_max_inline_read;
	if (cdata.inline_rsize > cdata.rsize)
		cdata.inline_rsize = cdata.rsize;

	cdata.padding = xprt_rdma_inline_write_padding;

	/*
	 * Create new transport instance, which includes initialized
	 *  o ia
	 *  o endpoint
	 *  o buffers
	 */

	new_xprt = rpcx_to_rdmax(xprt);

	rc = rpcrdma_ia_open(new_xprt, (struct sockaddr *) &cdata.addr,
				xprt_rdma_memreg_strategy);
	if (rc)
		goto out1;

	/*
	 * initialize and create ep
	 */
	new_xprt->rx_data = cdata;
	new_ep = &new_xprt->rx_ep;
	new_ep->rep_remote_addr = cdata.addr;

	rc = rpcrdma_ep_create(&new_xprt->rx_ep,
				&new_xprt->rx_ia, &new_xprt->rx_data);
	if (rc)
		goto out2;

	/*
	 * Allocate pre-registered send and receive buffers for headers and
	 * any inline data. Also specify any padding which will be provided
	 * from a preregistered zero buffer.
	 */
	rc = rpcrdma_buffer_create(&new_xprt->rx_buf, new_ep, &new_xprt->rx_ia,
				&new_xprt->rx_data);
	if (rc)
		goto out3;

	/*
	 * Register a callback for connection events. This is necessary because
	 * connection loss notification is async. We also catch connection loss
	 * when reaping receives.
	 */
	INIT_DELAYED_WORK(&new_xprt->rdma_connect, xprt_rdma_connect_worker);
	new_ep->rep_func = rpcrdma_conn_func;
	new_ep->rep_xprt = xprt;

	xprt_rdma_format_addresses(xprt);

	if (!try_module_get(THIS_MODULE))
		goto out4;

	return xprt;

out4:
	xprt_rdma_free_addresses(xprt);
	rc = -EINVAL;
out3:
	(void) rpcrdma_ep_destroy(new_ep, &new_xprt->rx_ia);
out2:
	rpcrdma_ia_close(&new_xprt->rx_ia);
out1:
	xprt_free(xprt);
	return ERR_PTR(rc);
}

/*
 * Close a connection, during shutdown or timeout/reconnect
 */
static void
xprt_rdma_close(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	dprintk("RPC: %s: closing\n", __func__);
	if (r_xprt->rx_ep.rep_connected > 0)
		xprt->reestablish_timeout = 0;
	xprt_disconnect_done(xprt);
	(void) rpcrdma_ep_disconnect(&r_xprt->rx_ep, &r_xprt->rx_ia);
}

static void
xprt_rdma_set_port(struct rpc_xprt *xprt, u16 port)
{
	struct sockaddr_in *sap;

	sap = (struct sockaddr_in *)&xprt->addr;
	sap->sin_port = htons(port);
	sap = (struct sockaddr_in *)&rpcx_to_rdmad(xprt).addr;
	sap->sin_port = htons(port);
	dprintk("RPC: %s: %u\n", __func__, port);
}

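/*
 * On a reconnect, the connect worker is scheduled after the current
 * reestablish timeout, which is then doubled and clamped to the range
 * 5-30 seconds. A first-time connect is kicked off immediately, and
 * synchronous tasks wait here for the worker to complete.
 */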
static void
xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	if (r_xprt->rx_ep.rep_connected != 0) {
		/* Reconnect */
		schedule_delayed_work(&r_xprt->rdma_connect,
			xprt->reestablish_timeout);
		xprt->reestablish_timeout <<= 1;
		if (xprt->reestablish_timeout > (30 * HZ))
			xprt->reestablish_timeout = (30 * HZ);
		else if (xprt->reestablish_timeout < (5 * HZ))
			xprt->reestablish_timeout = (5 * HZ);
	} else {
		schedule_delayed_work(&r_xprt->rdma_connect, 0);
		if (!RPC_IS_ASYNC(task))
			flush_delayed_work(&r_xprt->rdma_connect);
	}
}

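/*
 * The RPC congestion window is scaled by the number of RPC/RDMA
 * credits the server has granted, so the client never has more
 * requests outstanding than the server is prepared to receive.
 */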
static int
xprt_rdma_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	int credits = atomic_read(&r_xprt->rx_buf.rb_credits);

	/* == RPC_CWNDSCALE @ init, but *after* setup */
	if (r_xprt->rx_buf.rb_cwndscale == 0UL) {
		r_xprt->rx_buf.rb_cwndscale = xprt->cwnd;
		dprintk("RPC: %s: cwndscale %lu\n", __func__,
			r_xprt->rx_buf.rb_cwndscale);
		BUG_ON(r_xprt->rx_buf.rb_cwndscale <= 0);
	}
	xprt->cwnd = credits * r_xprt->rx_buf.rb_cwndscale;
	return xprt_reserve_xprt_cong(xprt, task);
}

/*
 * The RDMA allocate/free functions need the task structure as a place
 * to hide the struct rpcrdma_req, which is necessary for the actual send/recv
 * sequence. For this reason, the recv buffers are attached to send
 * buffers for portions of the RPC. Note that the RPC layer allocates
 * both send and receive buffers in the same call. We may register
 * the receive buffer portion when using reply chunks.
 */
static void *
xprt_rdma_allocate(struct rpc_task *task, size_t size)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
	struct rpcrdma_req *req, *nreq;

	req = rpcrdma_buffer_get(&rpcx_to_rdmax(xprt)->rx_buf);
	BUG_ON(NULL == req);

	if (size > req->rl_size) {
		dprintk("RPC: %s: size %zd too large for buffer[%zd]: "
			"prog %d vers %d proc %d\n",
			__func__, size, req->rl_size,
			task->tk_client->cl_prog, task->tk_client->cl_vers,
			task->tk_msg.rpc_proc->p_proc);
		/*
		 * Outgoing length shortage. Our inline write max must have
		 * been configured to perform direct i/o.
		 *
		 * This is therefore a large metadata operation, and the
		 * allocate call was made on the maximum possible message,
		 * e.g. containing long filename(s) or symlink data. In
		 * fact, while these metadata operations *might* carry
		 * large outgoing payloads, they rarely *do*. However, we
		 * have to commit to the request here, so reallocate and
		 * register it now. The data path will never require this
		 * reallocation.
		 *
		 * If the allocation or registration fails, the RPC framework
		 * will (doggedly) retry.
		 */
		if (rpcx_to_rdmax(xprt)->rx_ia.ri_memreg_strategy ==
				RPCRDMA_BOUNCEBUFFERS) {
			/* forced to "pure inline" */
			dprintk("RPC: %s: too much data (%zd) for inline "
				"(r/w max %d/%d)\n", __func__, size,
				rpcx_to_rdmad(xprt).inline_rsize,
				rpcx_to_rdmad(xprt).inline_wsize);
			size = req->rl_size;
			rpc_exit(task, -EIO);		/* fail the operation */
			rpcx_to_rdmax(xprt)->rx_stats.failed_marshal_count++;
			goto out;
		}
		if (task->tk_flags & RPC_TASK_SWAPPER)
			nreq = kmalloc(sizeof *req + size, GFP_ATOMIC);
		else
			nreq = kmalloc(sizeof *req + size, GFP_NOFS);
		if (nreq == NULL)
			goto outfail;

		if (rpcrdma_register_internal(&rpcx_to_rdmax(xprt)->rx_ia,
				nreq->rl_base, size + sizeof(struct rpcrdma_req)
				- offsetof(struct rpcrdma_req, rl_base),
				&nreq->rl_handle, &nreq->rl_iov)) {
			kfree(nreq);
			goto outfail;
		}
		rpcx_to_rdmax(xprt)->rx_stats.hardway_register_count += size;
		nreq->rl_size = size;
		nreq->rl_niovs = 0;
		nreq->rl_nchunks = 0;
		nreq->rl_buffer = (struct rpcrdma_buffer *)req;
		nreq->rl_reply = req->rl_reply;
		memcpy(nreq->rl_segments,
			req->rl_segments, sizeof nreq->rl_segments);
		/* flag the swap with an unused field */
		nreq->rl_iov.length = 0;
		req->rl_reply = NULL;
		req = nreq;
	}
	dprintk("RPC: %s: size %zd, request 0x%p\n", __func__, size, req);
out:
	req->rl_connect_cookie = 0;	/* our reserved value */
	return req->rl_xdr_buf;

outfail:
	rpcrdma_buffer_put(req);
	rpcx_to_rdmax(xprt)->rx_stats.failed_marshal_count++;
	return NULL;
}

/*
 * This function returns all RDMA resources to the pool.
 */
static void
xprt_rdma_free(void *buffer)
{
	struct rpcrdma_req *req;
	struct rpcrdma_xprt *r_xprt;
	struct rpcrdma_rep *rep;
	int i;

	if (buffer == NULL)
		return;

	req = container_of(buffer, struct rpcrdma_req, rl_xdr_buf[0]);
	if (req->rl_iov.length == 0) {	/* see allocate above */
		r_xprt = container_of(((struct rpcrdma_req *) req->rl_buffer)->rl_buffer,
				      struct rpcrdma_xprt, rx_buf);
	} else
		r_xprt = container_of(req->rl_buffer, struct rpcrdma_xprt, rx_buf);
	rep = req->rl_reply;

	dprintk("RPC: %s: called on 0x%p%s\n",
		__func__, rep, (rep && rep->rr_func) ? " (with waiter)" : "");

	/*
	 * Finish the deregistration. When using mw bind, this was
	 * begun in rpcrdma_reply_handler(). In all other modes, we
	 * do it here, in thread context. The process is considered
	 * complete when the rr_func vector becomes NULL - this
	 * was put in place during rpcrdma_reply_handler() - the wait
	 * call below will not block if the dereg is "done". If
	 * interrupted, our framework will clean up.
	 */
	for (i = 0; req->rl_nchunks;) {
		--req->rl_nchunks;
		i += rpcrdma_deregister_external(
			&req->rl_segments[i], r_xprt, NULL);
	}

	if (rep && wait_event_interruptible(rep->rr_unbind, !rep->rr_func)) {
		rep->rr_func = NULL;	/* abandon the callback */
		req->rl_reply = NULL;
	}

	if (req->rl_iov.length == 0) {	/* see allocate above */
		struct rpcrdma_req *oreq = (struct rpcrdma_req *)req->rl_buffer;
		oreq->rl_reply = req->rl_reply;
		(void) rpcrdma_deregister_internal(&r_xprt->rx_ia,
						   req->rl_handle,
						   &req->rl_iov);
		kfree(req);
		req = oreq;
	}

	/* Put back request+reply buffers */
	rpcrdma_buffer_put(req);
}

/*
 * send_request invokes the meat of RPC RDMA. It must do the following:
 *  1.  Marshal the RPC request into an RPC RDMA request, which means
 *	putting a header in front of data, and creating IOVs for RDMA
 *	from those in the request.
 *  2.  In marshaling, detect opportunities for RDMA, and use them.
 *  3.  Post a recv message to set up async completion, then send
 *	the request (rpcrdma_ep_post).
 *  4.  No partial sends are possible in the RPC-RDMA protocol (as in UDP).
 */

static int
xprt_rdma_send_request(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	/* marshal the send itself */
	if (req->rl_niovs == 0 && rpcrdma_marshal_req(rqst) != 0) {
		r_xprt->rx_stats.failed_marshal_count++;
		dprintk("RPC: %s: rpcrdma_marshal_req failed\n",
			__func__);
		return -EIO;
	}

	if (req->rl_reply == NULL) 		/* e.g. reconnection */
		rpcrdma_recv_buffer_get(req);

	if (req->rl_reply) {
		req->rl_reply->rr_func = rpcrdma_reply_handler;
		/* this need only be done once, but... */
		req->rl_reply->rr_xprt = xprt;
	}

	/* Must suppress retransmit to maintain credits */
	if (req->rl_connect_cookie == xprt->connect_cookie)
		goto drop_connection;
	req->rl_connect_cookie = xprt->connect_cookie;

	if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
		goto drop_connection;

	rqst->rq_xmit_bytes_sent += rqst->rq_snd_buf.len;
	rqst->rq_bytes_sent = 0;
	return 0;

drop_connection:
	xprt_disconnect_done(xprt);
	return -ENOTCONN;	/* implies disconnect */
}

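/*
 * These counters feed the "xprt:\trdma ..." line emitted through the
 * rpc iostats machinery, e.g. in an NFS mount's /proc/self/mountstats.
 */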
static void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	long idle_time = 0;

	if (xprt_connected(xprt))
		idle_time = (long)(jiffies - xprt->last_used) / HZ;

	seq_printf(seq,
		"\txprt:\trdma %u %lu %lu %lu %ld %lu %lu %lu %Lu %Lu "
		"%lu %lu %lu %Lu %Lu %Lu %Lu %lu %lu %lu\n",

		0,	/* need a local port? */
		xprt->stat.bind_count,
		xprt->stat.connect_count,
		xprt->stat.connect_time,
		idle_time,
		xprt->stat.sends,
		xprt->stat.recvs,
		xprt->stat.bad_xids,
		xprt->stat.req_u,
		xprt->stat.bklog_u,

		r_xprt->rx_stats.read_chunk_count,
		r_xprt->rx_stats.write_chunk_count,
		r_xprt->rx_stats.reply_chunk_count,
		r_xprt->rx_stats.total_rdma_request,
		r_xprt->rx_stats.total_rdma_reply,
		r_xprt->rx_stats.pullup_copy_count,
		r_xprt->rx_stats.fixup_copy_count,
		r_xprt->rx_stats.hardway_register_count,
		r_xprt->rx_stats.failed_marshal_count,
		r_xprt->rx_stats.bad_reply_count);
}

/*
 * Plumbing for rpc transport switch and kernel module
 */

static struct rpc_xprt_ops xprt_rdma_procs = {
	.reserve_xprt		= xprt_rdma_reserve_xprt,
	.release_xprt		= xprt_release_xprt_cong, /* sunrpc/xprt.c */
	.alloc_slot		= xprt_alloc_slot,
	.release_request	= xprt_release_rqst_cong, /* ditto */
	.set_retrans_timeout	= xprt_set_retrans_timeout_def, /* ditto */
	.rpcbind		= rpcb_getport_async,	/* sunrpc/rpcb_clnt.c */
	.set_port		= xprt_rdma_set_port,
	.connect		= xprt_rdma_connect,
	.buf_alloc		= xprt_rdma_allocate,
	.buf_free		= xprt_rdma_free,
	.send_request		= xprt_rdma_send_request,
	.close			= xprt_rdma_close,
	.destroy		= xprt_rdma_destroy,
	.print_stats		= xprt_rdma_print_stats
};

static struct xprt_class xprt_rdma = {
	.list			= LIST_HEAD_INIT(xprt_rdma.list),
	.name			= "rdma",
	.owner			= THIS_MODULE,
	.ident			= XPRT_TRANSPORT_RDMA,
	.setup			= xprt_setup_rdma,
};

static void __exit xprt_rdma_cleanup(void)
{
	int rc;

	dprintk(KERN_INFO "RPCRDMA Module Removed, deregister RPC RDMA transport\n");
#ifdef RPC_DEBUG
	if (sunrpc_table_header) {
		unregister_sysctl_table(sunrpc_table_header);
		sunrpc_table_header = NULL;
	}
#endif
	rc = xprt_unregister_transport(&xprt_rdma);
	if (rc)
		dprintk("RPC: %s: xprt_unregister returned %i\n",
			__func__, rc);
}

static int __init xprt_rdma_init(void)
{
	int rc;

	rc = xprt_register_transport(&xprt_rdma);

	if (rc)
		return rc;

	dprintk(KERN_INFO "RPCRDMA Module Init, register RPC RDMA transport\n");

	dprintk(KERN_INFO "Defaults:\n");
	dprintk(KERN_INFO "\tSlots %d\n"
		"\tMaxInlineRead %d\n\tMaxInlineWrite %d\n",
		xprt_rdma_slot_table_entries,
		xprt_rdma_max_inline_read, xprt_rdma_max_inline_write);
	dprintk(KERN_INFO "\tPadding %d\n\tMemreg %d\n",
		xprt_rdma_inline_write_padding, xprt_rdma_memreg_strategy);

#ifdef RPC_DEBUG
	if (!sunrpc_table_header)
		sunrpc_table_header = register_sysctl_table(sunrpc_table);
#endif
	return 0;
}

module_init(xprt_rdma_init);
module_exit(xprt_rdma_cleanup);
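
/*
 * Typical use, as an illustrative sketch only (assumes an RDMA-capable
 * interconnect and an NFS/RDMA server; 20049 is the IANA-assigned
 * NFS/RDMA port, and exact mount options may vary by distribution):
 *
 *	# modprobe xprtrdma
 *	# mount -t nfs -o proto=rdma,port=20049 server:/export /mnt
 */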