/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * rpc_rdma.c
 *
 * This file contains the guts of the RPC RDMA protocol, and
 * does marshaling/unmarshaling, etc. It is also where interfacing
 * to the Linux RPC framework lives.
 */

#include "xprt_rdma.h"

#include <linux/highmem.h>

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

enum rpcrdma_chunktype {
	rpcrdma_noch = 0,
	rpcrdma_readch,
	rpcrdma_areadch,
	rpcrdma_writech,
	rpcrdma_replych
};

#ifdef RPC_DEBUG
static const char transfertypes[][12] = {
	"pure inline",	/* no chunks */
	" read chunk",	/* some argument via rdma read */
	"*read chunk",	/* entire request via rdma read */
	"write chunk",	/* some result via rdma write */
	"reply chunk"	/* entire reply via rdma write */
};
#endif

/*
 * Chunk assembly from upper layer xdr_buf.
 *
 * Prepare the passed-in xdr_buf into representation as RPC/RDMA chunk
 * elements. Segments are then coalesced when registered, if possible
 * within the selected memreg mode.
 *
 * Note, this routine is never called if the connection's memory
 * registration strategy is 0 (bounce buffers).
 */

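/* Returns the number of chunk segments set up in seg[], or 0 if the
 * message does not fit in the provided segment array.
 */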
static int
rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
	enum rpcrdma_chunktype type, struct rpcrdma_mr_seg *seg, int nsegs)
{
	int len, n = 0, p;
	int page_base;
	struct page **ppages;

	if (pos == 0 && xdrbuf->head[0].iov_len) {
		seg[n].mr_page = NULL;
		seg[n].mr_offset = xdrbuf->head[0].iov_base;
		seg[n].mr_len = xdrbuf->head[0].iov_len;
		++n;
	}

	len = xdrbuf->page_len;
	ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
	page_base = xdrbuf->page_base & ~PAGE_MASK;
	p = 0;
	while (len && n < nsegs) {
		seg[n].mr_page = ppages[p];
		seg[n].mr_offset = (void *)(unsigned long) page_base;
		seg[n].mr_len = min_t(u32, PAGE_SIZE - page_base, len);
		BUG_ON(seg[n].mr_len > PAGE_SIZE);
		len -= seg[n].mr_len;
		++n;
		++p;
		page_base = 0;	/* page offset only applies to first page */
	}

	/* Message overflows the seg array */
	if (len && n == nsegs)
		return 0;

	if (xdrbuf->tail[0].iov_len) {
		/* the rpcrdma protocol allows us to omit any trailing
		 * xdr pad bytes, saving the server an RDMA operation. */
		if (xdrbuf->tail[0].iov_len < 4 && xprt_rdma_pad_optimize)
			return n;
		if (n == nsegs)
			/* Tail remains, but we're out of segments */
			return 0;
		seg[n].mr_page = NULL;
		seg[n].mr_offset = xdrbuf->tail[0].iov_base;
		seg[n].mr_len = xdrbuf->tail[0].iov_len;
		++n;
	}

	return n;
}

/*
 * Create read/write chunk lists, and reply chunks, for RDMA
 *
 * Assume check against THRESHOLD has been done, and chunks are required.
 * Assume only encoding one list entry for read|write chunks. The NFSv3
 * protocol is simple enough to allow this as it only has a single "bulk
 * result" in each procedure - complicated NFSv4 COMPOUNDs are not. (The
 * RDMA/Sessions NFSv4 proposal addresses this for future v4 revs.)
 *
 * When used for a single reply chunk (which is a special write
 * chunk used for the entire reply, rather than just the data), it
 * is used primarily for READDIR and READLINK which would otherwise
 * be severely size-limited by a small rdma inline read max. The server
 * response will come back as an RDMA Write, followed by a message
 * of type RDMA_NOMSG carrying the xid and length. As a result, reply
 * chunks do not provide data alignment, however they do not require
 * "fixup" (moving the response to the upper layer buffer) either.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Read chunklist (a linked list):
 *   N elements, position P (same P for all chunks of same arg!):
 *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 *  Write chunklist (a list of (one) counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO - 0
 *
 *  Reply chunk (a counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO
 */
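/* For example, following the key above, a two-segment read chunk list
 * at XDR position P goes on the wire as
 *    1 - P H1 L1 O1O1 - 1 - P H2 L2 O2O2 - 0
 * while a two-segment write chunk list is
 *    1 - 2 - H1 L1 O1O1 - H2 L2 O2O2 - 0
 * and a two-segment reply chunk is the same, minus the trailing 0.
 */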

static unsigned int
rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
		struct rpcrdma_msg *headerp, enum rpcrdma_chunktype type)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	int nsegs, nchunks = 0;
	unsigned int pos;
	struct rpcrdma_mr_seg *seg = req->rl_segments;
	struct rpcrdma_read_chunk *cur_rchunk = NULL;
	struct rpcrdma_write_array *warray = NULL;
	struct rpcrdma_write_chunk *cur_wchunk = NULL;
	__be32 *iptr = headerp->rm_body.rm_chunks;

	if (type == rpcrdma_readch || type == rpcrdma_areadch) {
		/* a read chunk - server will RDMA Read our memory */
		cur_rchunk = (struct rpcrdma_read_chunk *) iptr;
	} else {
		/* a write or reply chunk - server will RDMA Write our memory */
		*iptr++ = xdr_zero;	/* encode a NULL read chunk list */
		if (type == rpcrdma_replych)
			*iptr++ = xdr_zero;	/* a NULL write chunk list */
		warray = (struct rpcrdma_write_array *) iptr;
		cur_wchunk = (struct rpcrdma_write_chunk *) (warray + 1);
	}

	if (type == rpcrdma_replych || type == rpcrdma_areadch)
		pos = 0;
	else
		pos = target->head[0].iov_len;

	nsegs = rpcrdma_convert_iovs(target, pos, type, seg, RPCRDMA_MAX_SEGS);
	if (nsegs == 0)
		return 0;

	do {
		/* bind/register the memory, then build chunk from result. */
		int n = rpcrdma_register_external(seg, nsegs,
					cur_wchunk != NULL, r_xprt);
		if (n <= 0)
			goto out;
		if (cur_rchunk) {	/* read */
			cur_rchunk->rc_discrim = xdr_one;
			/* all read chunks have the same "position" */
			cur_rchunk->rc_position = htonl(pos);
			cur_rchunk->rc_target.rs_handle = htonl(seg->mr_rkey);
			cur_rchunk->rc_target.rs_length = htonl(seg->mr_len);
			xdr_encode_hyper(
				(__be32 *)&cur_rchunk->rc_target.rs_offset,
				seg->mr_base);
			dprintk("RPC: %s: read chunk "
				"elem %d@0x%llx:0x%x pos %u (%s)\n", __func__,
				seg->mr_len, (unsigned long long)seg->mr_base,
				seg->mr_rkey, pos, n < nsegs ? "more" : "last");
			cur_rchunk++;
			r_xprt->rx_stats.read_chunk_count++;
		} else {		/* write/reply */
			cur_wchunk->wc_target.rs_handle = htonl(seg->mr_rkey);
			cur_wchunk->wc_target.rs_length = htonl(seg->mr_len);
			xdr_encode_hyper(
				(__be32 *)&cur_wchunk->wc_target.rs_offset,
				seg->mr_base);
			dprintk("RPC: %s: %s chunk "
				"elem %d@0x%llx:0x%x (%s)\n", __func__,
				(type == rpcrdma_replych) ? "reply" : "write",
				seg->mr_len, (unsigned long long)seg->mr_base,
				seg->mr_rkey, n < nsegs ? "more" : "last");
			cur_wchunk++;
			if (type == rpcrdma_replych)
				r_xprt->rx_stats.reply_chunk_count++;
			else
				r_xprt->rx_stats.write_chunk_count++;
			r_xprt->rx_stats.total_rdma_request += seg->mr_len;
		}
		nchunks++;
		seg += n;
		nsegs -= n;
	} while (nsegs);

	/* success. all failures return above */
	req->rl_nchunks = nchunks;

	/*
	 * finish off header. If write, marshal discrim and nchunks.
	 */
	if (cur_rchunk) {
		iptr = (__be32 *) cur_rchunk;
		*iptr++ = xdr_zero;	/* finish the read chunk list */
		*iptr++ = xdr_zero;	/* encode a NULL write chunk list */
		*iptr++ = xdr_zero;	/* encode a NULL reply chunk */
	} else {
		warray->wc_discrim = xdr_one;
		warray->wc_nchunks = htonl(nchunks);
		iptr = (__be32 *) cur_wchunk;
		if (type == rpcrdma_writech) {
			*iptr++ = xdr_zero;	/* finish the write chunk list */
			*iptr++ = xdr_zero;	/* encode a NULL reply chunk */
		}
	}

	/*
	 * Return header size.
	 */
	return (unsigned char *)iptr - (unsigned char *)headerp;

out:
	for (pos = 0; nchunks--;)
		pos += rpcrdma_deregister_external(
				&req->rl_segments[pos], r_xprt, NULL);
	return 0;
}

/*
 * Copy write data inline.
 * This function is used for "small" requests. Data which is passed
 * to RPC via iovecs (or page list) is copied directly into the
 * pre-registered memory buffer for this request. For small amounts
 * of data, this is efficient. The cutoff value is tunable.
 */
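/* Returns the number of XDR pad bytes to transmit after the pulled-up
 * data, or 0 when padding is not being used for this request.
 */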
static int
rpcrdma_inline_pullup(struct rpc_rqst *rqst, int pad)
{
	int i, npages, curlen;
	int copy_len;
	unsigned char *srcp, *destp;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	int page_base;
	struct page **ppages;

	destp = rqst->rq_svec[0].iov_base;
	curlen = rqst->rq_svec[0].iov_len;
	destp += curlen;
	/*
	 * Do optional padding where it makes sense. Alignment of write
	 * payload can help the server, if our setting is accurate.
	 */
	pad -= (curlen + 36/*sizeof(struct rpcrdma_msg_padded)*/);
	if (pad < 0 || rqst->rq_slen - curlen < RPCRDMA_INLINE_PAD_THRESH)
		pad = 0;	/* don't pad this request */

	dprintk("RPC: %s: pad %d destp 0x%p len %d hdrlen %d\n",
		__func__, pad, destp, rqst->rq_slen, curlen);

	copy_len = rqst->rq_snd_buf.page_len;

	if (rqst->rq_snd_buf.tail[0].iov_len) {
		curlen = rqst->rq_snd_buf.tail[0].iov_len;
		if (destp + copy_len != rqst->rq_snd_buf.tail[0].iov_base) {
			memmove(destp + copy_len,
				rqst->rq_snd_buf.tail[0].iov_base, curlen);
			r_xprt->rx_stats.pullup_copy_count += curlen;
		}
		dprintk("RPC: %s: tail destp 0x%p len %d\n",
			__func__, destp + copy_len, curlen);
		rqst->rq_svec[0].iov_len += curlen;
	}
	r_xprt->rx_stats.pullup_copy_count += copy_len;

	page_base = rqst->rq_snd_buf.page_base;
	ppages = rqst->rq_snd_buf.pages + (page_base >> PAGE_SHIFT);
	page_base &= ~PAGE_MASK;
	npages = PAGE_ALIGN(page_base+copy_len) >> PAGE_SHIFT;
	for (i = 0; copy_len && i < npages; i++) {
		curlen = PAGE_SIZE - page_base;
		if (curlen > copy_len)
			curlen = copy_len;
		dprintk("RPC: %s: page %d destp 0x%p len %d curlen %d\n",
			__func__, i, destp, copy_len, curlen);
		srcp = kmap_atomic(ppages[i]);
		memcpy(destp, srcp+page_base, curlen);
		kunmap_atomic(srcp);
		rqst->rq_svec[0].iov_len += curlen;
		destp += curlen;
		copy_len -= curlen;
		page_base = 0;
	}
	/* header now contains entire send message */
	return pad;
}

/*
 * Marshal a request: the primary job of this routine is to choose
 * the transfer modes. See comments below.
 *
 * Uses multiple RDMA IOVs for a request:
 *  [0] -- RPC RDMA header, which uses memory from the *start* of the
 *         preregistered buffer that already holds the RPC data in
 *         its middle.
 *  [1] -- the RPC header/data, marshaled by RPC and the NFS protocol.
 *  [2] -- optional padding.
 *  [3] -- if padded, header only in [1] and data here.
 */

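/* Returns 0 on success, or -1 if the request cannot be marshaled
 * (too much data for a bounce-buffer connection, or chunk creation
 * failed).
 */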
int
rpcrdma_marshal_req(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	char *base;
	size_t hdrlen, rpclen, padlen;
	enum rpcrdma_chunktype rtype, wtype;
	struct rpcrdma_msg *headerp;

	/*
	 * rpclen gets amount of data in first buffer, which is the
	 * pre-registered buffer.
	 */
	base = rqst->rq_svec[0].iov_base;
	rpclen = rqst->rq_svec[0].iov_len;

	/* build RDMA header in private area at front */
	headerp = (struct rpcrdma_msg *) req->rl_base;
	/* don't htonl XID, it's already done in request */
	headerp->rm_xid = rqst->rq_xid;
	headerp->rm_vers = xdr_one;
	headerp->rm_credit = htonl(r_xprt->rx_buf.rb_max_requests);
	headerp->rm_type = htonl(RDMA_MSG);

	/*
	 * Chunks needed for results?
	 *
	 * o If the expected result is under the inline threshold, all ops
	 *   return as inline (but see later).
	 * o Large non-read ops return as a single reply chunk.
	 * o Large read ops return data as write chunk(s), header as inline.
	 *
	 * Note: the NFS code sending down multiple result segments implies
	 * the op is one of read, readdir[plus], readlink or NFSv4 getacl.
	 */

	/*
	 * This code can handle read chunks, write chunks OR reply
	 * chunks -- only one type. If the request is too big to fit
	 * inline, then we will choose read chunks. If the request is
	 * a READ, then use write chunks to separate the file data
	 * into pages; otherwise use reply chunks.
	 */
	if (rqst->rq_rcv_buf.buflen <= RPCRDMA_INLINE_READ_THRESHOLD(rqst))
		wtype = rpcrdma_noch;
	else if (rqst->rq_rcv_buf.page_len == 0)
		wtype = rpcrdma_replych;
	else if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
		wtype = rpcrdma_writech;
	else
		wtype = rpcrdma_replych;

	/*
	 * Chunks needed for arguments?
	 *
	 * o If the total request is under the inline threshold, all ops
	 *   are sent as inline.
	 * o Large non-write ops are sent with the entire message as a
	 *   single read chunk (protocol 0-position special case).
	 * o Large write ops transmit data as read chunk(s), header as
	 *   inline.
	 *
	 * Note: the NFS code sending down multiple argument segments
	 * implies the op is a write.
	 * TBD check NFSv4 setacl
	 */
	if (rqst->rq_snd_buf.len <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst))
		rtype = rpcrdma_noch;
	else if (rqst->rq_snd_buf.page_len == 0)
		rtype = rpcrdma_areadch;
	else
		rtype = rpcrdma_readch;

	/* The following simplification is not true forever */
	if (rtype != rpcrdma_noch && wtype == rpcrdma_replych)
		wtype = rpcrdma_noch;
	BUG_ON(rtype != rpcrdma_noch && wtype != rpcrdma_noch);

	if (r_xprt->rx_ia.ri_memreg_strategy == RPCRDMA_BOUNCEBUFFERS &&
	    (rtype != rpcrdma_noch || wtype != rpcrdma_noch)) {
		/* forced to "pure inline"? */
		dprintk("RPC: %s: too much data (%d/%d) for inline\n",
			__func__, rqst->rq_rcv_buf.len, rqst->rq_snd_buf.len);
		return -1;
	}

	hdrlen = 28; /*sizeof *headerp;*/
	padlen = 0;

	/*
	 * Pull up any extra send data into the preregistered buffer.
	 * When padding is in use and applies to the transfer, insert
	 * it and change the message type.
	 */
	if (rtype == rpcrdma_noch) {

		padlen = rpcrdma_inline_pullup(rqst,
					RPCRDMA_INLINE_PAD_VALUE(rqst));

		if (padlen) {
			headerp->rm_type = htonl(RDMA_MSGP);
			headerp->rm_body.rm_padded.rm_align =
				htonl(RPCRDMA_INLINE_PAD_VALUE(rqst));
			headerp->rm_body.rm_padded.rm_thresh =
				htonl(RPCRDMA_INLINE_PAD_THRESH);
			headerp->rm_body.rm_padded.rm_pempty[0] = xdr_zero;
			headerp->rm_body.rm_padded.rm_pempty[1] = xdr_zero;
			headerp->rm_body.rm_padded.rm_pempty[2] = xdr_zero;
			hdrlen += 2 * sizeof(u32);	/* extra words in padhdr */
			BUG_ON(wtype != rpcrdma_noch);

		} else {
			headerp->rm_body.rm_nochunks.rm_empty[0] = xdr_zero;
			headerp->rm_body.rm_nochunks.rm_empty[1] = xdr_zero;
			headerp->rm_body.rm_nochunks.rm_empty[2] = xdr_zero;
			/* new length after pullup */
			rpclen = rqst->rq_svec[0].iov_len;
			/*
			 * Currently we try to not actually use read inline.
			 * Reply chunks have the desirable property that
			 * they land, packed, directly in the target buffers
			 * without headers, so they require no fixup. The
			 * additional RDMA Write op sends the same amount
			 * of data, streams on-the-wire and adds no overhead
			 * on receive. Therefore, we request a reply chunk
			 * for non-writes wherever feasible and efficient.
			 */
			if (wtype == rpcrdma_noch &&
			    r_xprt->rx_ia.ri_memreg_strategy > RPCRDMA_REGISTER)
				wtype = rpcrdma_replych;
		}
	}

	/*
	 * Marshal chunks. This routine will return the header length
	 * consumed by marshaling.
	 */
	if (rtype != rpcrdma_noch) {
		hdrlen = rpcrdma_create_chunks(rqst,
					&rqst->rq_snd_buf, headerp, rtype);
		wtype = rtype;	/* simplify dprintk */

	} else if (wtype != rpcrdma_noch) {
		hdrlen = rpcrdma_create_chunks(rqst,
					&rqst->rq_rcv_buf, headerp, wtype);
	}

	if (hdrlen == 0)
		return -1;

	dprintk("RPC: %s: %s: hdrlen %zd rpclen %zd padlen %zd"
		" headerp 0x%p base 0x%p lkey 0x%x\n",
		__func__, transfertypes[wtype], hdrlen, rpclen, padlen,
		headerp, base, req->rl_iov.lkey);

	/*
	 * initialize send_iov's - normally only two: rdma chunk header and
	 * single preregistered RPC header buffer, but if padding is present,
	 * then use a preregistered (and zeroed) pad buffer between the RPC
	 * header and any write data. In all non-rdma cases, any following
	 * data has been copied into the RPC header buffer.
	 */
	req->rl_send_iov[0].addr = req->rl_iov.addr;
	req->rl_send_iov[0].length = hdrlen;
	req->rl_send_iov[0].lkey = req->rl_iov.lkey;

	req->rl_send_iov[1].addr = req->rl_iov.addr + (base - req->rl_base);
	req->rl_send_iov[1].length = rpclen;
	req->rl_send_iov[1].lkey = req->rl_iov.lkey;

	req->rl_niovs = 2;

	if (padlen) {
		struct rpcrdma_ep *ep = &r_xprt->rx_ep;

		req->rl_send_iov[2].addr = ep->rep_pad.addr;
		req->rl_send_iov[2].length = padlen;
		req->rl_send_iov[2].lkey = ep->rep_pad.lkey;

		req->rl_send_iov[3].addr = req->rl_send_iov[1].addr + rpclen;
		req->rl_send_iov[3].length = rqst->rq_slen - rpclen;
		req->rl_send_iov[3].lkey = req->rl_iov.lkey;

		req->rl_niovs = 4;
	}

	return 0;
}

/*
 * Chase down a received write or reply chunklist to get length
 * RDMA'd by server. See map at rpcrdma_create_chunks()! :-)
 */
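/* Returns the total number of bytes covered by the chunk list, or -1 if
 * the list is malformed or overruns the reply buffer. On success, *iptrp
 * is advanced past the decoded chunk list.
 */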
static int
rpcrdma_count_chunks(struct rpcrdma_rep *rep, unsigned int max, int wrchunk, __be32 **iptrp)
{
	unsigned int i, total_len;
	struct rpcrdma_write_chunk *cur_wchunk;

	i = ntohl(**iptrp);	/* get array count */
	if (i > max)
		return -1;
	cur_wchunk = (struct rpcrdma_write_chunk *) (*iptrp + 1);
	total_len = 0;
	while (i--) {
		struct rpcrdma_segment *seg = &cur_wchunk->wc_target;
		ifdebug(FACILITY) {
			u64 off;
			xdr_decode_hyper((__be32 *)&seg->rs_offset, &off);
			dprintk("RPC: %s: chunk %d@0x%llx:0x%x\n",
				__func__,
				ntohl(seg->rs_length),
				(unsigned long long)off,
				ntohl(seg->rs_handle));
		}
		total_len += ntohl(seg->rs_length);
		++cur_wchunk;
	}
	/* check and adjust for properly terminated write chunk */
	if (wrchunk) {
		__be32 *w = (__be32 *) cur_wchunk;
		if (*w++ != xdr_zero)
			return -1;
		cur_wchunk = (struct rpcrdma_write_chunk *) w;
	}
	if ((char *) cur_wchunk > rep->rr_base + rep->rr_len)
		return -1;

	*iptrp = (__be32 *) cur_wchunk;
	return total_len;
}

/*
 * Scatter inline received data back into provided iov's.
 */
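/* A non-zero "pad" zero-fills the implicit XDR padding that the sender
 * was allowed to omit from the terminal chunk, appending it to the tail.
 */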
static void
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	int i, npages, curlen, olen;
	char *destp;
	struct page **ppages;
	int page_base;

	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len) {	/* write chunk header fixup */
		curlen = copy_len;
		rqst->rq_rcv_buf.head[0].iov_len = curlen;
	}

	dprintk("RPC: %s: srcp 0x%p len %d hdrlen %d\n",
		__func__, srcp, copy_len, curlen);

	/* Shift pointer for first receive segment only */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	srcp += curlen;
	copy_len -= curlen;

	olen = copy_len;
	i = 0;
	rpcx_to_rdmax(rqst->rq_xprt)->rx_stats.fixup_copy_count += olen;
	page_base = rqst->rq_rcv_buf.page_base;
	ppages = rqst->rq_rcv_buf.pages + (page_base >> PAGE_SHIFT);
	page_base &= ~PAGE_MASK;

	if (copy_len && rqst->rq_rcv_buf.page_len) {
		npages = PAGE_ALIGN(page_base +
			rqst->rq_rcv_buf.page_len) >> PAGE_SHIFT;
		for (; i < npages; i++) {
			curlen = PAGE_SIZE - page_base;
			if (curlen > copy_len)
				curlen = copy_len;
			dprintk("RPC: %s: page %d"
				" srcp 0x%p len %d curlen %d\n",
				__func__, i, srcp, copy_len, curlen);
			destp = kmap_atomic(ppages[i]);
			memcpy(destp + page_base, srcp, curlen);
			flush_dcache_page(ppages[i]);
			kunmap_atomic(destp);
			srcp += curlen;
			copy_len -= curlen;
			if (copy_len == 0)
				break;
			page_base = 0;
		}
	}

	if (copy_len && rqst->rq_rcv_buf.tail[0].iov_len) {
		curlen = copy_len;
		if (curlen > rqst->rq_rcv_buf.tail[0].iov_len)
			curlen = rqst->rq_rcv_buf.tail[0].iov_len;
		if (rqst->rq_rcv_buf.tail[0].iov_base != srcp)
			memmove(rqst->rq_rcv_buf.tail[0].iov_base, srcp, curlen);
		dprintk("RPC: %s: tail srcp 0x%p len %d curlen %d\n",
			__func__, srcp, copy_len, curlen);
		rqst->rq_rcv_buf.tail[0].iov_len = curlen;
		copy_len -= curlen; ++i;
	} else
		rqst->rq_rcv_buf.tail[0].iov_len = 0;

	if (pad) {
		/* implicit padding on terminal chunk */
		unsigned char *p = rqst->rq_rcv_buf.tail[0].iov_base;
		while (pad--)
			p[rqst->rq_rcv_buf.tail[0].iov_len++] = 0;
	}

	if (copy_len)
		dprintk("RPC: %s: %d bytes in"
			" %d extra segments (%d lost)\n",
			__func__, olen, i, copy_len);

	/* TBD avoid a warning from call_decode() */
	rqst->rq_private_buf = rqst->rq_rcv_buf;
}

/*
 * This function is called when an async event is posted to
 * the connection which changes the connection state. All it
 * does at this point is mark the connection up/down, the rpc
 * timers do the rest.
 */
void
rpcrdma_conn_func(struct rpcrdma_ep *ep)
{
	struct rpc_xprt *xprt = ep->rep_xprt;

	spin_lock_bh(&xprt->transport_lock);
	if (++xprt->connect_cookie == 0)	/* maintain a reserved value */
		++xprt->connect_cookie;
	if (ep->rep_connected > 0) {
		if (!xprt_test_and_set_connected(xprt))
			xprt_wake_pending_tasks(xprt, 0);
	} else {
		if (xprt_test_and_clear_connected(xprt))
			xprt_wake_pending_tasks(xprt, -ENOTCONN);
	}
	spin_unlock_bh(&xprt->transport_lock);
}

/*
 * This function is called when a memory window unbind we are waiting
 * for completes. Just use rr_func (zeroed by upcall) to signal completion.
 */
static void
rpcrdma_unbind_func(struct rpcrdma_rep *rep)
{
	wake_up(&rep->rr_unbind);
}

/*
 * Called as a tasklet to do req/reply match and complete a request
 * Errors must result in the RPC task either being awakened, or
 * allowed to timeout, to discover the errors at that time.
 */
void
rpcrdma_reply_handler(struct rpcrdma_rep *rep)
{
	struct rpcrdma_msg *headerp;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	struct rpc_xprt *xprt = rep->rr_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	__be32 *iptr;
	int i, rdmalen, status;

	/* Check status. If bad, signal disconnect and return rep to pool */
	if (rep->rr_len == ~0U) {
		rpcrdma_recv_buffer_put(rep);
		if (r_xprt->rx_ep.rep_connected == 1) {
			r_xprt->rx_ep.rep_connected = -EIO;
			rpcrdma_conn_func(&r_xprt->rx_ep);
		}
		return;
	}
	if (rep->rr_len < 28) {
		dprintk("RPC: %s: short/invalid reply\n", __func__);
		goto repost;
	}
	headerp = (struct rpcrdma_msg *) rep->rr_base;
	if (headerp->rm_vers != xdr_one) {
		dprintk("RPC: %s: invalid version %d\n",
			__func__, ntohl(headerp->rm_vers));
		goto repost;
	}

	/* Get XID and try for a match. */
	spin_lock(&xprt->transport_lock);
	rqst = xprt_lookup_rqst(xprt, headerp->rm_xid);
	if (rqst == NULL) {
		spin_unlock(&xprt->transport_lock);
		dprintk("RPC: %s: reply 0x%p failed "
			"to match any request xid 0x%08x len %d\n",
			__func__, rep, headerp->rm_xid, rep->rr_len);
repost:
		r_xprt->rx_stats.bad_reply_count++;
		rep->rr_func = rpcrdma_reply_handler;
		if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, &r_xprt->rx_ep, rep))
			rpcrdma_recv_buffer_put(rep);

		return;
	}

	/* get request object */
	req = rpcr_to_rdmar(rqst);
	if (req->rl_reply) {
		spin_unlock(&xprt->transport_lock);
		dprintk("RPC: %s: duplicate reply 0x%p to RPC "
			"request 0x%p: xid 0x%08x\n", __func__, rep, req,
			headerp->rm_xid);
		goto repost;
	}

	dprintk("RPC: %s: reply 0x%p completes request 0x%p\n"
		" RPC request 0x%p xid 0x%08x\n",
		__func__, rep, req, rqst, headerp->rm_xid);

	/* from here on, the reply is no longer an orphan */
	req->rl_reply = rep;

	/* check for expected message types */
	/* The order of some of these tests is important. */
	switch (headerp->rm_type) {
	case htonl(RDMA_MSG):
		/* never expect read chunks */
		/* never expect reply chunks (two ways to check) */
		/* never expect write chunks without having offered RDMA */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    (headerp->rm_body.rm_chunks[1] == xdr_zero &&
		     headerp->rm_body.rm_chunks[2] != xdr_zero) ||
		    (headerp->rm_body.rm_chunks[1] != xdr_zero &&
		     req->rl_nchunks == 0))
			goto badheader;
		if (headerp->rm_body.rm_chunks[1] != xdr_zero) {
			/* count any expected write chunks in read reply */
			/* start at write chunk array count */
			iptr = &headerp->rm_body.rm_chunks[2];
			rdmalen = rpcrdma_count_chunks(rep,
						req->rl_nchunks, 1, &iptr);
			/* check for validity, and no reply chunk after */
			if (rdmalen < 0 || *iptr++ != xdr_zero)
				goto badheader;
			rep->rr_len -=
			    ((unsigned char *)iptr - (unsigned char *)headerp);
			status = rep->rr_len + rdmalen;
			r_xprt->rx_stats.total_rdma_reply += rdmalen;
			/* special case - last chunk may omit padding */
			if (rdmalen &= 3) {
				rdmalen = 4 - rdmalen;
				status += rdmalen;
			}
		} else {
			/* else ordinary inline */
			rdmalen = 0;
			iptr = (__be32 *)((unsigned char *)headerp + 28);
			rep->rr_len -= 28; /*sizeof *headerp;*/
			status = rep->rr_len;
		}
		/* Fix up the rpc results for upper layer */
		rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len, rdmalen);
		break;

	case htonl(RDMA_NOMSG):
		/* never expect read or write chunks, always reply chunks */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    headerp->rm_body.rm_chunks[1] != xdr_zero ||
		    headerp->rm_body.rm_chunks[2] != xdr_one ||
		    req->rl_nchunks == 0)
			goto badheader;
		iptr = (__be32 *)((unsigned char *)headerp + 28);
		rdmalen = rpcrdma_count_chunks(rep, req->rl_nchunks, 0, &iptr);
		if (rdmalen < 0)
			goto badheader;
		r_xprt->rx_stats.total_rdma_reply += rdmalen;
		/* Reply chunk buffer already is the reply vector - no fixup. */
		status = rdmalen;
		break;

badheader:
	default:
		dprintk("%s: invalid rpcrdma reply header (type %d):"
			" chunks[012] == %d %d %d"
			" expected chunks <= %d\n",
			__func__, ntohl(headerp->rm_type),
			headerp->rm_body.rm_chunks[0],
			headerp->rm_body.rm_chunks[1],
			headerp->rm_body.rm_chunks[2],
			req->rl_nchunks);
		status = -EIO;
		r_xprt->rx_stats.bad_reply_count++;
		break;
	}

	/* If using mw bind, start the deregister process now. */
	/* (Note: if mr_free(), cannot perform it here, in tasklet context) */
	if (req->rl_nchunks) switch (r_xprt->rx_ia.ri_memreg_strategy) {
	case RPCRDMA_MEMWINDOWS:
		for (i = 0; req->rl_nchunks-- > 1;)
			i += rpcrdma_deregister_external(
				&req->rl_segments[i], r_xprt, NULL);
		/* Optionally wait (not here) for unbinds to complete */
		rep->rr_func = rpcrdma_unbind_func;
		(void) rpcrdma_deregister_external(&req->rl_segments[i],
						r_xprt, rep);
		break;
	case RPCRDMA_MEMWINDOWS_ASYNC:
		for (i = 0; req->rl_nchunks--;)
			i += rpcrdma_deregister_external(&req->rl_segments[i],
						r_xprt, NULL);
		break;
	default:
		break;
	}

	dprintk("RPC: %s: xprt_complete_rqst(0x%p, 0x%p, %d)\n",
		__func__, xprt, rqst, status);
	xprt_complete_rqst(rqst->rq_task, status);
	spin_unlock(&xprt->transport_lock);
}
881 | } |