/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * rpc_rdma.c
 *
 * This file contains the guts of the RPC RDMA protocol, and
 * does marshaling/unmarshaling, etc. It is also where interfacing
 * to the Linux RPC framework lives.
 */

#include "xprt_rdma.h"

#include <linux/highmem.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

enum rpcrdma_chunktype {
	rpcrdma_noch = 0,
	rpcrdma_readch,
	rpcrdma_areadch,
	rpcrdma_writech,
	rpcrdma_replych
};

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static const char transfertypes[][12] = {
	"pure inline",	/* no chunks */
	" read chunk",	/* some argument via rdma read */
	"*read chunk",	/* entire request via rdma read */
	"write chunk",	/* some result via rdma write */
	"reply chunk"	/* entire reply via rdma write */
};
#endif

/* The client can send a request inline as long as the RPCRDMA header
 * plus the RPC call fit under the transport's inline limit. If the
 * combined call message size exceeds that limit, the client must use
 * the read chunk list for this operation.
 */
static bool rpcrdma_args_inline(struct rpc_rqst *rqst)
{
	unsigned int callsize = RPCRDMA_HDRLEN_MIN + rqst->rq_snd_buf.len;

	return callsize <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst);
}

/* The client can't know how large the actual reply will be. Thus it
 * plans for the largest possible reply for that particular ULP
 * operation. If the maximum combined reply message size exceeds that
 * limit, the client must provide a write list or a reply chunk for
 * this request.
 */
static bool rpcrdma_results_inline(struct rpc_rqst *rqst)
{
	unsigned int repsize = RPCRDMA_HDRLEN_MIN + rqst->rq_rcv_buf.buflen;

	return repsize <= RPCRDMA_INLINE_READ_THRESHOLD(rqst);
}
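
/* Worked example (threshold value is illustrative, not from this
 * file): with a 1024-byte inline write threshold, a 2000-byte WRITE
 * call fails rpcrdma_args_inline() because 28 + 2000 > 1024, so its
 * payload must move to the read chunk list; a 120-byte GETATTR call
 * passes (28 + 120 <= 1024) and goes out as a single inline Send.
 */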

static int
rpcrdma_tail_pullup(struct xdr_buf *buf)
{
	size_t tlen = buf->tail[0].iov_len;
	size_t skip = tlen & 3;

	/* Do not include the tail if it is only an XDR pad */
	if (tlen < 4)
		return 0;

	/* xdr_write_pages() adds a pad at the beginning of the tail
	 * if the content in "buf->pages" is unaligned. Force the
	 * tail's actual content to land at the next XDR position
	 * after the head instead.
	 */
	if (skip) {
		unsigned char *src, *dst;
		unsigned int count;

		src = buf->tail[0].iov_base;
		dst = buf->head[0].iov_base;
		dst += buf->head[0].iov_len;

		src += skip;
		tlen -= skip;

		dprintk("RPC:       %s: skip=%zu, memmove(%p, %p, %zu)\n",
			__func__, skip, dst, src, tlen);

		for (count = tlen; count; count--)
			*dst++ = *src++;
	}

	return tlen;
}
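
/* Example (sizes illustrative): a WRITE carrying 1005 bytes of page
 * data gets a 3-byte XDR pad placed at the front of the tail by
 * xdr_write_pages(). Here skip = tlen & 3 strips that pad, and the
 * remaining tail content is copied to sit directly after the head,
 * so the tail can be sent inline when the payload itself goes out
 * as a read chunk.
 */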

/*
 * Chunk assembly from upper layer xdr_buf.
 *
 * Prepare the passed-in xdr_buf into representation as RPC/RDMA chunk
 * elements. Segments are then coalesced when registered, if possible
 * within the selected memreg mode.
 *
 * Returns positive number of segments converted, or a negative errno.
 */

static int
rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
	enum rpcrdma_chunktype type, struct rpcrdma_mr_seg *seg, int nsegs)
{
	int len, n = 0, p;
	int page_base;
	struct page **ppages;

	if (pos == 0 && xdrbuf->head[0].iov_len) {
		seg[n].mr_page = NULL;
		seg[n].mr_offset = xdrbuf->head[0].iov_base;
		seg[n].mr_len = xdrbuf->head[0].iov_len;
		++n;
	}

	len = xdrbuf->page_len;
	ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
	page_base = xdrbuf->page_base & ~PAGE_MASK;
	p = 0;
	while (len && n < nsegs) {
		if (!ppages[p]) {
			/* alloc the pagelist for receiving buffer */
			ppages[p] = alloc_page(GFP_ATOMIC);
			if (!ppages[p])
				return -ENOMEM;
		}
		seg[n].mr_page = ppages[p];
		seg[n].mr_offset = (void *)(unsigned long) page_base;
		seg[n].mr_len = min_t(u32, PAGE_SIZE - page_base, len);
		if (seg[n].mr_len > PAGE_SIZE)
			return -EIO;
		len -= seg[n].mr_len;
		++n;
		++p;
		page_base = 0;	/* page offset only applies to first page */
	}

	/* Message overflows the seg array */
	if (len && n == nsegs)
		return -EIO;

	/* When encoding the read list, the tail is always sent inline */
	if (type == rpcrdma_readch)
		return n;

	if (xdrbuf->tail[0].iov_len) {
		/* the rpcrdma protocol allows us to omit any trailing
		 * xdr pad bytes, saving the server an RDMA operation. */
		if (xdrbuf->tail[0].iov_len < 4 && xprt_rdma_pad_optimize)
			return n;
		if (n == nsegs)
			/* Tail remains, but we're out of segments */
			return -EIO;
		seg[n].mr_page = NULL;
		seg[n].mr_offset = xdrbuf->tail[0].iov_base;
		seg[n].mr_len = xdrbuf->tail[0].iov_len;
		++n;
	}

	return n;
}
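
/* Illustration (layout hypothetical): with pos == 0, an xdr_buf with a
 * 100-byte head, 8192 bytes of page data at page offset 0, and a
 * 4-byte tail converts into at most four segments:
 *   seg[0] head (mr_page == NULL), 100 bytes
 *   seg[1] page 0, 4096 bytes
 *   seg[2] page 1, 4096 bytes
 *   seg[3] tail, 4 bytes (omitted for a read chunk list, or when it
 *          is only XDR padding and pad optimization is enabled)
 */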

/*
 * Create read/write chunk lists, and reply chunks, for RDMA
 *
 * Assume check against THRESHOLD has been done, and chunks are required.
 * Assume only encoding one list entry for read|write chunks. The NFSv3
 * protocol is simple enough to allow this as it only has a single "bulk
 * result" in each procedure - complicated NFSv4 COMPOUNDs are not. (The
 * RDMA/Sessions NFSv4 proposal addresses this for future v4 revs.)
 *
 * When used for a single reply chunk (which is a special write
 * chunk used for the entire reply, rather than just the data), it
 * is used primarily for READDIR and READLINK which would otherwise
 * be severely size-limited by a small rdma inline read max. The server
 * response will come back as an RDMA Write, followed by a message
 * of type RDMA_NOMSG carrying the xid and length. As a result, reply
 * chunks do not provide data alignment, however they do not require
 * "fixup" (moving the response to the upper layer buffer) either.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Read chunklist (a linked list):
 *   N elements, position P (same P for all chunks of same arg!):
 *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 *  Write chunklist (a list of (one) counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO - 0
 *
 *  Reply chunk (a counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO
 *
 * Returns positive RPC/RDMA header size, or negative errno.
 */

static ssize_t
rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
		struct rpcrdma_msg *headerp, enum rpcrdma_chunktype type)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	int n, nsegs, nchunks = 0;
	unsigned int pos;
	struct rpcrdma_mr_seg *seg = req->rl_segments;
	struct rpcrdma_read_chunk *cur_rchunk = NULL;
	struct rpcrdma_write_array *warray = NULL;
	struct rpcrdma_write_chunk *cur_wchunk = NULL;
	__be32 *iptr = headerp->rm_body.rm_chunks;
	int (*map)(struct rpcrdma_xprt *, struct rpcrdma_mr_seg *, int, bool);

	if (type == rpcrdma_readch || type == rpcrdma_areadch) {
		/* a read chunk - server will RDMA Read our memory */
		cur_rchunk = (struct rpcrdma_read_chunk *) iptr;
	} else {
		/* a write or reply chunk - server will RDMA Write our memory */
		*iptr++ = xdr_zero;	/* encode a NULL read chunk list */
		if (type == rpcrdma_replych)
			*iptr++ = xdr_zero;	/* a NULL write chunk list */
		warray = (struct rpcrdma_write_array *) iptr;
		cur_wchunk = (struct rpcrdma_write_chunk *) (warray + 1);
	}

	if (type == rpcrdma_replych || type == rpcrdma_areadch)
		pos = 0;
	else
		pos = target->head[0].iov_len;

	nsegs = rpcrdma_convert_iovs(target, pos, type, seg, RPCRDMA_MAX_SEGS);
	if (nsegs < 0)
		return nsegs;

	map = r_xprt->rx_ia.ri_ops->ro_map;
	do {
		n = map(r_xprt, seg, nsegs, cur_wchunk != NULL);
		if (n <= 0)
			goto out;
		if (cur_rchunk) {	/* read */
			cur_rchunk->rc_discrim = xdr_one;
			/* all read chunks have the same "position" */
			cur_rchunk->rc_position = cpu_to_be32(pos);
			cur_rchunk->rc_target.rs_handle =
						cpu_to_be32(seg->mr_rkey);
			cur_rchunk->rc_target.rs_length =
						cpu_to_be32(seg->mr_len);
			xdr_encode_hyper(
					(__be32 *)&cur_rchunk->rc_target.rs_offset,
					seg->mr_base);
			dprintk("RPC:       %s: read chunk "
				"elem %d@0x%llx:0x%x pos %u (%s)\n", __func__,
				seg->mr_len, (unsigned long long)seg->mr_base,
				seg->mr_rkey, pos, n < nsegs ? "more" : "last");
			cur_rchunk++;
			r_xprt->rx_stats.read_chunk_count++;
		} else {		/* write/reply */
			cur_wchunk->wc_target.rs_handle =
						cpu_to_be32(seg->mr_rkey);
			cur_wchunk->wc_target.rs_length =
						cpu_to_be32(seg->mr_len);
			xdr_encode_hyper(
					(__be32 *)&cur_wchunk->wc_target.rs_offset,
					seg->mr_base);
			dprintk("RPC:       %s: %s chunk "
				"elem %d@0x%llx:0x%x (%s)\n", __func__,
				(type == rpcrdma_replych) ? "reply" : "write",
				seg->mr_len, (unsigned long long)seg->mr_base,
				seg->mr_rkey, n < nsegs ? "more" : "last");
			cur_wchunk++;
			if (type == rpcrdma_replych)
				r_xprt->rx_stats.reply_chunk_count++;
			else
				r_xprt->rx_stats.write_chunk_count++;
			r_xprt->rx_stats.total_rdma_request += seg->mr_len;
		}
		nchunks++;
		seg += n;
		nsegs -= n;
	} while (nsegs);

	/* success. all failures return above */
	req->rl_nchunks = nchunks;

	/*
	 * finish off header. If write, marshal discrim and nchunks.
	 */
	if (cur_rchunk) {
		iptr = (__be32 *) cur_rchunk;
		*iptr++ = xdr_zero;	/* finish the read chunk list */
		*iptr++ = xdr_zero;	/* encode a NULL write chunk list */
		*iptr++ = xdr_zero;	/* encode a NULL reply chunk */
	} else {
		warray->wc_discrim = xdr_one;
		warray->wc_nchunks = cpu_to_be32(nchunks);
		iptr = (__be32 *) cur_wchunk;
		if (type == rpcrdma_writech) {
			*iptr++ = xdr_zero; /* finish the write chunk list */
			*iptr++ = xdr_zero; /* encode a NULL reply chunk */
		}
	}

	/*
	 * Return header size.
	 */
	return (unsigned char *)iptr - (unsigned char *)headerp;

out:
	for (pos = 0; nchunks--;)
		pos += r_xprt->rx_ia.ri_ops->ro_unmap(r_xprt,
						      &req->rl_segments[pos]);
	return n;
}
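
/* Worked example (segment count hypothetical): a read list carrying
 * two segments encodes as
 *	1 P HLOO  1 P HLOO  0 0 0
 * that is, two 6-word entries (discriminator, position, handle,
 * length, 64-bit offset) followed by three terminating zero words.
 * Together with the four fixed header words, the returned header
 * size is (4 + 12 + 3) * 4 = 76 bytes.
 */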

/*
 * Copy write data inline.
 * This function is used for "small" requests. Data which is passed
 * to RPC via iovecs (or page list) is copied directly into the
 * pre-registered memory buffer for this request. For small amounts
 * of data, this is efficient. The cutoff value is tunable.
 */
static void rpcrdma_inline_pullup(struct rpc_rqst *rqst)
{
	int i, npages, curlen;
	int copy_len;
	unsigned char *srcp, *destp;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	int page_base;
	struct page **ppages;

	destp = rqst->rq_svec[0].iov_base;
	curlen = rqst->rq_svec[0].iov_len;
	destp += curlen;

	dprintk("RPC:       %s: destp 0x%p len %d hdrlen %d\n",
		__func__, destp, rqst->rq_slen, curlen);

	copy_len = rqst->rq_snd_buf.page_len;

	if (rqst->rq_snd_buf.tail[0].iov_len) {
		curlen = rqst->rq_snd_buf.tail[0].iov_len;
		if (destp + copy_len != rqst->rq_snd_buf.tail[0].iov_base) {
			memmove(destp + copy_len,
				rqst->rq_snd_buf.tail[0].iov_base, curlen);
			r_xprt->rx_stats.pullup_copy_count += curlen;
		}
		dprintk("RPC:       %s: tail destp 0x%p len %d\n",
			__func__, destp + copy_len, curlen);
		rqst->rq_svec[0].iov_len += curlen;
	}
	r_xprt->rx_stats.pullup_copy_count += copy_len;

	page_base = rqst->rq_snd_buf.page_base;
	ppages = rqst->rq_snd_buf.pages + (page_base >> PAGE_SHIFT);
	page_base &= ~PAGE_MASK;
	npages = PAGE_ALIGN(page_base+copy_len) >> PAGE_SHIFT;
	for (i = 0; copy_len && i < npages; i++) {
		curlen = PAGE_SIZE - page_base;
		if (curlen > copy_len)
			curlen = copy_len;
		dprintk("RPC:       %s: page %d destp 0x%p len %d curlen %d\n",
			__func__, i, destp, copy_len, curlen);
		srcp = kmap_atomic(ppages[i]);
		memcpy(destp, srcp+page_base, curlen);
		kunmap_atomic(srcp);
		rqst->rq_svec[0].iov_len += curlen;
		destp += curlen;
		copy_len -= curlen;
		page_base = 0;
	}
	/* header now contains entire send message */
}
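
/* Note: after pullup, rq_svec[0] covers head, page data, and tail in
 * one contiguous span of the pre-registered send buffer, so the
 * request can be posted as a plain Send with no further memory
 * registration.
 */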

/*
 * Marshal a request: the primary job of this routine is to choose
 * the transfer modes. See comments below.
 *
 * Uses multiple RDMA IOVs for a request:
 *  [0] -- RPC RDMA header, which uses memory from the *start* of the
 *         preregistered buffer that already holds the RPC data in
 *         its middle.
 *  [1] -- the RPC header/data, marshaled by RPC and the NFS protocol.
 *  [2] -- optional padding.
 *  [3] -- if padded, header only in [1] and data here.
 *
 * Returns zero on success, otherwise a negative errno.
 */

int
rpcrdma_marshal_req(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	char *base;
	size_t rpclen;
	ssize_t hdrlen;
	enum rpcrdma_chunktype rtype, wtype;
	struct rpcrdma_msg *headerp;

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	if (test_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state))
		return rpcrdma_bc_marshal_reply(rqst);
#endif

	/*
	 * rpclen gets amount of data in first buffer, which is the
	 * pre-registered buffer.
	 */
	base = rqst->rq_svec[0].iov_base;
	rpclen = rqst->rq_svec[0].iov_len;

	headerp = rdmab_to_msg(req->rl_rdmabuf);
	/* don't byte-swap XID, it's already done in request */
	headerp->rm_xid = rqst->rq_xid;
	headerp->rm_vers = rpcrdma_version;
	headerp->rm_credit = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);
	headerp->rm_type = rdma_msg;

	/*
	 * Chunks needed for results?
	 *
	 * o Read ops return data as write chunk(s), header as inline.
	 * o If the expected result is under the inline threshold, all ops
	 *   return as inline.
	 * o Large non-read ops return as a single reply chunk.
	 */
	if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
		wtype = rpcrdma_writech;
	else if (rpcrdma_results_inline(rqst))
		wtype = rpcrdma_noch;
	else
		wtype = rpcrdma_replych;

	/*
	 * Chunks needed for arguments?
	 *
	 * o If the total request is under the inline threshold, all ops
	 *   are sent as inline.
	 * o Large write ops transmit data as read chunk(s), header as
	 *   inline.
	 * o Large non-write ops are sent with the entire message as a
	 *   single read chunk (protocol 0-position special case).
	 *
	 * This assumes that the upper layer does not present a request
	 * that both has a data payload, and whose non-data arguments
	 * by themselves are larger than the inline threshold.
	 */
	if (rpcrdma_args_inline(rqst)) {
		rtype = rpcrdma_noch;
	} else if (rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
		rtype = rpcrdma_readch;
	} else {
		r_xprt->rx_stats.nomsg_call_count++;
		headerp->rm_type = htonl(RDMA_NOMSG);
		rtype = rpcrdma_areadch;
		rpclen = 0;
	}

	/* The following simplification is not true forever */
	if (rtype != rpcrdma_noch && wtype == rpcrdma_replych)
		wtype = rpcrdma_noch;
	if (rtype != rpcrdma_noch && wtype != rpcrdma_noch) {
		dprintk("RPC:       %s: cannot marshal multiple chunk lists\n",
			__func__);
		return -EIO;
	}

	hdrlen = RPCRDMA_HDRLEN_MIN;

	/*
	 * Pull up any extra send data into the preregistered buffer.
	 * When padding is in use and applies to the transfer, insert
	 * it and change the message type.
	 */
	if (rtype == rpcrdma_noch) {

		rpcrdma_inline_pullup(rqst);

		headerp->rm_body.rm_nochunks.rm_empty[0] = xdr_zero;
		headerp->rm_body.rm_nochunks.rm_empty[1] = xdr_zero;
		headerp->rm_body.rm_nochunks.rm_empty[2] = xdr_zero;
		/* new length after pullup */
		rpclen = rqst->rq_svec[0].iov_len;
	} else if (rtype == rpcrdma_readch)
		rpclen += rpcrdma_tail_pullup(&rqst->rq_snd_buf);
	if (rtype != rpcrdma_noch) {
		hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_snd_buf,
					       headerp, rtype);
		wtype = rtype;	/* simplify dprintk */

	} else if (wtype != rpcrdma_noch) {
		hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_rcv_buf,
					       headerp, wtype);
	}
	if (hdrlen < 0)
		return hdrlen;

	dprintk("RPC:       %s: %s: hdrlen %zd rpclen %zd"
		" headerp 0x%p base 0x%p lkey 0x%x\n",
		__func__, transfertypes[wtype], hdrlen, rpclen,
		headerp, base, rdmab_lkey(req->rl_rdmabuf));

	/*
	 * initialize send_iov's - normally only two: rdma chunk header and
	 * single preregistered RPC header buffer, but if padding is present,
	 * then use a preregistered (and zeroed) pad buffer between the RPC
	 * header and any write data. In all non-rdma cases, any following
	 * data has been copied into the RPC header buffer.
	 */
	req->rl_send_iov[0].addr = rdmab_addr(req->rl_rdmabuf);
	req->rl_send_iov[0].length = hdrlen;
	req->rl_send_iov[0].lkey = rdmab_lkey(req->rl_rdmabuf);

	req->rl_niovs = 1;
	if (rtype == rpcrdma_areadch)
		return 0;

	req->rl_send_iov[1].addr = rdmab_addr(req->rl_sendbuf);
	req->rl_send_iov[1].length = rpclen;
	req->rl_send_iov[1].lkey = rdmab_lkey(req->rl_sendbuf);

	req->rl_niovs = 2;
	return 0;
}
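
/* Summary of the chunk decisions above: with rtype == rpcrdma_noch,
 * any wtype is acceptable. With a read chunk list present (readch or
 * areadch), a replych wtype is downgraded to noch (the temporary
 * "simplification"), while a writech wtype cannot be combined with it
 * and the marshal fails with -EIO.
 */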

/*
 * Chase down a received write or reply chunklist to get length
 * RDMA'd by server. See map at rpcrdma_create_chunks()! :-)
 */
static int
rpcrdma_count_chunks(struct rpcrdma_rep *rep, unsigned int max, int wrchunk, __be32 **iptrp)
{
	unsigned int i, total_len;
	struct rpcrdma_write_chunk *cur_wchunk;
	char *base = (char *)rdmab_to_msg(rep->rr_rdmabuf);

	i = be32_to_cpu(**iptrp);
	if (i > max)
		return -1;
	cur_wchunk = (struct rpcrdma_write_chunk *) (*iptrp + 1);
	total_len = 0;
	while (i--) {
		struct rpcrdma_segment *seg = &cur_wchunk->wc_target;
		ifdebug(FACILITY) {
			u64 off;
			xdr_decode_hyper((__be32 *)&seg->rs_offset, &off);
			dprintk("RPC:       %s: chunk %d@0x%llx:0x%x\n",
				__func__,
				be32_to_cpu(seg->rs_length),
				(unsigned long long)off,
				be32_to_cpu(seg->rs_handle));
		}
		total_len += be32_to_cpu(seg->rs_length);
		++cur_wchunk;
	}
	/* check and adjust for properly terminated write chunk */
	if (wrchunk) {
		__be32 *w = (__be32 *) cur_wchunk;
		if (*w++ != xdr_zero)
			return -1;
		cur_wchunk = (struct rpcrdma_write_chunk *) w;
	}
	if ((char *)cur_wchunk > base + rep->rr_len)
		return -1;

	*iptrp = (__be32 *) cur_wchunk;
	return total_len;
}
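
/* Example (lengths illustrative): a write list announcing segments of
 * 4096, 4096, and 1808 bytes yields total_len = 10000, the number of
 * reply bytes the server claims to have RDMA-written into the chunks.
 */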

/*
 * Scatter inline received data back into provided iov's.
 */
static void
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	int i, npages, curlen, olen;
	char *destp;
	struct page **ppages;
	int page_base;

	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len) {	/* write chunk header fixup */
		curlen = copy_len;
		rqst->rq_rcv_buf.head[0].iov_len = curlen;
	}

	dprintk("RPC:       %s: srcp 0x%p len %d hdrlen %d\n",
		__func__, srcp, copy_len, curlen);

	/* Shift pointer for first receive segment only */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	srcp += curlen;
	copy_len -= curlen;

	olen = copy_len;
	i = 0;
	rpcx_to_rdmax(rqst->rq_xprt)->rx_stats.fixup_copy_count += olen;
	page_base = rqst->rq_rcv_buf.page_base;
	ppages = rqst->rq_rcv_buf.pages + (page_base >> PAGE_SHIFT);
	page_base &= ~PAGE_MASK;

	if (copy_len && rqst->rq_rcv_buf.page_len) {
		npages = PAGE_ALIGN(page_base +
			rqst->rq_rcv_buf.page_len) >> PAGE_SHIFT;
		for (; i < npages; i++) {
			curlen = PAGE_SIZE - page_base;
			if (curlen > copy_len)
				curlen = copy_len;
			dprintk("RPC:       %s: page %d"
				" srcp 0x%p len %d curlen %d\n",
				__func__, i, srcp, copy_len, curlen);
			destp = kmap_atomic(ppages[i]);
			memcpy(destp + page_base, srcp, curlen);
			flush_dcache_page(ppages[i]);
			kunmap_atomic(destp);
			srcp += curlen;
			copy_len -= curlen;
			if (copy_len == 0)
				break;
			page_base = 0;
		}
	}

	if (copy_len && rqst->rq_rcv_buf.tail[0].iov_len) {
		curlen = copy_len;
		if (curlen > rqst->rq_rcv_buf.tail[0].iov_len)
			curlen = rqst->rq_rcv_buf.tail[0].iov_len;
		if (rqst->rq_rcv_buf.tail[0].iov_base != srcp)
			memmove(rqst->rq_rcv_buf.tail[0].iov_base, srcp, curlen);
		dprintk("RPC:       %s: tail srcp 0x%p len %d curlen %d\n",
			__func__, srcp, copy_len, curlen);
		rqst->rq_rcv_buf.tail[0].iov_len = curlen;
		copy_len -= curlen; ++i;
	} else
		rqst->rq_rcv_buf.tail[0].iov_len = 0;

	if (pad) {
		/* implicit padding on terminal chunk */
		unsigned char *p = rqst->rq_rcv_buf.tail[0].iov_base;
		while (pad--)
			p[rqst->rq_rcv_buf.tail[0].iov_len++] = 0;
	}

	if (copy_len)
		dprintk("RPC:       %s: %d bytes in"
			" %d extra segments (%d lost)\n",
			__func__, olen, i, copy_len);

	/* TBD avoid a warning from call_decode() */
	rqst->rq_private_buf = rqst->rq_rcv_buf;
}
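
/* Note: this copying is the "fixup" overhead that write chunks avoid;
 * data returned via RDMA Write lands directly in the receive buffer's
 * pages and never passes through this routine.
 */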

void
rpcrdma_connect_worker(struct work_struct *work)
{
	struct rpcrdma_ep *ep =
		container_of(work, struct rpcrdma_ep, rep_connect_worker.work);
	struct rpcrdma_xprt *r_xprt =
		container_of(ep, struct rpcrdma_xprt, rx_ep);
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;

	spin_lock_bh(&xprt->transport_lock);
	if (++xprt->connect_cookie == 0)	/* maintain a reserved value */
		++xprt->connect_cookie;
	if (ep->rep_connected > 0) {
		if (!xprt_test_and_set_connected(xprt))
			xprt_wake_pending_tasks(xprt, 0);
	} else {
		if (xprt_test_and_clear_connected(xprt))
			xprt_wake_pending_tasks(xprt, -ENOTCONN);
	}
	spin_unlock_bh(&xprt->transport_lock);
}

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool
rpcrdma_is_bcall(struct rpcrdma_msg *headerp)
{
	__be32 *p = (__be32 *)headerp;

	if (headerp->rm_type != rdma_msg)
		return false;
	if (headerp->rm_body.rm_chunks[0] != xdr_zero)
		return false;
	if (headerp->rm_body.rm_chunks[1] != xdr_zero)
		return false;
	if (headerp->rm_body.rm_chunks[2] != xdr_zero)
		return false;

	/* sanity */
	if (p[7] != headerp->rm_xid)
		return false;
	/* call direction */
	if (p[8] != cpu_to_be32(RPC_CALL))
		return false;

	return true;
}
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */
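
/* Word map behind the p[7]/p[8] tests above: a chunk-less RPC/RDMA
 * header is exactly seven 32-bit words (xid, vers, credit, type, and
 * three empty chunk lists), so p[7] is the first word of the embedded
 * RPC message (its xid, which must equal rm_xid), and p[8] is the RPC
 * call-direction field.
 */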

/*
 * This function is called when an async event is posted to
 * the connection which changes the connection state. All it
 * does at this point is mark the connection up/down, the rpc
 * timers do the rest.
 */
void
rpcrdma_conn_func(struct rpcrdma_ep *ep)
{
	schedule_delayed_work(&ep->rep_connect_worker, 0);
}

/* Process received RPC/RDMA messages.
 *
 * Errors must result in the RPC task either being awakened, or
 * allowed to timeout, to discover the errors at that time.
 */
void
rpcrdma_reply_handler(struct rpcrdma_rep *rep)
{
	struct rpcrdma_msg *headerp;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	__be32 *iptr;
	int rdmalen, status;
	unsigned long cwnd;
	u32 credits;

	dprintk("RPC:       %s: incoming rep %p\n", __func__, rep);

	if (rep->rr_len == RPCRDMA_BAD_LEN)
		goto out_badstatus;
	if (rep->rr_len < RPCRDMA_HDRLEN_MIN)
		goto out_shortreply;

	headerp = rdmab_to_msg(rep->rr_rdmabuf);
	if (headerp->rm_vers != rpcrdma_version)
		goto out_badversion;
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	if (rpcrdma_is_bcall(headerp))
		goto out_bcall;
#endif

	/* Match incoming rpcrdma_rep to an rpcrdma_req to
	 * get context for handling any incoming chunks.
	 */
	spin_lock_bh(&xprt->transport_lock);
	rqst = xprt_lookup_rqst(xprt, headerp->rm_xid);
	if (!rqst)
		goto out_nomatch;

	req = rpcr_to_rdmar(rqst);
	if (req->rl_reply)
		goto out_duplicate;

	/* Sanity checking has passed. We are now committed
	 * to complete this transaction.
	 */
	list_del_init(&rqst->rq_list);
	spin_unlock_bh(&xprt->transport_lock);
	dprintk("RPC:       %s: reply 0x%p completes request 0x%p\n"
		"                   RPC request 0x%p xid 0x%08x\n",
		__func__, rep, req, rqst,
		be32_to_cpu(headerp->rm_xid));

	/* from here on, the reply is no longer an orphan */
	req->rl_reply = rep;
	xprt->reestablish_timeout = 0;

	/* check for expected message types */
	/* The order of some of these tests is important. */
	switch (headerp->rm_type) {
	case rdma_msg:
		/* never expect read chunks */
		/* never expect reply chunks (two ways to check) */
		/* never expect write chunks without having offered RDMA */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    (headerp->rm_body.rm_chunks[1] == xdr_zero &&
		     headerp->rm_body.rm_chunks[2] != xdr_zero) ||
		    (headerp->rm_body.rm_chunks[1] != xdr_zero &&
		     req->rl_nchunks == 0))
			goto badheader;
		if (headerp->rm_body.rm_chunks[1] != xdr_zero) {
			/* count any expected write chunks in read reply */
			/* start at write chunk array count */
			iptr = &headerp->rm_body.rm_chunks[2];
			rdmalen = rpcrdma_count_chunks(rep,
						req->rl_nchunks, 1, &iptr);
			/* check for validity, and no reply chunk after */
			if (rdmalen < 0 || *iptr++ != xdr_zero)
				goto badheader;
			rep->rr_len -=
			    ((unsigned char *)iptr - (unsigned char *)headerp);
			status = rep->rr_len + rdmalen;
			r_xprt->rx_stats.total_rdma_reply += rdmalen;
			/* special case - last chunk may omit padding */
			if (rdmalen &= 3) {
				rdmalen = 4 - rdmalen;
				status += rdmalen;
			}
		} else {
			/* else ordinary inline */
			rdmalen = 0;
			iptr = (__be32 *)((unsigned char *)headerp +
							RPCRDMA_HDRLEN_MIN);
			rep->rr_len -= RPCRDMA_HDRLEN_MIN;
			status = rep->rr_len;
		}
		/* Fix up the rpc results for upper layer */
		rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len, rdmalen);
		break;

	case rdma_nomsg:
		/* never expect read or write chunks, always reply chunks */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    headerp->rm_body.rm_chunks[1] != xdr_zero ||
		    headerp->rm_body.rm_chunks[2] != xdr_one ||
		    req->rl_nchunks == 0)
			goto badheader;
		iptr = (__be32 *)((unsigned char *)headerp +
							RPCRDMA_HDRLEN_MIN);
		rdmalen = rpcrdma_count_chunks(rep, req->rl_nchunks, 0, &iptr);
		if (rdmalen < 0)
			goto badheader;
		r_xprt->rx_stats.total_rdma_reply += rdmalen;
		/* Reply chunk buffer already is the reply vector - no fixup. */
		status = rdmalen;
		break;

badheader:
	default:
		dprintk("%s: invalid rpcrdma reply header (type %d):"
				" chunks[012] == %d %d %d"
				" expected chunks <= %d\n",
				__func__, be32_to_cpu(headerp->rm_type),
				headerp->rm_body.rm_chunks[0],
				headerp->rm_body.rm_chunks[1],
				headerp->rm_body.rm_chunks[2],
				req->rl_nchunks);
		status = -EIO;
		r_xprt->rx_stats.bad_reply_count++;
		break;
	}

	/* Invalidate and flush the data payloads before waking the
	 * waiting application. This guarantees the memory region is
	 * properly fenced from the server before the application
	 * accesses the data. It also ensures proper send flow
	 * control: waking the next RPC waits until this RPC has
	 * relinquished all its Send Queue entries.
	 */
	if (req->rl_nchunks)
		r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt, req);

	credits = be32_to_cpu(headerp->rm_credit);
	if (credits == 0)
		credits = 1;	/* don't deadlock */
	else if (credits > r_xprt->rx_buf.rb_max_requests)
		credits = r_xprt->rx_buf.rb_max_requests;

	spin_lock_bh(&xprt->transport_lock);
	cwnd = xprt->cwnd;
	xprt->cwnd = credits << RPC_CWNDSHIFT;
	if (xprt->cwnd > cwnd)
		xprt_release_rqst_cong(rqst->rq_task);

	xprt_complete_rqst(rqst->rq_task, status);
	spin_unlock_bh(&xprt->transport_lock);
	dprintk("RPC:       %s: xprt_complete_rqst(0x%p, 0x%p, %d)\n",
			__func__, xprt, rqst, status);
	return;

out_badstatus:
	rpcrdma_recv_buffer_put(rep);
	if (r_xprt->rx_ep.rep_connected == 1) {
		r_xprt->rx_ep.rep_connected = -EIO;
		rpcrdma_conn_func(&r_xprt->rx_ep);
	}
	return;

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
out_bcall:
	rpcrdma_bc_receive_call(r_xprt, rep);
	return;
#endif

out_shortreply:
	dprintk("RPC:       %s: short/invalid reply\n", __func__);
	goto repost;

out_badversion:
	dprintk("RPC:       %s: invalid version %d\n",
		__func__, be32_to_cpu(headerp->rm_vers));
	goto repost;

out_nomatch:
	spin_unlock_bh(&xprt->transport_lock);
	dprintk("RPC:       %s: no match for incoming xid 0x%08x len %d\n",
		__func__, be32_to_cpu(headerp->rm_xid),
		rep->rr_len);
	goto repost;

out_duplicate:
	spin_unlock_bh(&xprt->transport_lock);
	dprintk("RPC:       %s: "
		"duplicate reply %p to RPC request %p: xid 0x%08x\n",
		__func__, rep, req, be32_to_cpu(headerp->rm_xid));

repost:
	r_xprt->rx_stats.bad_reply_count++;
	if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, &r_xprt->rx_ep, rep))
		rpcrdma_recv_buffer_put(rep);
}
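
/* Credit accounting example (grant value illustrative): a reply
 * carrying rm_credit = 32 sets xprt->cwnd to 32 << RPC_CWNDSHIFT,
 * allowing up to 32 RPCs in flight. A grant of zero is clamped to one
 * credit to avoid deadlock, and grants above rb_max_requests are
 * clamped to the local receive-buffer maximum.
 */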