/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * rpc_rdma.c
 *
 * This file contains the guts of the RPC RDMA protocol, and
 * does marshaling/unmarshaling, etc. It is also where interfacing
 * to the Linux RPC framework lives.
 */
#include "xprt_rdma.h"

#include <linux/highmem.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif
enum rpcrdma_chunktype {
	rpcrdma_noch = 0,
	rpcrdma_readch,
	rpcrdma_areadch,
	rpcrdma_writech,
	rpcrdma_replych
};
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static const char transfertypes[][12] = {
	"pure inline",	/* no chunks */
	" read chunk",	/* some argument via rdma read */
	"*read chunk",	/* entire request via rdma read */
	"write chunk",	/* some result via rdma write */
	"reply chunk"	/* entire reply via rdma write */
};
#endif
/* The client can send a request inline as long as the RPCRDMA header
 * plus the RPC call fit under the transport's inline limit. If the
 * combined call message size exceeds that limit, the client must use
 * the read chunk list for this operation.
 */
static bool rpcrdma_args_inline(struct rpc_rqst *rqst)
{
	unsigned int callsize = RPCRDMA_HDRLEN_MIN + rqst->rq_snd_buf.len;

	return callsize <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst);
}
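
/* Editorial note (not in the original source): RPCRDMA_HDRLEN_MIN covers
 * the fixed portion of the RPC-over-RDMA header (XID, version, credits,
 * message type, and the three empty chunk lists). As a rough worked
 * example, if the transport's inline write threshold were 1024 bytes,
 * a call whose rq_snd_buf.len plus that fixed header fits within 1024
 * bytes could be sent inline; anything larger would have to be moved
 * via the read chunk list instead.
 */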
/* The client can't know how large the actual reply will be. Thus it
 * plans for the largest possible reply for that particular ULP
 * operation. If the maximum combined reply message size exceeds that
 * limit, the client must provide a write list or a reply chunk for
 * this request.
 */
static bool rpcrdma_results_inline(struct rpc_rqst *rqst)
{
	unsigned int repsize = RPCRDMA_HDRLEN_MIN + rqst->rq_rcv_buf.buflen;

	return repsize <= RPCRDMA_INLINE_READ_THRESHOLD(rqst);
}
static int
rpcrdma_tail_pullup(struct xdr_buf *buf)
{
	size_t tlen = buf->tail[0].iov_len;
	size_t skip = tlen & 3;

	/* Do not include the tail if it is only an XDR pad */
	if (tlen < 4)
		return 0;

	/* xdr_write_pages() adds a pad at the beginning of the tail
	 * if the content in "buf->pages" is unaligned. Force the
	 * tail's actual content to land at the next XDR position
	 * after the head instead.
	 */
	if (skip) {
		unsigned char *src, *dst;
		unsigned int count;

		src = buf->tail[0].iov_base;
		dst = buf->head[0].iov_base;
		dst += buf->head[0].iov_len;

		src += skip;
		tlen -= skip;

		dprintk("RPC:       %s: skip=%zu, memmove(%p, %p, %zu)\n",
			__func__, skip, dst, src, tlen);

		for (count = tlen; count; count--)
			*dst++ = *src++;
	}

	return tlen;
}
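
/* Editorial note (not in the original source): XDR encodes data in 4-byte
 * units, so "skip = tlen & 3" is the length of the pad that
 * xdr_write_pages() placed at the front of the tail. For example, a
 * 7-byte tail is a 3-byte pad followed by 4 bytes of real content; the
 * copy loop above moves only those 4 bytes so they land immediately
 * after the head, at the tail's proper XDR position.
 */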
/*
 * Chunk assembly from upper layer xdr_buf.
 *
 * Prepare the passed-in xdr_buf into representation as RPC/RDMA chunk
 * elements. Segments are then coalesced when registered, if possible
 * within the selected memreg mode.
 *
 * Returns positive number of segments converted, or a negative errno.
 */
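
/* Editorial note (not in the original source): each rpcrdma_mr_seg
 * describes at most one page worth of the xdr_buf. As an illustration,
 * a send buffer with a non-empty head iovec, a page-aligned 9000-byte
 * page list, and a non-empty tail would convert into five segments:
 * one for the head, three for the pages, and one for the tail.
 */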
static int
rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
	enum rpcrdma_chunktype type, struct rpcrdma_mr_seg *seg, int nsegs)
{
	int len, n = 0, p;
	int page_base;
	struct page **ppages;

	if (pos == 0 && xdrbuf->head[0].iov_len) {
		seg[n].mr_page = NULL;
		seg[n].mr_offset = xdrbuf->head[0].iov_base;
		seg[n].mr_len = xdrbuf->head[0].iov_len;
		++n;
	}

	len = xdrbuf->page_len;
	ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
	page_base = xdrbuf->page_base & ~PAGE_MASK;
	p = 0;
	while (len && n < nsegs) {
		if (!ppages[p]) {
			/* alloc the pagelist for receiving buffer */
			ppages[p] = alloc_page(GFP_ATOMIC);
			if (!ppages[p])
				return -ENOMEM;
		}
		seg[n].mr_page = ppages[p];
		seg[n].mr_offset = (void *)(unsigned long) page_base;
		seg[n].mr_len = min_t(u32, PAGE_SIZE - page_base, len);
		if (seg[n].mr_len > PAGE_SIZE)
			return -EIO;
		len -= seg[n].mr_len;
		++n;
		++p;
		page_base = 0;	/* page offset only applies to first page */
	}

	/* Message overflows the seg array */
	if (len && n == nsegs)
		return -EIO;

	/* When encoding the read list, the tail is always sent inline */
	if (type == rpcrdma_readch)
		return n;

	if (xdrbuf->tail[0].iov_len) {
		/* the rpcrdma protocol allows us to omit any trailing
		 * xdr pad bytes, saving the server an RDMA operation. */
		if (xdrbuf->tail[0].iov_len < 4 && xprt_rdma_pad_optimize)
			return n;
		if (n == nsegs)
			/* Tail remains, but we're out of segments */
			return -EIO;
		seg[n].mr_page = NULL;
		seg[n].mr_offset = xdrbuf->tail[0].iov_base;
		seg[n].mr_len = xdrbuf->tail[0].iov_len;
		++n;
	}

	return n;
}
/*
 * Create read/write chunk lists, and reply chunks, for RDMA
 *
 *   Assume check against THRESHOLD has been done, and chunks are required.
 *   Assume only encoding one list entry for read|write chunks. The NFSv3
 *   protocol is simple enough to allow this as it only has a single "bulk
 *   result" in each procedure - complicated NFSv4 COMPOUNDs are not. (The
 *   RDMA/Sessions NFSv4 proposal addresses this for future v4 revs.)
 *
 * When used for a single reply chunk (which is a special write
 * chunk used for the entire reply, rather than just the data), it
 * is used primarily for READDIR and READLINK which would otherwise
 * be severely size-limited by a small rdma inline read max. The server
 * response will come back as an RDMA Write, followed by a message
 * of type RDMA_NOMSG carrying the xid and length. As a result, reply
 * chunks do not provide data alignment, however they do not require
 * "fixup" (moving the response to the upper layer buffer) either.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Read chunklist (a linked list):
 *   N elements, position P (same P for all chunks of same arg!):
 *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 *  Write chunklist (a list of (one) counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO - 0
 *
 *  Reply chunk (a counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO
 *
 * Returns positive RPC/RDMA header size, or negative errno.
 */
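
/* Editorial worked example (not in the original comment): using the key
 * above, a read list carrying two segments of the same argument at XDR
 * position 20 goes on the wire as
 *
 *    1, P=20, H, L, OO,  1, P=20, H, L, OO,  0
 *
 * while a write list holding a single chunk of two segments is
 *
 *    1, N=2, H, L, OO, H, L, OO,  0
 *
 * and a reply chunk is the same counted array without the trailing 0.
 */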
static ssize_t
rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
		struct rpcrdma_msg *headerp, enum rpcrdma_chunktype type)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	int n, nsegs, nchunks = 0;
	unsigned int pos;
	struct rpcrdma_mr_seg *seg = req->rl_segments;
	struct rpcrdma_read_chunk *cur_rchunk = NULL;
	struct rpcrdma_write_array *warray = NULL;
	struct rpcrdma_write_chunk *cur_wchunk = NULL;
	__be32 *iptr = headerp->rm_body.rm_chunks;
	int (*map)(struct rpcrdma_xprt *, struct rpcrdma_mr_seg *, int, bool);

	if (type == rpcrdma_readch || type == rpcrdma_areadch) {
		/* a read chunk - server will RDMA Read our memory */
		cur_rchunk = (struct rpcrdma_read_chunk *) iptr;
	} else {
		/* a write or reply chunk - server will RDMA Write our memory */
		*iptr++ = xdr_zero;	/* encode a NULL read chunk list */
		if (type == rpcrdma_replych)
			*iptr++ = xdr_zero;	/* a NULL write chunk list */
		warray = (struct rpcrdma_write_array *) iptr;
		cur_wchunk = (struct rpcrdma_write_chunk *) (warray + 1);
	}

	if (type == rpcrdma_replych || type == rpcrdma_areadch)
		pos = 0;
	else
		pos = target->head[0].iov_len;

	nsegs = rpcrdma_convert_iovs(target, pos, type, seg, RPCRDMA_MAX_SEGS);
	if (nsegs < 0)
		return nsegs;

	map = r_xprt->rx_ia.ri_ops->ro_map;
	do {
		n = map(r_xprt, seg, nsegs, cur_wchunk != NULL);
		if (n <= 0)
			goto out;
		if (cur_rchunk) {	/* read */
			cur_rchunk->rc_discrim = xdr_one;
			/* all read chunks have the same "position" */
			cur_rchunk->rc_position = cpu_to_be32(pos);
			cur_rchunk->rc_target.rs_handle =
						cpu_to_be32(seg->mr_rkey);
			cur_rchunk->rc_target.rs_length =
						cpu_to_be32(seg->mr_len);
			xdr_encode_hyper(
					(__be32 *)&cur_rchunk->rc_target.rs_offset,
					seg->mr_base);
			dprintk("RPC:       %s: read chunk "
				"elem %d@0x%llx:0x%x pos %u (%s)\n", __func__,
				seg->mr_len, (unsigned long long)seg->mr_base,
				seg->mr_rkey, pos, n < nsegs ? "more" : "last");
			cur_rchunk++;
			r_xprt->rx_stats.read_chunk_count++;
		} else {		/* write/reply */
			cur_wchunk->wc_target.rs_handle =
						cpu_to_be32(seg->mr_rkey);
			cur_wchunk->wc_target.rs_length =
						cpu_to_be32(seg->mr_len);
			xdr_encode_hyper(
					(__be32 *)&cur_wchunk->wc_target.rs_offset,
					seg->mr_base);
			dprintk("RPC:       %s: %s chunk "
				"elem %d@0x%llx:0x%x (%s)\n", __func__,
				(type == rpcrdma_replych) ? "reply" : "write",
				seg->mr_len, (unsigned long long)seg->mr_base,
				seg->mr_rkey, n < nsegs ? "more" : "last");
			cur_wchunk++;
			if (type == rpcrdma_replych)
				r_xprt->rx_stats.reply_chunk_count++;
			else
				r_xprt->rx_stats.write_chunk_count++;
			r_xprt->rx_stats.total_rdma_request += seg->mr_len;
		}
		nchunks++;
		seg   += n;
		nsegs -= n;
	} while (nsegs);

	/* success. all failures return above */
	req->rl_nchunks = nchunks;

	/*
	 * finish off header. If write, marshal discrim and nchunks.
	 */
	if (cur_rchunk) {
		iptr = (__be32 *) cur_rchunk;
		*iptr++ = xdr_zero;	/* finish the read chunk list */
		*iptr++ = xdr_zero;	/* encode a NULL write chunk list */
		*iptr++ = xdr_zero;	/* encode a NULL reply chunk */
	} else {
		warray->wc_discrim = xdr_one;
		warray->wc_nchunks = cpu_to_be32(nchunks);
		iptr = (__be32 *) cur_wchunk;
		if (type == rpcrdma_writech) {
			*iptr++ = xdr_zero; /* finish the write chunk list */
			*iptr++ = xdr_zero; /* encode a NULL reply chunk */
		}
	}

	/*
	 * Return header size.
	 */
	return (unsigned char *)iptr - (unsigned char *)headerp;

out:
	for (pos = 0; nchunks--;)
		pos += r_xprt->rx_ia.ri_ops->ro_unmap(r_xprt,
						      &req->rl_segments[pos]);
	return n;
}
/*
 * Copy write data inline.
 * This function is used for "small" requests. Data which is passed
 * to RPC via iovecs (or page list) is copied directly into the
 * pre-registered memory buffer for this request. For small amounts
 * of data, this is efficient. The cutoff value is tunable.
 */
static void rpcrdma_inline_pullup(struct rpc_rqst *rqst)
{
	int i, npages, curlen;
	int copy_len;
	unsigned char *srcp, *destp;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	int page_base;
	struct page **ppages;

	destp = rqst->rq_svec[0].iov_base;
	curlen = rqst->rq_svec[0].iov_len;
	destp += curlen;

	dprintk("RPC:       %s: destp 0x%p len %d hdrlen %d\n",
		__func__, destp, rqst->rq_slen, curlen);

	copy_len = rqst->rq_snd_buf.page_len;

	if (rqst->rq_snd_buf.tail[0].iov_len) {
		curlen = rqst->rq_snd_buf.tail[0].iov_len;
		if (destp + copy_len != rqst->rq_snd_buf.tail[0].iov_base) {
			memmove(destp + copy_len,
				rqst->rq_snd_buf.tail[0].iov_base, curlen);
			r_xprt->rx_stats.pullup_copy_count += curlen;
		}
		dprintk("RPC:       %s: tail destp 0x%p len %d\n",
			__func__, destp + copy_len, curlen);
		rqst->rq_svec[0].iov_len += curlen;
	}
	r_xprt->rx_stats.pullup_copy_count += copy_len;

	page_base = rqst->rq_snd_buf.page_base;
	ppages = rqst->rq_snd_buf.pages + (page_base >> PAGE_SHIFT);
	page_base &= ~PAGE_MASK;
	npages = PAGE_ALIGN(page_base+copy_len) >> PAGE_SHIFT;
	for (i = 0; copy_len && i < npages; i++) {
		curlen = PAGE_SIZE - page_base;
		if (curlen > copy_len)
			curlen = copy_len;
		dprintk("RPC:       %s: page %d destp 0x%p len %d curlen %d\n",
			__func__, i, destp, copy_len, curlen);
		srcp = kmap_atomic(ppages[i]);
		memcpy(destp, srcp+page_base, curlen);
		kunmap_atomic(srcp);
		rqst->rq_svec[0].iov_len += curlen;
		destp += curlen;
		copy_len -= curlen;
		page_base = 0;
	}
	/* header now contains entire send message */
}
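
/* Editorial note (not in the original source): as an illustration of the
 * pullup above, a small NFS WRITE whose payload sits in the page list is
 * copied byte-for-byte into rq_svec[0], so the RPC call goes out as one
 * contiguous inline send and the server never has to issue an RDMA Read
 * for it.
 */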
/*
 * Marshal a request: the primary job of this routine is to choose
 * the transfer modes. See comments below.
 *
 * Uses multiple RDMA IOVs for a request:
 *  [0] -- RPC RDMA header, which uses memory from the *start* of the
 *         preregistered buffer that already holds the RPC data in
 *         its middle.
 *  [1] -- the RPC header/data, marshaled by RPC and the NFS protocol.
 *  [2] -- optional padding.
 *  [3] -- if padded, header only in [1] and data here.
 *
 * Returns zero on success, otherwise a negative errno.
 */
int
rpcrdma_marshal_req(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	char *base;
	size_t rpclen;
	ssize_t hdrlen;
	enum rpcrdma_chunktype rtype, wtype;
	struct rpcrdma_msg *headerp;

	/*
	 * rpclen gets amount of data in first buffer, which is the
	 * pre-registered buffer.
	 */
	base = rqst->rq_svec[0].iov_base;
	rpclen = rqst->rq_svec[0].iov_len;

	headerp = rdmab_to_msg(req->rl_rdmabuf);
	/* don't byte-swap XID, it's already done in request */
	headerp->rm_xid = rqst->rq_xid;
	headerp->rm_vers = rpcrdma_version;
	headerp->rm_credit = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);
	headerp->rm_type = rdma_msg;

	/*
	 * Chunks needed for results?
	 *
	 * o Read ops return data as write chunk(s), header as inline.
	 * o If the expected result is under the inline threshold, all ops
	 *   return as inline.
	 * o Large non-read ops return as a single reply chunk.
	 */
	if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
		wtype = rpcrdma_writech;
	else if (rpcrdma_results_inline(rqst))
		wtype = rpcrdma_noch;
	else
		wtype = rpcrdma_replych;

	/*
	 * Chunks needed for arguments?
	 *
	 * o If the total request is under the inline threshold, all ops
	 *   are sent as inline.
	 * o Large write ops transmit data as read chunk(s), header as
	 *   inline.
	 * o Large non-write ops are sent with the entire message as a
	 *   single read chunk (protocol 0-position special case).
	 *
	 * This assumes that the upper layer does not present a request
	 * that both has a data payload, and whose non-data arguments
	 * by themselves are larger than the inline threshold.
	 */
	if (rpcrdma_args_inline(rqst)) {
		rtype = rpcrdma_noch;
	} else if (rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
		rtype = rpcrdma_readch;
	} else {
		r_xprt->rx_stats.nomsg_call_count++;
		headerp->rm_type = htonl(RDMA_NOMSG);
		rtype = rpcrdma_areadch;
		rpclen = 0;
	}

	/* The following simplification is not true forever */
	if (rtype != rpcrdma_noch && wtype == rpcrdma_replych)
		wtype = rpcrdma_noch;
	if (rtype != rpcrdma_noch && wtype != rpcrdma_noch) {
		dprintk("RPC:       %s: cannot marshal multiple chunk lists\n",
			__func__);
		return -EIO;
	}

	hdrlen = RPCRDMA_HDRLEN_MIN;

	/*
	 * Pull up any extra send data into the preregistered buffer.
	 * When padding is in use and applies to the transfer, insert
	 * it and change the message type.
	 */
	if (rtype == rpcrdma_noch) {

		rpcrdma_inline_pullup(rqst);

		headerp->rm_body.rm_nochunks.rm_empty[0] = xdr_zero;
		headerp->rm_body.rm_nochunks.rm_empty[1] = xdr_zero;
		headerp->rm_body.rm_nochunks.rm_empty[2] = xdr_zero;
		/* new length after pullup */
		rpclen = rqst->rq_svec[0].iov_len;
	} else if (rtype == rpcrdma_readch)
		rpclen += rpcrdma_tail_pullup(&rqst->rq_snd_buf);
	if (rtype != rpcrdma_noch) {
		hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_snd_buf,
					       headerp, rtype);
		wtype = rtype;	/* simplify dprintk */

	} else if (wtype != rpcrdma_noch) {
		hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_rcv_buf,
					       headerp, wtype);
	}
	if (hdrlen < 0)
		return hdrlen;

	dprintk("RPC:       %s: %s: hdrlen %zd rpclen %zd"
		" headerp 0x%p base 0x%p lkey 0x%x\n",
		__func__, transfertypes[wtype], hdrlen, rpclen,
		headerp, base, rdmab_lkey(req->rl_rdmabuf));

	/*
	 * initialize send_iov's - normally only two: rdma chunk header and
	 * single preregistered RPC header buffer, but if padding is present,
	 * then use a preregistered (and zeroed) pad buffer between the RPC
	 * header and any write data. In all non-rdma cases, any following
	 * data has been copied into the RPC header buffer.
	 */
	req->rl_send_iov[0].addr = rdmab_addr(req->rl_rdmabuf);
	req->rl_send_iov[0].length = hdrlen;
	req->rl_send_iov[0].lkey = rdmab_lkey(req->rl_rdmabuf);

	req->rl_niovs = 1;
	if (rtype == rpcrdma_areadch)
		return 0;

	req->rl_send_iov[1].addr = rdmab_addr(req->rl_sendbuf);
	req->rl_send_iov[1].length = rpclen;
	req->rl_send_iov[1].lkey = rdmab_lkey(req->rl_sendbuf);

	req->rl_niovs = 2;
	return 0;
}
/*
 * Chase down a received write or reply chunklist to get length
 * RDMA'd by server. See map at rpcrdma_create_chunks()! :-)
 */
static int
rpcrdma_count_chunks(struct rpcrdma_rep *rep, unsigned int max, int wrchunk, __be32 **iptrp)
{
	unsigned int i, total_len;
	struct rpcrdma_write_chunk *cur_wchunk;
	char *base = (char *)rdmab_to_msg(rep->rr_rdmabuf);

	i = be32_to_cpu(**iptrp);
	if (i > max)
		return -1;
	cur_wchunk = (struct rpcrdma_write_chunk *) (*iptrp + 1);
	total_len = 0;
	while (i--) {
		struct rpcrdma_segment *seg = &cur_wchunk->wc_target;
		ifdebug(FACILITY) {
			u64 off;
			xdr_decode_hyper((__be32 *)&seg->rs_offset, &off);
			dprintk("RPC:       %s: chunk %d@0x%llx:0x%x\n",
				__func__,
				be32_to_cpu(seg->rs_length),
				(unsigned long long)off,
				be32_to_cpu(seg->rs_handle));
		}
		total_len += be32_to_cpu(seg->rs_length);
		++cur_wchunk;
	}
	/* check and adjust for properly terminated write chunk */
	if (wrchunk) {
		__be32 *w = (__be32 *) cur_wchunk;
		if (*w++ != xdr_zero)
			return -1;
		cur_wchunk = (struct rpcrdma_write_chunk *) w;
	}
	if ((char *)cur_wchunk > base + rep->rr_len)
		return -1;

	*iptrp = (__be32 *) cur_wchunk;
	return total_len;
}
/*
 * Scatter inline received data back into provided iov's.
 */
static void
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	int i, npages, curlen, olen;
	char *destp;
	struct page **ppages;
	int page_base;

	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len) {	/* write chunk header fixup */
		curlen = copy_len;
		rqst->rq_rcv_buf.head[0].iov_len = curlen;
	}

	dprintk("RPC:       %s: srcp 0x%p len %d hdrlen %d\n",
		__func__, srcp, copy_len, curlen);

	/* Shift pointer for first receive segment only */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	srcp += curlen;
	copy_len -= curlen;

	olen = copy_len;
	i = 0;
	rpcx_to_rdmax(rqst->rq_xprt)->rx_stats.fixup_copy_count += olen;
	page_base = rqst->rq_rcv_buf.page_base;
	ppages = rqst->rq_rcv_buf.pages + (page_base >> PAGE_SHIFT);
	page_base &= ~PAGE_MASK;

	if (copy_len && rqst->rq_rcv_buf.page_len) {
		npages = PAGE_ALIGN(page_base +
				rqst->rq_rcv_buf.page_len) >> PAGE_SHIFT;
		for (; i < npages; i++) {
			curlen = PAGE_SIZE - page_base;
			if (curlen > copy_len)
				curlen = copy_len;
			dprintk("RPC:       %s: page %d"
				" srcp 0x%p len %d curlen %d\n",
				__func__, i, srcp, copy_len, curlen);
			destp = kmap_atomic(ppages[i]);
			memcpy(destp + page_base, srcp, curlen);
			flush_dcache_page(ppages[i]);
			kunmap_atomic(destp);
			srcp += curlen;
			copy_len -= curlen;
			if (copy_len == 0)
				break;
			page_base = 0;
		}
	}

	if (copy_len && rqst->rq_rcv_buf.tail[0].iov_len) {
		curlen = copy_len;
		if (curlen > rqst->rq_rcv_buf.tail[0].iov_len)
			curlen = rqst->rq_rcv_buf.tail[0].iov_len;
		if (rqst->rq_rcv_buf.tail[0].iov_base != srcp)
			memmove(rqst->rq_rcv_buf.tail[0].iov_base, srcp, curlen);
		dprintk("RPC:       %s: tail srcp 0x%p len %d curlen %d\n",
			__func__, srcp, copy_len, curlen);
		rqst->rq_rcv_buf.tail[0].iov_len = curlen;
		copy_len -= curlen; ++i;
	} else
		rqst->rq_rcv_buf.tail[0].iov_len = 0;

	if (pad) {
		/* implicit padding on terminal chunk */
		unsigned char *p = rqst->rq_rcv_buf.tail[0].iov_base;
		while (pad--)
			p[rqst->rq_rcv_buf.tail[0].iov_len++] = 0;
	}

	if (copy_len)
		dprintk("RPC:       %s: %d bytes in"
			" %d extra segments (%d lost)\n",
			__func__, olen, i, copy_len);

	/* TBD avoid a warning from call_decode() */
	rqst->rq_private_buf = rqst->rq_rcv_buf;
}
void
rpcrdma_connect_worker(struct work_struct *work)
{
	struct rpcrdma_ep *ep =
		container_of(work, struct rpcrdma_ep, rep_connect_worker.work);
	struct rpcrdma_xprt *r_xprt =
		container_of(ep, struct rpcrdma_xprt, rx_ep);
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;

	spin_lock_bh(&xprt->transport_lock);
	if (++xprt->connect_cookie == 0)	/* maintain a reserved value */
		++xprt->connect_cookie;
	if (ep->rep_connected > 0) {
		if (!xprt_test_and_set_connected(xprt))
			xprt_wake_pending_tasks(xprt, 0);
	} else {
		if (xprt_test_and_clear_connected(xprt))
			xprt_wake_pending_tasks(xprt, -ENOTCONN);
	}
	spin_unlock_bh(&xprt->transport_lock);
}
/*
 * This function is called when an async event is posted to
 * the connection which changes the connection state. All it
 * does at this point is mark the connection up/down, the rpc
 * timers do the rest.
 */
void
rpcrdma_conn_func(struct rpcrdma_ep *ep)
{
	schedule_delayed_work(&ep->rep_connect_worker, 0);
}
/*
 * Called as a tasklet to do req/reply match and complete a request
 * Errors must result in the RPC task either being awakened, or
 * allowed to timeout, to discover the errors at that time.
 */
void
rpcrdma_reply_handler(struct rpcrdma_rep *rep)
{
	struct rpcrdma_msg *headerp;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	__be32 *iptr;
	int rdmalen, status;
	unsigned long cwnd;
	u32 credits;

	/* Check status. If bad, signal disconnect and return rep to pool */
	if (rep->rr_len == ~0U) {
		rpcrdma_recv_buffer_put(rep);
		if (r_xprt->rx_ep.rep_connected == 1) {
			r_xprt->rx_ep.rep_connected = -EIO;
			rpcrdma_conn_func(&r_xprt->rx_ep);
		}
		return;
	}
	if (rep->rr_len < RPCRDMA_HDRLEN_MIN) {
		dprintk("RPC:       %s: short/invalid reply\n", __func__);
		goto repost;
	}
	headerp = rdmab_to_msg(rep->rr_rdmabuf);
	if (headerp->rm_vers != rpcrdma_version) {
		dprintk("RPC:       %s: invalid version %d\n",
			__func__, be32_to_cpu(headerp->rm_vers));
		goto repost;
	}

	/* Get XID and try for a match. */
	spin_lock(&xprt->transport_lock);
	rqst = xprt_lookup_rqst(xprt, headerp->rm_xid);
	if (rqst == NULL) {
		spin_unlock(&xprt->transport_lock);
		dprintk("RPC:       %s: reply 0x%p failed "
			"to match any request xid 0x%08x len %d\n",
			__func__, rep, be32_to_cpu(headerp->rm_xid),
			rep->rr_len);
repost:
		r_xprt->rx_stats.bad_reply_count++;
		if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, &r_xprt->rx_ep, rep))
			rpcrdma_recv_buffer_put(rep);

		return;
	}

	/* get request object */
	req = rpcr_to_rdmar(rqst);
	if (req->rl_reply) {
		spin_unlock(&xprt->transport_lock);
		dprintk("RPC:       %s: duplicate reply 0x%p to RPC "
			"request 0x%p: xid 0x%08x\n", __func__, rep, req,
			be32_to_cpu(headerp->rm_xid));
		goto repost;
	}

	dprintk("RPC:       %s: reply 0x%p completes request 0x%p\n"
		"                   RPC request 0x%p xid 0x%08x\n",
		__func__, rep, req, rqst,
		be32_to_cpu(headerp->rm_xid));

	/* from here on, the reply is no longer an orphan */
	req->rl_reply = rep;
	xprt->reestablish_timeout = 0;

	/* check for expected message types */
	/* The order of some of these tests is important. */
	switch (headerp->rm_type) {
	case rdma_msg:
		/* never expect read chunks */
		/* never expect reply chunks (two ways to check) */
		/* never expect write chunks without having offered RDMA */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    (headerp->rm_body.rm_chunks[1] == xdr_zero &&
		     headerp->rm_body.rm_chunks[2] != xdr_zero) ||
		    (headerp->rm_body.rm_chunks[1] != xdr_zero &&
		     req->rl_nchunks == 0))
			goto badheader;
		if (headerp->rm_body.rm_chunks[1] != xdr_zero) {
			/* count any expected write chunks in read reply */
			/* start at write chunk array count */
			iptr = &headerp->rm_body.rm_chunks[2];
			rdmalen = rpcrdma_count_chunks(rep,
						req->rl_nchunks, 1, &iptr);
			/* check for validity, and no reply chunk after */
			if (rdmalen < 0 || *iptr++ != xdr_zero)
				goto badheader;
			rep->rr_len -=
			    ((unsigned char *)iptr - (unsigned char *)headerp);
			status = rep->rr_len + rdmalen;
			r_xprt->rx_stats.total_rdma_reply += rdmalen;
			/* special case - last chunk may omit padding */
			if (rdmalen &= 3) {
				rdmalen = 4 - rdmalen;
				status += rdmalen;
			}
		} else {
			/* else ordinary inline */
			rdmalen = 0;
			iptr = (__be32 *)((unsigned char *)headerp +
							RPCRDMA_HDRLEN_MIN);
			rep->rr_len -= RPCRDMA_HDRLEN_MIN;
			status = rep->rr_len;
		}
		/* Fix up the rpc results for upper layer */
		rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len, rdmalen);
		break;

	case rdma_nomsg:
		/* never expect read or write chunks, always reply chunks */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    headerp->rm_body.rm_chunks[1] != xdr_zero ||
		    headerp->rm_body.rm_chunks[2] != xdr_one ||
		    req->rl_nchunks == 0)
			goto badheader;
		iptr = (__be32 *)((unsigned char *)headerp +
							RPCRDMA_HDRLEN_MIN);
		rdmalen = rpcrdma_count_chunks(rep, req->rl_nchunks, 0, &iptr);
		if (rdmalen < 0)
			goto badheader;
		r_xprt->rx_stats.total_rdma_reply += rdmalen;
		/* Reply chunk buffer already is the reply vector - no fixup. */
		status = rdmalen;
		break;

badheader:
	default:
		dprintk("%s: invalid rpcrdma reply header (type %d):"
				" chunks[012] == %d %d %d"
				" expected chunks <= %d\n",
				__func__, be32_to_cpu(headerp->rm_type),
				headerp->rm_body.rm_chunks[0],
				headerp->rm_body.rm_chunks[1],
				headerp->rm_body.rm_chunks[2],
				req->rl_nchunks);
		status = -EIO;
		r_xprt->rx_stats.bad_reply_count++;
		break;
	}

	credits = be32_to_cpu(headerp->rm_credit);
	if (credits == 0)
		credits = 1;	/* don't deadlock */
	else if (credits > r_xprt->rx_buf.rb_max_requests)
		credits = r_xprt->rx_buf.rb_max_requests;

	cwnd = xprt->cwnd;
	xprt->cwnd = credits << RPC_CWNDSHIFT;
	if (xprt->cwnd > cwnd)
		xprt_release_rqst_cong(rqst->rq_task);

	dprintk("RPC:       %s: xprt_complete_rqst(0x%p, 0x%p, %d)\n",
			__func__, xprt, rqst, status);
	xprt_complete_rqst(rqst->rq_task, status);
	spin_unlock(&xprt->transport_lock);
}