/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * rpc_rdma.c
 *
 * This file contains the guts of the RPC RDMA protocol, and
 * does marshaling/unmarshaling, etc. It is also where interfacing
 * to the Linux RPC framework lives.
 */

#include "xprt_rdma.h"

#include <linux/highmem.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY        RPCDBG_TRANS
#endif
enum rpcrdma_chunktype {
        rpcrdma_noch = 0,
        rpcrdma_readch,
        rpcrdma_areadch,
        rpcrdma_writech,
        rpcrdma_replych
};

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static const char transfertypes[][12] = {
        "pure inline",  /* no chunks */
        " read chunk",  /* some argument via rdma read */
        "*read chunk",  /* entire request via rdma read */
        "write chunk",  /* some result via rdma write */
        "reply chunk"   /* entire reply via rdma write */
};
#endif
/* The client can send a request inline as long as the RPCRDMA header
 * plus the RPC call fit under the transport's inline limit. If the
 * combined call message size exceeds that limit, the client must use
 * the read chunk list for this operation.
 */
static bool rpcrdma_args_inline(struct rpc_rqst *rqst)
{
        unsigned int callsize = RPCRDMA_HDRLEN_MIN + rqst->rq_snd_buf.len;

        return callsize <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst);
}
/* The client can't know how large the actual reply will be. Thus it
 * plans for the largest possible reply for that particular ULP
 * operation. If the maximum combined reply message size exceeds that
 * limit, the client must provide a write list or a reply chunk for
 * this request.
 */
static bool rpcrdma_results_inline(struct rpc_rqst *rqst)
{
        unsigned int repsize = RPCRDMA_HDRLEN_MIN + rqst->rq_rcv_buf.buflen;

        return repsize <= RPCRDMA_INLINE_READ_THRESHOLD(rqst);
}
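/* Worked example, assuming the common default of a 1024-byte inline
 * threshold: the chunk-less RPC/RDMA header is 28 bytes
 * (RPCRDMA_HDRLEN_MIN), so an RPC call of up to 996 bytes goes out
 * inline, and anything larger forces a read chunk list. The actual
 * thresholds are per-connection values, so these numbers are
 * illustrative only.
 */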
static int
rpcrdma_tail_pullup(struct xdr_buf *buf)
{
        size_t tlen = buf->tail[0].iov_len;
        size_t skip = tlen & 3;

        /* Do not include the tail if it is only an XDR pad */
        if (tlen < 4)
                return 0;

        /* xdr_write_pages() adds a pad at the beginning of the tail
         * if the content in "buf->pages" is unaligned. Force the
         * tail's actual content to land at the next XDR position
         * after the head instead.
         */
        if (skip) {
                unsigned char *src, *dst;
                unsigned int count;

                src = buf->tail[0].iov_base;
                dst = buf->head[0].iov_base;
                dst += buf->head[0].iov_len;

                src += skip;
                tlen -= skip;

                dprintk("RPC: %s: skip=%zu, memmove(%p, %p, %zu)\n",
                        __func__, skip, dst, src, tlen);

                for (count = tlen; count; count--)
                        *dst++ = *src++;
        }

        return tlen;
}
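/* Example: when the page data needs three bytes of XDR padding,
 * xdr_write_pages() begins the tail with a 3-byte pad, so skip == 3;
 * the pad is dropped and the remaining tail bytes are copied to the
 * end of the head, landing at the next XDR position. A tail that is
 * nothing but pad (tlen < 4) is omitted entirely.
 */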
/* Split "vec" on page boundaries into segments. FMR registers pages,
 * not a byte range. Other modes coalesce these segments into a single
 * MR when they can.
 */
static int
rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg,
                     int n, int nsegs)
{
        size_t page_offset;
        u32 remaining;
        char *base;

        base = vec->iov_base;
        page_offset = offset_in_page(base);
        remaining = vec->iov_len;
        while (remaining && n < nsegs) {
                seg[n].mr_page = NULL;
                seg[n].mr_offset = base;
                seg[n].mr_len = min_t(u32, PAGE_SIZE - page_offset, remaining);
                remaining -= seg[n].mr_len;
                base += seg[n].mr_len;
                ++n;
                page_offset = 0;
        }
        return n;
}
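/* Example: with 4KB pages, a 6000-byte kvec that starts 100 bytes
 * into a page is converted into two segments: 3996 bytes up to the
 * end of the first page, then the remaining 2004 bytes at offset 0
 * of the next page.
 */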
/*
 * Chunk assembly from upper layer xdr_buf.
 *
 * Prepare the passed-in xdr_buf into representation as RPC/RDMA chunk
 * elements. Segments are then coalesced when registered, if possible
 * within the selected memreg mode.
 *
 * Returns positive number of segments converted, or a negative errno.
 */

static int
rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
        enum rpcrdma_chunktype type, struct rpcrdma_mr_seg *seg, int nsegs)
{
        int len, n = 0, p;
        int page_base;
        struct page **ppages;

        if (pos == 0) {
                n = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, n, nsegs);
                if (n == nsegs)
                        return -EIO;
        }

        len = xdrbuf->page_len;
        ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
        page_base = xdrbuf->page_base & ~PAGE_MASK;
        p = 0;
        while (len && n < nsegs) {
                if (!ppages[p]) {
                        /* alloc the pagelist for receiving buffer */
                        ppages[p] = alloc_page(GFP_ATOMIC);
                        if (!ppages[p])
                                return -ENOMEM;
                }
                seg[n].mr_page = ppages[p];
                seg[n].mr_offset = (void *)(unsigned long) page_base;
                seg[n].mr_len = min_t(u32, PAGE_SIZE - page_base, len);
                if (seg[n].mr_len > PAGE_SIZE)
                        return -EIO;
                len -= seg[n].mr_len;
                ++n;
                ++p;
                page_base = 0;  /* page offset only applies to first page */
        }

        /* Message overflows the seg array */
        if (len && n == nsegs)
                return -EIO;

        /* When encoding the read list, the tail is always sent inline */
        if (type == rpcrdma_readch)
                return n;

        if (xdrbuf->tail[0].iov_len) {
                /* the rpcrdma protocol allows us to omit any trailing
                 * xdr pad bytes, saving the server an RDMA operation. */
                if (xdrbuf->tail[0].iov_len < 4 && xprt_rdma_pad_optimize)
                        return n;
                n = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, n, nsegs);
                if (n == nsegs)
                        return -EIO;
        }

        return n;
}
/*
 * Create read/write chunk lists, and reply chunks, for RDMA
 *
 *   Assume check against THRESHOLD has been done, and chunks are required.
 *   Assume only encoding one list entry for read|write chunks. The NFSv3
 *   protocol is simple enough to allow this as it only has a single "bulk
 *   result" in each procedure - complicated NFSv4 COMPOUNDs are not. (The
 *   RDMA/Sessions NFSv4 proposal addresses this for future v4 revs.)
 *
 * When used for a single reply chunk (which is a special write
 * chunk used for the entire reply, rather than just the data), it
 * is used primarily for READDIR and READLINK which would otherwise
 * be severely size-limited by a small rdma inline read max. The server
 * response will come back as an RDMA Write, followed by a message
 * of type RDMA_NOMSG carrying the xid and length. As a result, reply
 * chunks do not provide data alignment, however they do not require
 * "fixup" (moving the response to the upper layer buffer) either.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Read chunklist (a linked list):
 *   N elements, position P (same P for all chunks of same arg!):
 *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 *  Write chunklist (a list of (one) counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO - 0
 *
 *  Reply chunk (a counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO
 *
 * Returns positive RPC/RDMA header size, or negative errno.
 */
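/* For illustration (the handle/length/offset values are made up): a
 * read list describing two segments of an argument at XDR position 36
 * is encoded as the sequence of XDR words
 *
 *    1, 36, H1, 4096, O1 - 1, 36, H2, 2048, O2 - 0
 *
 * where each offset O is a 64-bit hyper and the trailing zero word
 * terminates the list.
 */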
static ssize_t
rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
                struct rpcrdma_msg *headerp, enum rpcrdma_chunktype type)
{
        struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
        int n, nsegs, nchunks = 0;
        unsigned int pos;
        struct rpcrdma_mr_seg *seg = req->rl_segments;
        struct rpcrdma_read_chunk *cur_rchunk = NULL;
        struct rpcrdma_write_array *warray = NULL;
        struct rpcrdma_write_chunk *cur_wchunk = NULL;
        __be32 *iptr = headerp->rm_body.rm_chunks;
        int (*map)(struct rpcrdma_xprt *, struct rpcrdma_mr_seg *, int, bool);

        if (type == rpcrdma_readch || type == rpcrdma_areadch) {
                /* a read chunk - server will RDMA Read our memory */
                cur_rchunk = (struct rpcrdma_read_chunk *) iptr;
        } else {
                /* a write or reply chunk - server will RDMA Write our memory */
                *iptr++ = xdr_zero;     /* encode a NULL read chunk list */
                if (type == rpcrdma_replych)
                        *iptr++ = xdr_zero;     /* a NULL write chunk list */
                warray = (struct rpcrdma_write_array *) iptr;
                cur_wchunk = (struct rpcrdma_write_chunk *) (warray + 1);
        }

        if (type == rpcrdma_replych || type == rpcrdma_areadch)
                pos = 0;
        else
                pos = target->head[0].iov_len;

        nsegs = rpcrdma_convert_iovs(target, pos, type, seg, RPCRDMA_MAX_SEGS);
        if (nsegs < 0)
                return nsegs;

        map = r_xprt->rx_ia.ri_ops->ro_map;
        do {
                n = map(r_xprt, seg, nsegs, cur_wchunk != NULL);
                if (n <= 0)
                        goto out;
                if (cur_rchunk) {       /* read */
                        cur_rchunk->rc_discrim = xdr_one;
                        /* all read chunks have the same "position" */
                        cur_rchunk->rc_position = cpu_to_be32(pos);
                        cur_rchunk->rc_target.rs_handle =
                                                cpu_to_be32(seg->mr_rkey);
                        cur_rchunk->rc_target.rs_length =
                                                cpu_to_be32(seg->mr_len);
                        xdr_encode_hyper(
                                (__be32 *)&cur_rchunk->rc_target.rs_offset,
                                seg->mr_base);
                        dprintk("RPC: %s: read chunk "
                                "elem %d@0x%llx:0x%x pos %u (%s)\n", __func__,
                                seg->mr_len, (unsigned long long)seg->mr_base,
                                seg->mr_rkey, pos, n < nsegs ? "more" : "last");
                        cur_rchunk++;
                        r_xprt->rx_stats.read_chunk_count++;
                } else {                /* write/reply */
                        cur_wchunk->wc_target.rs_handle =
                                                cpu_to_be32(seg->mr_rkey);
                        cur_wchunk->wc_target.rs_length =
                                                cpu_to_be32(seg->mr_len);
                        xdr_encode_hyper(
                                (__be32 *)&cur_wchunk->wc_target.rs_offset,
                                seg->mr_base);
                        dprintk("RPC: %s: %s chunk "
                                "elem %d@0x%llx:0x%x (%s)\n", __func__,
                                (type == rpcrdma_replych) ? "reply" : "write",
                                seg->mr_len, (unsigned long long)seg->mr_base,
                                seg->mr_rkey, n < nsegs ? "more" : "last");
                        cur_wchunk++;
                        if (type == rpcrdma_replych)
                                r_xprt->rx_stats.reply_chunk_count++;
                        else
                                r_xprt->rx_stats.write_chunk_count++;
                        r_xprt->rx_stats.total_rdma_request += seg->mr_len;
                }
                nchunks++;
                seg += n;
                nsegs -= n;
        } while (nsegs);

        /* success. all failures return above */
        req->rl_nchunks = nchunks;

        /*
         * finish off header. If write, marshal discrim and nchunks.
         */
        if (cur_rchunk) {
                iptr = (__be32 *) cur_rchunk;
                *iptr++ = xdr_zero;     /* finish the read chunk list */
                *iptr++ = xdr_zero;     /* encode a NULL write chunk list */
                *iptr++ = xdr_zero;     /* encode a NULL reply chunk */
        } else {
                warray->wc_discrim = xdr_one;
                warray->wc_nchunks = cpu_to_be32(nchunks);
                iptr = (__be32 *) cur_wchunk;
                if (type == rpcrdma_writech) {
                        *iptr++ = xdr_zero; /* finish the write chunk list */
                        *iptr++ = xdr_zero; /* encode a NULL reply chunk */
                }
        }

        /*
         * Return header size.
         */
        return (unsigned char *)iptr - (unsigned char *)headerp;

out:
        for (pos = 0; nchunks--;)
                pos += r_xprt->rx_ia.ri_ops->ro_unmap(r_xprt,
                                                      &req->rl_segments[pos]);
        return n;
}
/*
 * Copy write data inline.
 * This function is used for "small" requests. Data which is passed
 * to RPC via iovecs (or page list) is copied directly into the
 * pre-registered memory buffer for this request. For small amounts
 * of data, this is efficient. The cutoff value is tunable.
 */
static void rpcrdma_inline_pullup(struct rpc_rqst *rqst)
{
        int i, npages, curlen;
        int copy_len;
        unsigned char *srcp, *destp;
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
        int page_base;
        struct page **ppages;

        destp = rqst->rq_svec[0].iov_base;
        curlen = rqst->rq_svec[0].iov_len;
        destp += curlen;

        dprintk("RPC: %s: destp 0x%p len %d hdrlen %d\n",
                __func__, destp, rqst->rq_slen, curlen);

        copy_len = rqst->rq_snd_buf.page_len;

        if (rqst->rq_snd_buf.tail[0].iov_len) {
                curlen = rqst->rq_snd_buf.tail[0].iov_len;
                if (destp + copy_len != rqst->rq_snd_buf.tail[0].iov_base) {
                        memmove(destp + copy_len,
                                rqst->rq_snd_buf.tail[0].iov_base, curlen);
                        r_xprt->rx_stats.pullup_copy_count += curlen;
                }
                dprintk("RPC: %s: tail destp 0x%p len %d\n",
                        __func__, destp + copy_len, curlen);
                rqst->rq_svec[0].iov_len += curlen;
        }
        r_xprt->rx_stats.pullup_copy_count += copy_len;

        page_base = rqst->rq_snd_buf.page_base;
        ppages = rqst->rq_snd_buf.pages + (page_base >> PAGE_SHIFT);
        page_base &= ~PAGE_MASK;
        npages = PAGE_ALIGN(page_base + copy_len) >> PAGE_SHIFT;
        for (i = 0; copy_len && i < npages; i++) {
                curlen = PAGE_SIZE - page_base;
                if (curlen > copy_len)
                        curlen = copy_len;
                dprintk("RPC: %s: page %d destp 0x%p len %d curlen %d\n",
                        __func__, i, destp, copy_len, curlen);
                srcp = kmap_atomic(ppages[i]);
                memcpy(destp, srcp + page_base, curlen);
                kunmap_atomic(srcp);
                rqst->rq_svec[0].iov_len += curlen;
                destp += curlen;
                copy_len -= curlen;
                page_base = 0;
        }
        /* header now contains entire send message */
}
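/* After pullup the first send iovec covers the whole call message.
 * For example (sizes illustrative only): a 120-byte head, 300 bytes
 * of page data, and a 4-byte tail leave rq_svec[0].iov_len == 424,
 * with the page and tail bytes copied contiguously after the head in
 * the pre-registered send buffer.
 */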
/*
 * Marshal a request: the primary job of this routine is to choose
 * the transfer modes. See comments below.
 *
 * Uses multiple RDMA IOVs for a request:
 *  [0] -- RPC RDMA header, which uses memory from the *start* of the
 *         preregistered buffer that already holds the RPC data in
 *         its middle.
 *  [1] -- the RPC header/data, marshaled by RPC and the NFS protocol.
 *  [2] -- optional padding.
 *  [3] -- if padded, header only in [1] and data here.
 *
 * Returns zero on success, otherwise a negative errno.
 */

int
rpcrdma_marshal_req(struct rpc_rqst *rqst)
{
        struct rpc_xprt *xprt = rqst->rq_xprt;
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
        struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
        char *base;
        size_t rpclen;
        ssize_t hdrlen;
        enum rpcrdma_chunktype rtype, wtype;
        struct rpcrdma_msg *headerp;

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
        if (test_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state))
                return rpcrdma_bc_marshal_reply(rqst);
#endif
        /*
         * rpclen gets amount of data in first buffer, which is the
         * pre-registered buffer.
         */
        base = rqst->rq_svec[0].iov_base;
        rpclen = rqst->rq_svec[0].iov_len;

        headerp = rdmab_to_msg(req->rl_rdmabuf);
        /* don't byte-swap XID, it's already done in request */
        headerp->rm_xid = rqst->rq_xid;
        headerp->rm_vers = rpcrdma_version;
        headerp->rm_credit = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);
        headerp->rm_type = rdma_msg;
        /*
         * Chunks needed for results?
         *
         * o Read ops return data as write chunk(s), header as inline.
         * o If the expected result is under the inline threshold, all ops
         *   return as inline.
         * o Large non-read ops return as a single reply chunk.
         */
        if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
                wtype = rpcrdma_writech;
        else if (rpcrdma_results_inline(rqst))
                wtype = rpcrdma_noch;
        else
                wtype = rpcrdma_replych;
        /*
         * Chunks needed for arguments?
         *
         * o If the total request is under the inline threshold, all ops
         *   are sent as inline.
         * o Large write ops transmit data as read chunk(s), header as
         *   inline.
         * o Large non-write ops are sent with the entire message as a
         *   single read chunk (protocol 0-position special case).
         *
         * This assumes that the upper layer does not present a request
         * that both has a data payload, and whose non-data arguments
         * by themselves are larger than the inline threshold.
         */
        if (rpcrdma_args_inline(rqst)) {
                rtype = rpcrdma_noch;
        } else if (rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
                rtype = rpcrdma_readch;
        } else {
                r_xprt->rx_stats.nomsg_call_count++;
                headerp->rm_type = htonl(RDMA_NOMSG);
                rtype = rpcrdma_areadch;
                rpclen = 0;
        }
        /* The following simplification is not true forever */
        if (rtype != rpcrdma_noch && wtype == rpcrdma_replych)
                wtype = rpcrdma_noch;
        if (rtype != rpcrdma_noch && wtype != rpcrdma_noch) {
                dprintk("RPC: %s: cannot marshal multiple chunk lists\n",
                        __func__);
                return -EIO;
        }

        hdrlen = RPCRDMA_HDRLEN_MIN;
        /*
         * Pull up any extra send data into the preregistered buffer.
         * When padding is in use and applies to the transfer, insert
         * it and change the message type.
         */
        if (rtype == rpcrdma_noch) {

                rpcrdma_inline_pullup(rqst);

                headerp->rm_body.rm_nochunks.rm_empty[0] = xdr_zero;
                headerp->rm_body.rm_nochunks.rm_empty[1] = xdr_zero;
                headerp->rm_body.rm_nochunks.rm_empty[2] = xdr_zero;
                /* new length after pullup */
                rpclen = rqst->rq_svec[0].iov_len;
        } else if (rtype == rpcrdma_readch)
                rpclen += rpcrdma_tail_pullup(&rqst->rq_snd_buf);
        if (rtype != rpcrdma_noch) {
                hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_snd_buf,
                                               headerp, rtype);
                wtype = rtype;  /* simplify dprintk */
        } else if (wtype != rpcrdma_noch) {
                hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_rcv_buf,
                                               headerp, wtype);
        }
        if (hdrlen < 0)
                return hdrlen;
        dprintk("RPC: %s: %s: hdrlen %zd rpclen %zd"
                " headerp 0x%p base 0x%p lkey 0x%x\n",
                __func__, transfertypes[wtype], hdrlen, rpclen,
                headerp, base, rdmab_lkey(req->rl_rdmabuf));
        /*
         * initialize send_iov's - normally only two: rdma chunk header and
         * single preregistered RPC header buffer, but if padding is present,
         * then use a preregistered (and zeroed) pad buffer between the RPC
         * header and any write data. In all non-rdma cases, any following
         * data has been copied into the RPC header buffer.
         */
        req->rl_send_iov[0].addr = rdmab_addr(req->rl_rdmabuf);
        req->rl_send_iov[0].length = hdrlen;
        req->rl_send_iov[0].lkey = rdmab_lkey(req->rl_rdmabuf);

        req->rl_niovs = 1;
        if (rtype == rpcrdma_areadch)
                return 0;

        req->rl_send_iov[1].addr = rdmab_addr(req->rl_sendbuf);
        req->rl_send_iov[1].length = rpclen;
        req->rl_send_iov[1].lkey = rdmab_lkey(req->rl_sendbuf);

        req->rl_niovs = 2;
        return 0;
}
/*
 * Chase down a received write or reply chunklist to get length
 * RDMA'd by server. See map at rpcrdma_create_chunks()! :-)
 */
static int
rpcrdma_count_chunks(struct rpcrdma_rep *rep, unsigned int max, int wrchunk,
                     __be32 **iptrp)
{
        unsigned int i, total_len;
        struct rpcrdma_write_chunk *cur_wchunk;
        char *base = (char *)rdmab_to_msg(rep->rr_rdmabuf);

        i = be32_to_cpu(**iptrp);
        if (i > max)
                return -1;
        cur_wchunk = (struct rpcrdma_write_chunk *) (*iptrp + 1);
        total_len = 0;
        while (i--) {
                struct rpcrdma_segment *seg = &cur_wchunk->wc_target;
                ifdebug(FACILITY) {
                        u64 off;

                        xdr_decode_hyper((__be32 *)&seg->rs_offset, &off);
                        dprintk("RPC: %s: chunk %d@0x%llx:0x%x\n",
                                __func__,
                                be32_to_cpu(seg->rs_length),
                                (unsigned long long)off,
                                be32_to_cpu(seg->rs_handle));
                }
                total_len += be32_to_cpu(seg->rs_length);
                ++cur_wchunk;
        }
        /* check and adjust for properly terminated write chunk */
        if (wrchunk) {
                __be32 *w = (__be32 *) cur_wchunk;

                if (*w++ != xdr_zero)
                        return -1;
                cur_wchunk = (struct rpcrdma_write_chunk *) w;
        }
        if ((char *)cur_wchunk > base + rep->rr_len)
                return -1;

        *iptrp = (__be32 *) cur_wchunk;
        return total_len;
}
/*
 * Scatter inline received data back into provided iov's.
 */
static void
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
        int i, npages, curlen, olen;
        char *destp;
        struct page **ppages;
        int page_base;

        curlen = rqst->rq_rcv_buf.head[0].iov_len;
        if (curlen > copy_len) {        /* write chunk header fixup */
                curlen = copy_len;
                rqst->rq_rcv_buf.head[0].iov_len = curlen;
        }

        dprintk("RPC: %s: srcp 0x%p len %d hdrlen %d\n",
                __func__, srcp, copy_len, curlen);

        /* Shift pointer for first receive segment only */
        rqst->rq_rcv_buf.head[0].iov_base = srcp;
        srcp += curlen;
        copy_len -= curlen;

        olen = copy_len;
        i = 0;
        rpcx_to_rdmax(rqst->rq_xprt)->rx_stats.fixup_copy_count += olen;
        page_base = rqst->rq_rcv_buf.page_base;
        ppages = rqst->rq_rcv_buf.pages + (page_base >> PAGE_SHIFT);
        page_base &= ~PAGE_MASK;

        if (copy_len && rqst->rq_rcv_buf.page_len) {
                npages = PAGE_ALIGN(page_base +
                                rqst->rq_rcv_buf.page_len) >> PAGE_SHIFT;
                for (; i < npages; i++) {
                        curlen = PAGE_SIZE - page_base;
                        if (curlen > copy_len)
                                curlen = copy_len;
                        dprintk("RPC: %s: page %d"
                                " srcp 0x%p len %d curlen %d\n",
                                __func__, i, srcp, copy_len, curlen);
                        destp = kmap_atomic(ppages[i]);
                        memcpy(destp + page_base, srcp, curlen);
                        flush_dcache_page(ppages[i]);
                        kunmap_atomic(destp);
                        srcp += curlen;
                        copy_len -= curlen;
                        if (copy_len == 0)
                                break;
                        page_base = 0;
                }
        }

        if (copy_len && rqst->rq_rcv_buf.tail[0].iov_len) {
                curlen = copy_len;
                if (curlen > rqst->rq_rcv_buf.tail[0].iov_len)
                        curlen = rqst->rq_rcv_buf.tail[0].iov_len;
                if (rqst->rq_rcv_buf.tail[0].iov_base != srcp)
                        memmove(rqst->rq_rcv_buf.tail[0].iov_base, srcp, curlen);
                dprintk("RPC: %s: tail srcp 0x%p len %d curlen %d\n",
                        __func__, srcp, copy_len, curlen);
                rqst->rq_rcv_buf.tail[0].iov_len = curlen;
                copy_len -= curlen; ++i;
        } else
                rqst->rq_rcv_buf.tail[0].iov_len = 0;

        if (pad) {
                /* implicit padding on terminal chunk */
                unsigned char *p = rqst->rq_rcv_buf.tail[0].iov_base;

                while (pad--)
                        p[rqst->rq_rcv_buf.tail[0].iov_len++] = 0;
        }

        if (copy_len)
                dprintk("RPC: %s: %d bytes in"
                        " %d extra segments (%d lost)\n",
                        __func__, olen, i, copy_len);

        /* TBD avoid a warning from call_decode() */
        rqst->rq_private_buf = rqst->rq_rcv_buf;
}
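/* This is the receive-side inverse of rpcrdma_inline_pullup(): the
 * head iovec is simply pointed at the receive buffer, while page and
 * tail data are copied out to their proper places; only those copied
 * bytes are charged to fixup_copy_count. Reply chunks avoid this
 * copying, at the cost of data alignment, as noted above.
 */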
void
rpcrdma_connect_worker(struct work_struct *work)
{
        struct rpcrdma_ep *ep =
                container_of(work, struct rpcrdma_ep, rep_connect_worker.work);
        struct rpcrdma_xprt *r_xprt =
                container_of(ep, struct rpcrdma_xprt, rx_ep);
        struct rpc_xprt *xprt = &r_xprt->rx_xprt;

        spin_lock_bh(&xprt->transport_lock);
        if (++xprt->connect_cookie == 0)        /* maintain a reserved value */
                ++xprt->connect_cookie;
        if (ep->rep_connected > 0) {
                if (!xprt_test_and_set_connected(xprt))
                        xprt_wake_pending_tasks(xprt, 0);
        } else {
                if (xprt_test_and_clear_connected(xprt))
                        xprt_wake_pending_tasks(xprt, -ENOTCONN);
        }
        spin_unlock_bh(&xprt->transport_lock);
}
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool
rpcrdma_is_bcall(struct rpcrdma_msg *headerp)
{
        __be32 *p = (__be32 *)headerp;

        if (headerp->rm_type != rdma_msg)
                return false;
        if (headerp->rm_body.rm_chunks[0] != xdr_zero)
                return false;
        if (headerp->rm_body.rm_chunks[1] != xdr_zero)
                return false;
        if (headerp->rm_body.rm_chunks[2] != xdr_zero)
                return false;

        /* sanity */
        if (p[7] != headerp->rm_xid)
                return false;
        /* call direction */
        if (p[8] != cpu_to_be32(RPC_CALL))
                return false;

        return true;
}
#endif  /* CONFIG_SUNRPC_BACKCHANNEL */
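/* The indexing above works because a chunk-less RPC/RDMA header is
 * exactly seven XDR words:
 *
 *   p[0] xid,  p[1] vers,  p[2] credits,  p[3] rdma_msg,
 *   p[4..6]    three empty chunk list discriminators,
 *
 * so p[7] is the first word of the RPC header (its xid) and p[8] is
 * the RPC call direction.
 */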
/*
 * This function is called when an async event is posted to
 * the connection which changes the connection state. All it
 * does at this point is mark the connection up/down, the rpc
 * timers do the rest.
 */
void
rpcrdma_conn_func(struct rpcrdma_ep *ep)
{
        schedule_delayed_work(&ep->rep_connect_worker, 0);
}
/* Process received RPC/RDMA messages.
 *
 * Errors must result in the RPC task either being awakened, or
 * allowed to timeout, to discover the errors at that time.
 */
void
rpcrdma_reply_handler(struct rpcrdma_rep *rep)
{
        struct rpcrdma_msg *headerp;
        struct rpcrdma_req *req;
        struct rpc_rqst *rqst;
        struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
        struct rpc_xprt *xprt = &r_xprt->rx_xprt;
        __be32 *iptr;
        int rdmalen, status, rmerr;
        unsigned long cwnd;

        dprintk("RPC: %s: incoming rep %p\n", __func__, rep);
        if (rep->rr_len == RPCRDMA_BAD_LEN)
                goto out_badstatus;
        if (rep->rr_len < RPCRDMA_HDRLEN_ERR)
                goto out_shortreply;

        headerp = rdmab_to_msg(rep->rr_rdmabuf);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
        if (rpcrdma_is_bcall(headerp))
                goto out_bcall;
#endif
        /* Match incoming rpcrdma_rep to an rpcrdma_req to
         * get context for handling any incoming chunks.
         */
        spin_lock_bh(&xprt->transport_lock);
        rqst = xprt_lookup_rqst(xprt, headerp->rm_xid);
        if (!rqst)
                goto out_nomatch;

        req = rpcr_to_rdmar(rqst);
        if (req->rl_reply)
                goto out_duplicate;

        /* Sanity checking has passed. We are now committed
         * to complete this transaction.
         */
        list_del_init(&rqst->rq_list);
        spin_unlock_bh(&xprt->transport_lock);
        dprintk("RPC: %s: reply %p completes request %p (xid 0x%08x)\n",
                __func__, rep, req, be32_to_cpu(headerp->rm_xid));

        /* from here on, the reply is no longer an orphan */
        req->rl_reply = rep;
        xprt->reestablish_timeout = 0;

        if (headerp->rm_vers != rpcrdma_version)
                goto out_badversion;
842 /* The order of some of these tests is important. */
843 switch (headerp
->rm_type
) {
845 /* never expect read chunks */
846 /* never expect reply chunks (two ways to check) */
847 /* never expect write chunks without having offered RDMA */
848 if (headerp
->rm_body
.rm_chunks
[0] != xdr_zero
||
849 (headerp
->rm_body
.rm_chunks
[1] == xdr_zero
&&
850 headerp
->rm_body
.rm_chunks
[2] != xdr_zero
) ||
851 (headerp
->rm_body
.rm_chunks
[1] != xdr_zero
&&
852 req
->rl_nchunks
== 0))
854 if (headerp
->rm_body
.rm_chunks
[1] != xdr_zero
) {
855 /* count any expected write chunks in read reply */
856 /* start at write chunk array count */
857 iptr
= &headerp
->rm_body
.rm_chunks
[2];
858 rdmalen
= rpcrdma_count_chunks(rep
,
859 req
->rl_nchunks
, 1, &iptr
);
860 /* check for validity, and no reply chunk after */
861 if (rdmalen
< 0 || *iptr
++ != xdr_zero
)
864 ((unsigned char *)iptr
- (unsigned char *)headerp
);
865 status
= rep
->rr_len
+ rdmalen
;
866 r_xprt
->rx_stats
.total_rdma_reply
+= rdmalen
;
867 /* special case - last chunk may omit padding */
869 rdmalen
= 4 - rdmalen
;
873 /* else ordinary inline */
875 iptr
= (__be32
*)((unsigned char *)headerp
+
877 rep
->rr_len
-= RPCRDMA_HDRLEN_MIN
;
878 status
= rep
->rr_len
;
880 /* Fix up the rpc results for upper layer */
881 rpcrdma_inline_fixup(rqst
, (char *)iptr
, rep
->rr_len
, rdmalen
);
        case rdma_nomsg:
                /* never expect read or write chunks, always reply chunks */
                if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
                    headerp->rm_body.rm_chunks[1] != xdr_zero ||
                    headerp->rm_body.rm_chunks[2] != xdr_one ||
                    req->rl_nchunks == 0)
                        goto badheader;
                iptr = (__be32 *)((unsigned char *)headerp +
                                                        RPCRDMA_HDRLEN_MIN);
                rdmalen = rpcrdma_count_chunks(rep, req->rl_nchunks, 0, &iptr);
                if (rdmalen < 0)
                        goto badheader;
                r_xprt->rx_stats.total_rdma_reply += rdmalen;
                /* Reply chunk buffer already is the reply vector - no fixup. */
                status = rdmalen;
                break;

        case rdma_error:
                goto out_rdmaerr;
badheader:
        default:
                dprintk("%s: invalid rpcrdma reply header (type %d):"
                                " chunks[012] == %d %d %d"
                                " expected chunks <= %d\n",
                                __func__, be32_to_cpu(headerp->rm_type),
                                headerp->rm_body.rm_chunks[0],
                                headerp->rm_body.rm_chunks[1],
                                headerp->rm_body.rm_chunks[2],
                                req->rl_nchunks);
                status = -EIO;
                r_xprt->rx_stats.bad_reply_count++;
                break;
        }

out:
        /* Invalidate and flush the data payloads before waking the
         * waiting application. This guarantees the memory region is
         * properly fenced from the server before the application
         * accesses the data. It also ensures proper send flow
         * control: waking the next RPC waits until this RPC has
         * relinquished all its Send Queue entries.
         */
        if (req->rl_nchunks)
                r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt, req);

        spin_lock_bh(&xprt->transport_lock);
        cwnd = xprt->cwnd;
        xprt->cwnd = atomic_read(&r_xprt->rx_buf.rb_credits) << RPC_CWNDSHIFT;
        if (xprt->cwnd > cwnd)
                xprt_release_rqst_cong(rqst->rq_task);

        xprt_complete_rqst(rqst->rq_task, status);
        spin_unlock_bh(&xprt->transport_lock);
        dprintk("RPC: %s: xprt_complete_rqst(0x%p, 0x%p, %d)\n",
                        __func__, xprt, rqst, status);
        return;
out_badstatus:
        rpcrdma_recv_buffer_put(rep);
        if (r_xprt->rx_ep.rep_connected == 1) {
                r_xprt->rx_ep.rep_connected = -EIO;
                rpcrdma_conn_func(&r_xprt->rx_ep);
        }
        return;
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
out_bcall:
        rpcrdma_bc_receive_call(r_xprt, rep);
        return;
#endif

/* If the incoming reply terminated a pending RPC, the next
 * RPC call will post a replacement receive buffer as it is
 * being marshaled.
 */
out_badversion:
        dprintk("RPC: %s: invalid version %d\n",
                __func__, be32_to_cpu(headerp->rm_vers));
        status = -EIO;
        r_xprt->rx_stats.bad_reply_count++;
        goto out;
out_rdmaerr:
        rmerr = be32_to_cpu(headerp->rm_body.rm_error.rm_err);
        switch (rmerr) {
        case ERR_VERS:
                pr_err("%s: server reports header version error (%u-%u)\n",
                       __func__,
                       be32_to_cpu(headerp->rm_body.rm_error.rm_vers_low),
                       be32_to_cpu(headerp->rm_body.rm_error.rm_vers_high));
                break;
        case ERR_CHUNK:
                pr_err("%s: server reports header decoding error\n",
                       __func__);
                break;
        default:
                pr_err("%s: server reports unknown error %d\n",
                       __func__, rmerr);
        }
        status = -EREMOTEIO;
        r_xprt->rx_stats.bad_reply_count++;
        goto out;
/* If no pending RPC transaction was matched, post a replacement
 * receive buffer before returning.
 */
out_shortreply:
        dprintk("RPC: %s: short/invalid reply\n", __func__);
        goto repost;

out_nomatch:
        spin_unlock_bh(&xprt->transport_lock);
        dprintk("RPC: %s: no match for incoming xid 0x%08x len %d\n",
                __func__, be32_to_cpu(headerp->rm_xid),
                rep->rr_len);
        goto repost;

out_duplicate:
        spin_unlock_bh(&xprt->transport_lock);
        dprintk("RPC: %s: "
                "duplicate reply %p to RPC request %p: xid 0x%08x\n",
                __func__, rep, req, be32_to_cpu(headerp->rm_xid));

repost:
        r_xprt->rx_stats.bad_reply_count++;
        if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, &r_xprt->rx_ep, rep))
                rpcrdma_recv_buffer_put(rep);
}