/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
/** Implementation of client-side PortalRPC interfaces */

#define DEBUG_SUBSYSTEM S_RPC

#include "../include/obd_support.h"
#include "../include/obd_class.h"
#include "../include/lustre_lib.h"
#include "../include/lustre_ha.h"
#include "../include/lustre_import.h"
#include "../include/lustre_req_layout.h"

#include "ptlrpc_internal.h"
static int ptlrpc_send_new_req(struct ptlrpc_request *req);
static int ptlrpcd_check_work(struct ptlrpc_request *req);
/**
 * Initialize passed in client structure \a cl.
 */
void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
			struct ptlrpc_client *cl)
{
	cl->cli_request_portal = req_portal;
	cl->cli_reply_portal = rep_portal;
	cl->cli_name = name;
}
EXPORT_SYMBOL(ptlrpc_init_client);
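
/*
 * Usage sketch (illustrative only): a typical caller wires its client up
 * during obd setup, picking the portals the service listens on. The call
 * below mirrors how the LDLM callback client is initialized elsewhere in
 * the tree; treat the exact call site as an assumption, not a reference.
 *
 *	ptlrpc_init_client(LDLM_CB_REQUEST_PORTAL, LDLM_CB_REPLY_PORTAL,
 *			   "ldlm_cb_client", &obd->obd_ldlm_client);
 */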
/**
 * Return PortalRPC connection for remote uuid \a uuid
 */
struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid)
{
	struct ptlrpc_connection *c;
	lnet_nid_t self;
	lnet_process_id_t peer;
	int err;

	/*
	 * ptlrpc_uuid_to_peer() initializes its 2nd parameter
	 * before accessing its values.
	 * coverity[uninit_use_in_call]
	 */
	err = ptlrpc_uuid_to_peer(uuid, &peer, &self);
	if (err != 0) {
		CNETERR("cannot find peer %s!\n", uuid->uuid);
		return NULL;
	}

	c = ptlrpc_connection_get(peer, self, uuid);
	if (c) {
		memcpy(c->c_remote_uuid.uuid,
		       uuid->uuid, sizeof(c->c_remote_uuid.uuid));
	}

	CDEBUG(D_INFO, "%s -> %p\n", uuid->uuid, c);

	return c;
}
EXPORT_SYMBOL(ptlrpc_uuid_to_connection);
/**
 * Allocate and initialize new bulk descriptor on the sender.
 * Returns pointer to the descriptor or NULL on error.
 */
struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned npages, unsigned max_brw,
					 unsigned type, unsigned portal)
{
	struct ptlrpc_bulk_desc *desc;
	int i;

	desc = kzalloc(offsetof(struct ptlrpc_bulk_desc, bd_iov[npages]),
		       GFP_NOFS);
	if (!desc)
		return NULL;

	spin_lock_init(&desc->bd_lock);
	init_waitqueue_head(&desc->bd_waitq);
	desc->bd_max_iov = npages;
	desc->bd_iov_count = 0;
	desc->bd_portal = portal;
	desc->bd_type = type;
	desc->bd_md_count = 0;
	LASSERT(max_brw > 0);
	desc->bd_md_max_brw = min(max_brw, PTLRPC_BULK_OPS_COUNT);
	/*
	 * PTLRPC_BULK_OPS_COUNT is the compile-time transfer limit for this
	 * node. Negotiated ocd_brw_size will always be <= this number.
	 */
	for (i = 0; i < PTLRPC_BULK_OPS_COUNT; i++)
		LNetInvalidateHandle(&desc->bd_mds[i]);

	return desc;
}
/**
 * Prepare bulk descriptor for specified outgoing request \a req that
 * can fit \a npages * pages. \a type is bulk type. \a portal is where
 * the bulk is to be sent. Used on client-side.
 * Returns pointer to newly allocated initialized bulk descriptor or NULL on
 * error.
 */
struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
					      unsigned npages, unsigned max_brw,
					      unsigned type, unsigned portal)
{
	struct obd_import *imp = req->rq_import;
	struct ptlrpc_bulk_desc *desc;

	LASSERT(type == BULK_PUT_SINK || type == BULK_GET_SOURCE);
	desc = ptlrpc_new_bulk(npages, max_brw, type, portal);
	if (!desc)
		return NULL;

	desc->bd_import_generation = req->rq_import_generation;
	desc->bd_import = class_import_get(imp);

	desc->bd_cbid.cbid_fn = client_bulk_callback;
	desc->bd_cbid.cbid_arg = desc;

	/* This makes req own desc, and free it when she frees herself */
	req->rq_bulk = desc;

	return desc;
}
EXPORT_SYMBOL(ptlrpc_prep_bulk_imp);
/**
 * Add a page \a page to the bulk descriptor \a desc.
 * Data to transfer in the page starts at offset \a pageoffset and
 * amount of data to transfer from the page is \a len
 */
void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
			     struct page *page, int pageoffset, int len, int pin)
{
	LASSERT(desc->bd_iov_count < desc->bd_max_iov);
	LASSERT(page);
	LASSERT(pageoffset >= 0);
	LASSERT(len > 0);
	LASSERT(pageoffset + len <= PAGE_SIZE);

	desc->bd_nob += len;

	if (pin)
		get_page(page);

	ptlrpc_add_bulk_page(desc, page, pageoffset, len);
}
EXPORT_SYMBOL(__ptlrpc_prep_bulk_page);
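
/*
 * Usage sketch (illustrative only): a client-side bulk transfer is built by
 * allocating a descriptor against the request and then attaching each page.
 * ptlrpc_prep_bulk_page_pin() is the pinning wrapper around
 * __ptlrpc_prep_bulk_page(); "pga" is a hypothetical page array, as an OSC
 * write path might carry.
 *
 *	desc = ptlrpc_prep_bulk_imp(req, page_count, 1, BULK_GET_SOURCE,
 *				    OST_BULK_PORTAL);
 *	if (!desc)
 *		return -ENOMEM;
 *	for (i = 0; i < page_count; i++)
 *		ptlrpc_prep_bulk_page_pin(desc, pga[i]->pg,
 *					  pga[i]->off & ~PAGE_MASK,
 *					  pga[i]->count);
 */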
/**
 * Uninitialize and free bulk descriptor \a desc.
 * Works on bulk descriptors both from server and client side.
 */
void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc, int unpin)
{
	int i;

	LASSERT(desc);
	LASSERT(desc->bd_iov_count != LI_POISON); /* not freed already */
	LASSERT(desc->bd_md_count == 0);	  /* network hands off */
	LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL));

	sptlrpc_enc_pool_put_pages(desc);

	if (desc->bd_export)
		class_export_put(desc->bd_export);
	else
		class_import_put(desc->bd_import);

	if (unpin) {
		for (i = 0; i < desc->bd_iov_count; i++)
			put_page(desc->bd_iov[i].bv_page);
	}

	kfree(desc);
}
EXPORT_SYMBOL(__ptlrpc_free_bulk);
/**
 * Set server timelimit for this req, i.e. how long are we willing to wait
 * for reply before timing out this request.
 */
void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req)
{
	__u32 serv_est;
	int idx;
	struct imp_at *at;

	LASSERT(req->rq_import);

	if (AT_OFF) {
		/* non-AT settings */
		/**
		 * \a imp_server_timeout means this is reverse import and
		 * we send (currently only) ASTs to the client and cannot afford
		 * to wait too long for the reply, otherwise the other client
		 * (because of which we are sending this request) would
		 * timeout waiting for us
		 */
		req->rq_timeout = req->rq_import->imp_server_timeout ?
				  obd_timeout / 2 : obd_timeout;
	} else {
		at = &req->rq_import->imp_at;
		idx = import_at_get_index(req->rq_import,
					  req->rq_request_portal);
		serv_est = at_get(&at->iat_service_estimate[idx]);
		req->rq_timeout = at_est2timeout(serv_est);
	}
	/*
	 * We could get even fancier here, using history to predict increased
	 * loading...
	 */

	/*
	 * Let the server know what this RPC timeout is by putting it in the
	 * reqmsg
	 */
	lustre_msg_set_timeout(req->rq_reqmsg, req->rq_timeout);
}
EXPORT_SYMBOL(ptlrpc_at_set_req_timeout);
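
/*
 * Worked example for the adaptive branch above, assuming the usual
 * at_est2timeout() rule of "estimate x 125% + 5s" (the same rule the resend
 * comment in after_reply() cites): a tracked service estimate of 40s yields
 * rq_timeout = 40 + 40/4 + 5 = 55s, and that is the value advertised to the
 * server via lustre_msg_set_timeout().
 */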
/* Adjust max service estimate based on server value */
static void ptlrpc_at_adj_service(struct ptlrpc_request *req,
				  unsigned int serv_est)
{
	int idx;
	unsigned int oldse;
	struct imp_at *at;

	LASSERT(req->rq_import);
	at = &req->rq_import->imp_at;

	idx = import_at_get_index(req->rq_import, req->rq_request_portal);
	/*
	 * max service estimates are tracked on the server side,
	 * so just keep minimal history here
	 */
	oldse = at_measured(&at->iat_service_estimate[idx], serv_est);
	if (oldse != 0)
		CDEBUG(D_ADAPTTO, "The RPC service estimate for %s ptl %d has changed from %d to %d\n",
		       req->rq_import->imp_obd->obd_name, req->rq_request_portal,
		       oldse, at_get(&at->iat_service_estimate[idx]));
}
/* Expected network latency per remote node (secs) */
int ptlrpc_at_get_net_latency(struct ptlrpc_request *req)
{
	return AT_OFF ? 0 : at_get(&req->rq_import->imp_at.iat_net_latency);
}
/* Adjust expected network latency */
static void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req,
				      unsigned int service_time)
{
	unsigned int nl, oldnl;
	struct imp_at *at;
	time64_t now = ktime_get_real_seconds();

	LASSERT(req->rq_import);

	if (service_time > now - req->rq_sent + 3) {
		/*
		 * bz16408, however, this can also happen if early reply
		 * is lost and client RPC is expired and resent, early reply
		 * or reply of original RPC can still be fit in reply buffer
		 * of resent RPC, now client is measuring time from the
		 * resent time, but server sent back service time of original
		 * RPC.
		 */
		CDEBUG((lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) ?
		       D_ADAPTTO : D_WARNING,
		       "Reported service time %u > total measured time "
		       CFS_DURATION_T"\n", service_time,
		       (long)(now - req->rq_sent));
		return;
	}

	/* Network latency is total time less server processing time */
	nl = max_t(int, now - req->rq_sent -
		   service_time, 0) + 1; /* st rounding */
	at = &req->rq_import->imp_at;

	oldnl = at_measured(&at->iat_net_latency, nl);
	if (oldnl != 0)
		CDEBUG(D_ADAPTTO, "The network latency for %s (nid %s) has changed from %d to %d\n",
		       req->rq_import->imp_obd->obd_name,
		       obd_uuid2str(
			       &req->rq_import->imp_connection->c_remote_uuid),
		       oldnl, at_get(&at->iat_net_latency));
}
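
/*
 * Worked example for the formula above: if the reply arrives 12s after
 * rq_sent and the server reports 10s of service time, the latency sample is
 * nl = max(12 - 10, 0) + 1 = 3s, the +1 absorbing the server's whole-second
 * rounding of its reported service time.
 */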
static int unpack_reply(struct ptlrpc_request *req)
{
	int rc;

	if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL) {
		rc = ptlrpc_unpack_rep_msg(req, req->rq_replen);
		if (rc) {
			DEBUG_REQ(D_ERROR, req, "unpack_rep failed: %d", rc);
			return -EPROTO;
		}
	}

	rc = lustre_unpack_rep_ptlrpc_body(req, MSG_PTLRPC_BODY_OFF);
	if (rc) {
		DEBUG_REQ(D_ERROR, req, "unpack ptlrpc body failed: %d", rc);
		return -EPROTO;
	}
	return 0;
}
/**
 * Handle an early reply message, called with the rq_lock held.
 * If anything goes wrong just ignore it - same as if it never happened
 */
static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req)
	__must_hold(&req->rq_lock)
{
	struct ptlrpc_request *early_req;
	time64_t olddl;
	int rc;

	req->rq_early = 0;
	spin_unlock(&req->rq_lock);

	rc = sptlrpc_cli_unwrap_early_reply(req, &early_req);
	if (rc) {
		spin_lock(&req->rq_lock);
		return rc;
	}

	rc = unpack_reply(early_req);
	if (rc == 0) {
		/* Expecting to increase the service time estimate here */
		ptlrpc_at_adj_service(req,
				      lustre_msg_get_timeout(early_req->rq_repmsg));
		ptlrpc_at_adj_net_latency(req,
					  lustre_msg_get_service_time(early_req->rq_repmsg));
	}

	sptlrpc_cli_finish_early_reply(early_req);

	if (rc != 0) {
		spin_lock(&req->rq_lock);
		return rc;
	}

	/* Adjust the local timeout for this req */
	ptlrpc_at_set_req_timeout(req);

	spin_lock(&req->rq_lock);
	olddl = req->rq_deadline;
	/*
	 * server assumes it now has rq_timeout from when the request
	 * arrived, so the client should give it at least that long.
	 * since we don't know the arrival time we'll use the original
	 * sent time
	 */
	req->rq_deadline = req->rq_sent + req->rq_timeout +
			   ptlrpc_at_get_net_latency(req);

	DEBUG_REQ(D_ADAPTTO, req,
		  "Early reply #%d, new deadline in %lds (%lds)",
		  req->rq_early_count,
		  (long)(req->rq_deadline - ktime_get_real_seconds()),
		  (long)(req->rq_deadline - olddl));

	return rc;
}
static struct kmem_cache *request_cache;

int ptlrpc_request_cache_init(void)
{
	request_cache = kmem_cache_create("ptlrpc_cache",
					  sizeof(struct ptlrpc_request),
					  0, SLAB_HWCACHE_ALIGN, NULL);
	return !request_cache ? -ENOMEM : 0;
}

void ptlrpc_request_cache_fini(void)
{
	kmem_cache_destroy(request_cache);
}

struct ptlrpc_request *ptlrpc_request_cache_alloc(gfp_t flags)
{
	struct ptlrpc_request *req;

	req = kmem_cache_zalloc(request_cache, flags);
	return req;
}

void ptlrpc_request_cache_free(struct ptlrpc_request *req)
{
	kmem_cache_free(request_cache, req);
}
/**
 * Wind down request pool \a pool.
 * Frees all requests from the pool too
 */
void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool)
{
	struct list_head *l, *tmp;
	struct ptlrpc_request *req;

	spin_lock(&pool->prp_lock);
	list_for_each_safe(l, tmp, &pool->prp_req_list) {
		req = list_entry(l, struct ptlrpc_request, rq_list);
		list_del(&req->rq_list);
		LASSERT(req->rq_reqbuf);
		LASSERT(req->rq_reqbuf_len == pool->prp_rq_size);
		kvfree(req->rq_reqbuf);
		ptlrpc_request_cache_free(req);
	}
	spin_unlock(&pool->prp_lock);
	kfree(pool);
}
EXPORT_SYMBOL(ptlrpc_free_rq_pool);
/**
 * Allocates, initializes and adds \a num_rq requests to the pool \a pool
 */
int ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq)
{
	int i;
	int size = 1;

	while (size < pool->prp_rq_size)
		size <<= 1;

	LASSERTF(list_empty(&pool->prp_req_list) ||
		 size == pool->prp_rq_size,
		 "Trying to change pool size with nonempty pool from %d to %d bytes\n",
		 pool->prp_rq_size, size);

	spin_lock(&pool->prp_lock);
	pool->prp_rq_size = size;
	for (i = 0; i < num_rq; i++) {
		struct ptlrpc_request *req;
		struct lustre_msg *msg;

		spin_unlock(&pool->prp_lock);
		req = ptlrpc_request_cache_alloc(GFP_NOFS);
		if (!req)
			return i;
		msg = libcfs_kvzalloc(size, GFP_NOFS);
		if (!msg) {
			ptlrpc_request_cache_free(req);
			return i;
		}
		req->rq_reqbuf = msg;
		req->rq_reqbuf_len = size;
		req->rq_pool = pool;
		spin_lock(&pool->prp_lock);
		list_add_tail(&req->rq_list, &pool->prp_req_list);
	}
	spin_unlock(&pool->prp_lock);
	return num_rq;
}
EXPORT_SYMBOL(ptlrpc_add_rqs_to_pool);
/**
 * Create and initialize new request pool with given attributes:
 * \a num_rq - initial number of requests to create for the pool
 * \a msgsize - maximum message size possible for requests in this pool
 * \a populate_pool - function to be called when more requests need to be added
 *		      to the pool
 * Returns pointer to newly created pool or NULL on error.
 */
struct ptlrpc_request_pool *
ptlrpc_init_rq_pool(int num_rq, int msgsize,
		    int (*populate_pool)(struct ptlrpc_request_pool *, int))
{
	struct ptlrpc_request_pool *pool;

	pool = kzalloc(sizeof(struct ptlrpc_request_pool), GFP_NOFS);
	if (!pool)
		return NULL;

	/*
	 * Request next power of two for the allocation, because internally
	 * kernel would do exactly this
	 */
	spin_lock_init(&pool->prp_lock);
	INIT_LIST_HEAD(&pool->prp_req_list);
	pool->prp_rq_size = msgsize + SPTLRPC_MAX_PAYLOAD;
	pool->prp_populate = populate_pool;

	populate_pool(pool, num_rq);

	return pool;
}
EXPORT_SYMBOL(ptlrpc_init_rq_pool);
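
/*
 * Usage sketch (illustrative only): a client that must keep writing out
 * dirty pages under memory pressure pre-allocates a pool at setup time,
 * roughly the way an OSC-style caller would (the field names here are
 * assumptions):
 *
 *	imp->imp_rq_pool = ptlrpc_init_rq_pool(rpcs_in_flight + 2,
 *					       OST_MAXREQSIZE,
 *					       ptlrpc_add_rqs_to_pool);
 *
 * Requests later fall back to this pool via ptlrpc_request_alloc_pool()
 * whenever allocation from the slab cache fails.
 */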
/**
 * Fetches one request from pool \a pool
 */
static struct ptlrpc_request *
ptlrpc_prep_req_from_pool(struct ptlrpc_request_pool *pool)
{
	struct ptlrpc_request *request;
	struct lustre_msg *reqbuf;

	if (!pool)
		return NULL;

	spin_lock(&pool->prp_lock);

	/*
	 * See if we have anything in a pool, and bail out if nothing,
	 * in writeout path, where this matters, this is safe to do, because
	 * nothing is lost in this case, and when some in-flight requests
	 * complete, this code will be called again.
	 */
	if (unlikely(list_empty(&pool->prp_req_list))) {
		spin_unlock(&pool->prp_lock);
		return NULL;
	}

	request = list_entry(pool->prp_req_list.next, struct ptlrpc_request,
			     rq_list);
	list_del_init(&request->rq_list);
	spin_unlock(&pool->prp_lock);

	LASSERT(request->rq_reqbuf);
	LASSERT(request->rq_pool);

	reqbuf = request->rq_reqbuf;
	memset(request, 0, sizeof(*request));
	request->rq_reqbuf = reqbuf;
	request->rq_reqbuf_len = pool->prp_rq_size;
	request->rq_pool = pool;

	return request;
}
/**
 * Returns freed \a request to pool.
 */
static void __ptlrpc_free_req_to_pool(struct ptlrpc_request *request)
{
	struct ptlrpc_request_pool *pool = request->rq_pool;

	spin_lock(&pool->prp_lock);
	LASSERT(list_empty(&request->rq_list));
	LASSERT(!request->rq_receiving_reply);
	list_add_tail(&request->rq_list, &pool->prp_req_list);
	spin_unlock(&pool->prp_lock);
}
int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
			     __u32 version, int opcode, char **bufs,
			     struct ptlrpc_cli_ctx *ctx)
{
	int count;
	struct obd_import *imp;
	__u32 *lengths;
	int rc;

	count = req_capsule_filled_sizes(&request->rq_pill, RCL_CLIENT);
	imp = request->rq_import;
	lengths = request->rq_pill.rc_area[RCL_CLIENT];

	if (unlikely(ctx)) {
		request->rq_cli_ctx = sptlrpc_cli_ctx_get(ctx);
	} else {
		rc = sptlrpc_req_get_ctx(request);
		if (rc)
			goto out_free;
	}
	sptlrpc_req_set_flavor(request, opcode);

	rc = lustre_pack_request(request, imp->imp_msg_magic, count,
				 lengths, bufs);
	if (rc)
		goto out_ctx;

	lustre_msg_add_version(request->rq_reqmsg, version);
	request->rq_send_state = LUSTRE_IMP_FULL;
	request->rq_type = PTL_RPC_MSG_REQUEST;

	request->rq_req_cbid.cbid_fn = request_out_callback;
	request->rq_req_cbid.cbid_arg = request;

	request->rq_reply_cbid.cbid_fn = reply_in_callback;
	request->rq_reply_cbid.cbid_arg = request;

	request->rq_reply_deadline = 0;
	request->rq_bulk_deadline = 0;
	request->rq_req_deadline = 0;
	request->rq_phase = RQ_PHASE_NEW;
	request->rq_next_phase = RQ_PHASE_UNDEFINED;

	request->rq_request_portal = imp->imp_client->cli_request_portal;
	request->rq_reply_portal = imp->imp_client->cli_reply_portal;

	ptlrpc_at_set_req_timeout(request);

	request->rq_xid = ptlrpc_next_xid();
	lustre_msg_set_opc(request->rq_reqmsg, opcode);

	/* Let's setup deadline for req/reply/bulk unlink for opcode. */
	if (cfs_fail_val == opcode) {
		time_t *fail_t = NULL, *fail2_t = NULL;

		if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK)) {
			fail_t = &request->rq_bulk_deadline;
		} else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) {
			fail_t = &request->rq_reply_deadline;
		} else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REQ_UNLINK)) {
			fail_t = &request->rq_req_deadline;
		} else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BOTH_UNLINK)) {
			fail_t = &request->rq_reply_deadline;
			fail2_t = &request->rq_bulk_deadline;
		}

		if (fail_t) {
			*fail_t = ktime_get_real_seconds() + LONG_UNLINK;

			if (fail2_t)
				*fail2_t = ktime_get_real_seconds() +
					   LONG_UNLINK;

			/*
			 * The RPC is infected, let the test change the
			 * fail_loc
			 */
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(cfs_time_seconds(2));
			set_current_state(TASK_RUNNING);
		}
	}

	return 0;

out_ctx:
	LASSERT(!request->rq_pool);
	sptlrpc_cli_ctx_put(request->rq_cli_ctx, 1);
out_free:
	class_import_put(imp);
	return rc;
}
EXPORT_SYMBOL(ptlrpc_request_bufs_pack);
/**
 * Pack request buffers for network transfer, performing any encryption
 * steps necessary.
 */
int ptlrpc_request_pack(struct ptlrpc_request *request,
			__u32 version, int opcode)
{
	int rc;

	rc = ptlrpc_request_bufs_pack(request, version, opcode, NULL, NULL);
	if (rc)
		return rc;

	/*
	 * For some old 1.8 clients (< 1.8.7), they will LASSERT the size of
	 * ptlrpc_body sent from server equal to local ptlrpc_body size, so we
	 * have to send old ptlrpc_body to keep interoperability with these
	 * clients.
	 *
	 * Only three kinds of server->client RPCs so far:
	 *  - LDLM_BL_CALLBACK
	 *  - LDLM_CP_CALLBACK
	 *  - LDLM_GL_CALLBACK
	 *
	 * XXX This should be removed whenever we drop the interoperability
	 *     with these old clients.
	 */
	if (opcode == LDLM_BL_CALLBACK || opcode == LDLM_CP_CALLBACK ||
	    opcode == LDLM_GL_CALLBACK)
		req_capsule_shrink(&request->rq_pill, &RMF_PTLRPC_BODY,
				   sizeof(struct ptlrpc_body_v2), RCL_CLIENT);

	return rc;
}
EXPORT_SYMBOL(ptlrpc_request_pack);
/**
 * Helper function to allocate new request on import \a imp
 * and possibly using existing request from pool \a pool if provided.
 * Returns allocated request structure with import field filled or
 * NULL on error.
 */
static struct ptlrpc_request *
__ptlrpc_request_alloc(struct obd_import *imp,
		       struct ptlrpc_request_pool *pool)
{
	struct ptlrpc_request *request;

	request = ptlrpc_request_cache_alloc(GFP_NOFS);

	if (!request && pool)
		request = ptlrpc_prep_req_from_pool(pool);

	if (request) {
		ptlrpc_cli_req_init(request);

		LASSERTF((unsigned long)imp > 0x1000, "%p", imp);
		LASSERT(imp != LP_POISON);
		LASSERTF((unsigned long)imp->imp_client > 0x1000, "%p\n",
			 imp->imp_client);
		LASSERT(imp->imp_client != LP_POISON);

		request->rq_import = class_import_get(imp);
	} else {
		CERROR("request allocation out of memory\n");
	}

	return request;
}
/**
 * Helper function for creating a request.
 * Calls __ptlrpc_request_alloc to allocate new request structure and inits
 * buffer structures according to capsule template \a format.
 * Returns allocated request structure pointer or NULL on error.
 */
static struct ptlrpc_request *
ptlrpc_request_alloc_internal(struct obd_import *imp,
			      struct ptlrpc_request_pool *pool,
			      const struct req_format *format)
{
	struct ptlrpc_request *request;

	request = __ptlrpc_request_alloc(imp, pool);
	if (!request)
		return NULL;

	req_capsule_init(&request->rq_pill, request, RCL_CLIENT);
	req_capsule_set(&request->rq_pill, format);
	return request;
}
/**
 * Allocate new request structure for import \a imp and initialize its
 * buffer structure according to capsule template \a format.
 */
struct ptlrpc_request *ptlrpc_request_alloc(struct obd_import *imp,
					    const struct req_format *format)
{
	return ptlrpc_request_alloc_internal(imp, NULL, format);
}
EXPORT_SYMBOL(ptlrpc_request_alloc);
/**
 * Allocate new request structure for import \a imp from pool \a pool and
 * initialize its buffer structure according to capsule template \a format.
 */
struct ptlrpc_request *ptlrpc_request_alloc_pool(struct obd_import *imp,
						 struct ptlrpc_request_pool *pool,
						 const struct req_format *format)
{
	return ptlrpc_request_alloc_internal(imp, pool, format);
}
EXPORT_SYMBOL(ptlrpc_request_alloc_pool);
/**
 * For requests not from pool, free memory of the request structure.
 * For requests obtained from a pool earlier, return request back to pool.
 */
void ptlrpc_request_free(struct ptlrpc_request *request)
{
	if (request->rq_pool)
		__ptlrpc_free_req_to_pool(request);
	else
		ptlrpc_request_cache_free(request);
}
EXPORT_SYMBOL(ptlrpc_request_free);
/**
 * Allocate new request for operation \a opcode and immediately pack it for
 * network transfer.
 * Only used for simple requests like OBD_PING where the only important
 * part of the request is operation itself.
 * Returns allocated request or NULL on error.
 */
struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *imp,
						 const struct req_format *format,
						 __u32 version, int opcode)
{
	struct ptlrpc_request *req = ptlrpc_request_alloc(imp, format);
	int rc;

	if (req) {
		rc = ptlrpc_request_pack(req, version, opcode);
		if (rc) {
			ptlrpc_request_free(req);
			req = NULL;
		}
	}
	return req;
}
EXPORT_SYMBOL(ptlrpc_request_alloc_pack);
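
/*
 * Usage sketch (illustrative only): this is essentially how the pinger
 * builds an OBD_PING request; only the surrounding error handling is
 * abbreviated here.
 *
 *	req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING,
 *					LUSTRE_OBD_VERSION, OBD_PING);
 *	if (!req)
 *		return -ENOMEM;
 *	ptlrpc_request_set_replen(req);
 *	req->rq_no_resend = req->rq_no_delay = 1;
 */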
/**
 * Allocate and initialize new request set structure on the current CPT.
 * Returns a pointer to the newly allocated set structure or NULL on error.
 */
struct ptlrpc_request_set *ptlrpc_prep_set(void)
{
	struct ptlrpc_request_set *set;
	int cpt;

	cpt = cfs_cpt_current(cfs_cpt_table, 0);
	set = kzalloc_node(sizeof(*set), GFP_NOFS,
			   cfs_cpt_spread_node(cfs_cpt_table, cpt));
	if (!set)
		return NULL;
	atomic_set(&set->set_refcount, 1);
	INIT_LIST_HEAD(&set->set_requests);
	init_waitqueue_head(&set->set_waitq);
	atomic_set(&set->set_new_count, 0);
	atomic_set(&set->set_remaining, 0);
	spin_lock_init(&set->set_new_req_lock);
	INIT_LIST_HEAD(&set->set_new_requests);
	INIT_LIST_HEAD(&set->set_cblist);
	set->set_max_inflight = UINT_MAX;
	set->set_producer = NULL;
	set->set_producer_arg = NULL;
	set->set_rc = 0;

	return set;
}
EXPORT_SYMBOL(ptlrpc_prep_set);
/**
 * Allocate and initialize new request set structure with flow control
 * extension. This extension allows controlling the number of requests
 * in-flight for the whole set. A callback function to generate requests
 * must be provided and the request set will keep the number of requests
 * sent over the wire to \a max_inflight.
 * Returns a pointer to the newly allocated set structure or NULL on error.
 */
struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func,
					     void *arg)
{
	struct ptlrpc_request_set *set;

	set = ptlrpc_prep_set();
	if (!set)
		return NULL;

	set->set_max_inflight = max;
	set->set_producer = func;
	set->set_producer_arg = arg;

	return set;
}
EXPORT_SYMBOL(ptlrpc_prep_fcset);
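
/*
 * Sketch of a producer callback (hypothetical example): the callback is
 * invoked whenever the set falls below set_max_inflight and must either
 * queue exactly one request with ptlrpc_set_add_req() and return 0, or
 * return -ENOENT once it has nothing left to generate.
 *
 *	static int example_producer(struct ptlrpc_request_set *set, void *arg)
 *	{
 *		struct example_state *state = arg;
 *		struct ptlrpc_request *req;
 *
 *		req = example_next_request(state);
 *		if (!req)
 *			return -ENOENT;
 *		ptlrpc_set_add_req(set, req);
 *		return 0;
 *	}
 */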
/**
 * Wind down and free request set structure previously allocated with
 * ptlrpc_prep_set.
 * Ensures that all requests on the set have completed and removes
 * all requests from the request list in a set.
 * If any unsent request happen to be on the list, pretends that they got
 * an error in flight and calls their completion handler.
 */
void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
{
	struct list_head *tmp;
	struct list_head *next;
	int expected_phase;
	int n = 0;

	/* Requests on the set should either all be completed, or all be new */
	expected_phase = (atomic_read(&set->set_remaining) == 0) ?
			 RQ_PHASE_COMPLETE : RQ_PHASE_NEW;
	list_for_each(tmp, &set->set_requests) {
		struct ptlrpc_request *req =
			list_entry(tmp, struct ptlrpc_request, rq_set_chain);

		LASSERT(req->rq_phase == expected_phase);
		n++;
	}

	LASSERTF(atomic_read(&set->set_remaining) == 0 ||
		 atomic_read(&set->set_remaining) == n, "%d / %d\n",
		 atomic_read(&set->set_remaining), n);

	list_for_each_safe(tmp, next, &set->set_requests) {
		struct ptlrpc_request *req =
			list_entry(tmp, struct ptlrpc_request, rq_set_chain);
		list_del_init(&req->rq_set_chain);

		LASSERT(req->rq_phase == expected_phase);

		if (req->rq_phase == RQ_PHASE_NEW) {
			ptlrpc_req_interpret(NULL, req, -EBADR);
			atomic_dec(&set->set_remaining);
		}

		spin_lock(&req->rq_lock);
		req->rq_set = NULL;
		req->rq_invalid_rqset = 0;
		spin_unlock(&req->rq_lock);

		ptlrpc_req_finished(req);
	}

	LASSERT(atomic_read(&set->set_remaining) == 0);

	ptlrpc_reqset_put(set);
}
EXPORT_SYMBOL(ptlrpc_set_destroy);
/**
 * Add a new request to the general purpose request set.
 * Assumes request reference from the caller.
 */
void ptlrpc_set_add_req(struct ptlrpc_request_set *set,
			struct ptlrpc_request *req)
{
	LASSERT(list_empty(&req->rq_set_chain));

	/* The set takes over the caller's request reference */
	list_add_tail(&req->rq_set_chain, &set->set_requests);
	req->rq_set = set;
	atomic_inc(&set->set_remaining);
	req->rq_queued_time = cfs_time_current();

	if (req->rq_reqmsg)
		lustre_msg_set_jobid(req->rq_reqmsg, NULL);

	if (set->set_producer)
		/*
		 * If the request set has a producer callback, the RPC must be
		 * sent straight away
		 */
		ptlrpc_send_new_req(req);
}
EXPORT_SYMBOL(ptlrpc_set_add_req);
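
/*
 * Usage sketch (illustrative only): the classic synchronous pattern built
 * on the set API, in essence what ptlrpc_queue_wait() does for a single
 * request. The set takes the caller's reference in ptlrpc_set_add_req() and
 * drops it again in ptlrpc_set_destroy().
 *
 *	set = ptlrpc_prep_set();
 *	if (!set)
 *		return -ENOMEM;
 *	ptlrpc_set_add_req(set, req);
 *	rc = ptlrpc_set_wait(set);
 *	ptlrpc_set_destroy(set);
 */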
/**
 * Add a request to a request set with a dedicated server thread
 * and wake the thread to do any necessary processing.
 * Currently only used for ptlrpcd.
 */
void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
			    struct ptlrpc_request *req)
{
	struct ptlrpc_request_set *set = pc->pc_set;
	int count, i;

	LASSERT(!req->rq_set);
	LASSERT(test_bit(LIOD_STOP, &pc->pc_flags) == 0);

	spin_lock(&set->set_new_req_lock);
	/* The set takes over the caller's request reference. */
	req->rq_set = set;
	req->rq_queued_time = cfs_time_current();
	list_add_tail(&req->rq_set_chain, &set->set_new_requests);
	count = atomic_inc_return(&set->set_new_count);
	spin_unlock(&set->set_new_req_lock);

	/* Only need to call wakeup once for the first entry. */
	if (count == 1) {
		wake_up(&set->set_waitq);

		/*
		 * XXX: It maybe unnecessary to wakeup all the partners. But to
		 *      guarantee the async RPC can be processed ASAP, we have
		 *      no other better choice. It maybe fixed in future.
		 */
		for (i = 0; i < pc->pc_npartners; i++)
			wake_up(&pc->pc_partners[i]->pc_set->set_waitq);
	}
}
EXPORT_SYMBOL(ptlrpc_set_add_new_req);
/**
 * Based on the current state of the import, determine if the request
 * can be sent, is an error, or should be delayed.
 *
 * Returns true if this request should be delayed. If false, and
 * *status is set, then the request can not be sent and *status is the
 * error code. If false and status is 0, then request can be sent.
 *
 * The imp->imp_lock must be held.
 */
static int ptlrpc_import_delay_req(struct obd_import *imp,
				   struct ptlrpc_request *req, int *status)
{
	int delay = 0;

	LASSERT(status);
	*status = 0;

	if (req->rq_ctx_init || req->rq_ctx_fini) {
		/* always allow ctx init/fini rpc go through */
	} else if (imp->imp_state == LUSTRE_IMP_NEW) {
		DEBUG_REQ(D_ERROR, req, "Uninitialized import.");
		*status = -EIO;
	} else if (imp->imp_state == LUSTRE_IMP_CLOSED) {
		/* pings may safely race with umount */
		DEBUG_REQ(lustre_msg_get_opc(req->rq_reqmsg) == OBD_PING ?
			  D_HA : D_ERROR, req, "IMP_CLOSED ");
		*status = -EIO;
	} else if (ptlrpc_send_limit_expired(req)) {
		/* probably doesn't need to be a D_ERROR after initial testing */
		DEBUG_REQ(D_ERROR, req, "send limit expired ");
		*status = -ETIMEDOUT;
	} else if (req->rq_send_state == LUSTRE_IMP_CONNECTING &&
		   imp->imp_state == LUSTRE_IMP_CONNECTING) {
		/* allow CONNECT even if import is invalid */
		if (atomic_read(&imp->imp_inval_count) != 0) {
			DEBUG_REQ(D_ERROR, req, "invalidate in flight");
			*status = -EIO;
		}
	} else if (imp->imp_invalid || imp->imp_obd->obd_no_recov) {
		if (!imp->imp_deactive)
			DEBUG_REQ(D_NET, req, "IMP_INVALID");
		*status = -ESHUTDOWN; /* bz 12940 */
	} else if (req->rq_import_generation != imp->imp_generation) {
		DEBUG_REQ(D_ERROR, req, "req wrong generation:");
		*status = -EIO;
	} else if (req->rq_send_state != imp->imp_state) {
		/* invalidate in progress - any requests should be dropped */
		if (atomic_read(&imp->imp_inval_count) != 0) {
			DEBUG_REQ(D_ERROR, req, "invalidate in flight");
			*status = -EIO;
		} else if (imp->imp_dlm_fake || req->rq_no_delay) {
			*status = -EWOULDBLOCK;
		} else if (req->rq_allow_replay &&
			   (imp->imp_state == LUSTRE_IMP_REPLAY ||
			    imp->imp_state == LUSTRE_IMP_REPLAY_LOCKS ||
			    imp->imp_state == LUSTRE_IMP_REPLAY_WAIT ||
			    imp->imp_state == LUSTRE_IMP_RECOVER)) {
			DEBUG_REQ(D_HA, req, "allow during recovery.\n");
		} else {
			delay = 1;
		}
	}

	return delay;
}
/**
 * Decide if the error message regarding provided request \a req
 * should be printed to the console or not.
 * Makes its decision on request status and other properties.
 * Returns 1 to print error on the system console or 0 if not.
 */
static int ptlrpc_console_allow(struct ptlrpc_request *req)
{
	__u32 opc;
	int err;

	LASSERT(req->rq_reqmsg);
	opc = lustre_msg_get_opc(req->rq_reqmsg);

	/*
	 * Suppress particular reconnect errors which are to be expected. No
	 * errors are suppressed for the initial connection on an import
	 */
	if ((lustre_handle_is_used(&req->rq_import->imp_remote_handle)) &&
	    (opc == OST_CONNECT || opc == MDS_CONNECT || opc == MGS_CONNECT)) {
		/* Suppress timed out reconnect requests */
		if (req->rq_timedout)
			return 0;

		/* Suppress unavailable/again reconnect requests */
		err = lustre_msg_get_status(req->rq_repmsg);
		if (err == -ENODEV || err == -EAGAIN)
			return 0;
	}

	return 1;
}
/**
 * Check request processing status.
 * Returns the status.
 */
static int ptlrpc_check_status(struct ptlrpc_request *req)
{
	int err;

	err = lustre_msg_get_status(req->rq_repmsg);
	if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR) {
		struct obd_import *imp = req->rq_import;
		__u32 opc = lustre_msg_get_opc(req->rq_reqmsg);

		if (ptlrpc_console_allow(req))
			LCONSOLE_ERROR_MSG(0x011, "%s: Communicating with %s, operation %s failed with %d.\n",
					   imp->imp_obd->obd_name,
					   libcfs_nid2str(
						   imp->imp_connection->c_peer.nid),
					   ll_opcode2str(opc), err);
		return err < 0 ? err : -EINVAL;
	}

	if (err < 0)
		DEBUG_REQ(D_INFO, req, "status is %d", err);
	else if (err > 0)
		/* XXX: translate this error from net to host */
		DEBUG_REQ(D_INFO, req, "status is %d", err);

	return err;
}
/**
 * save pre-versions of objects into request for replay.
 * Versions are obtained from server reply.
 * used
 */
static void ptlrpc_save_versions(struct ptlrpc_request *req)
{
	struct lustre_msg *repmsg = req->rq_repmsg;
	struct lustre_msg *reqmsg = req->rq_reqmsg;
	__u64 *versions = lustre_msg_get_versions(repmsg);

	if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
		return;

	LASSERT(versions);
	lustre_msg_set_versions(reqmsg, versions);
	CDEBUG(D_INFO, "Client save versions [%#llx/%#llx]\n",
	       versions[0], versions[1]);
}
/**
 * Callback function called when client receives RPC reply for \a req.
 * Returns 0 on success or error code.
 * The return value would be assigned to req->rq_status by the caller
 * as request processing status.
 * This function also decides if the request needs to be saved for later replay.
 */
static int after_reply(struct ptlrpc_request *req)
{
	struct obd_import *imp = req->rq_import;
	struct obd_device *obd = req->rq_import->imp_obd;
	int rc;
	struct timespec64 work_start;
	long timediff;

	LASSERT(obd);
	/* repbuf must be unlinked */
	LASSERT(!req->rq_receiving_reply && req->rq_reply_unlinked);

	if (req->rq_reply_truncated) {
		if (ptlrpc_no_resend(req)) {
			DEBUG_REQ(D_ERROR, req, "reply buffer overflow, expected: %d, actual size: %d",
				  req->rq_nob_received, req->rq_repbuf_len);
			return -EOVERFLOW;
		}

		sptlrpc_cli_free_repbuf(req);
		/*
		 * Pass the required reply buffer size (include space for early
		 * reply). NB: no need to round up because alloc_repbuf will
		 * round it up
		 */
		req->rq_replen = req->rq_nob_received;
		req->rq_nob_received = 0;
		spin_lock(&req->rq_lock);
		req->rq_resend = 1;
		spin_unlock(&req->rq_lock);
		return 0;
	}

	/*
	 * NB Until this point, the whole of the incoming message,
	 * including buflens, status etc is in the sender's byte order.
	 */
	rc = sptlrpc_cli_unwrap_reply(req);
	if (rc) {
		DEBUG_REQ(D_ERROR, req, "unwrap reply failed (%d):", rc);
		return rc;
	}

	/* Security layer unwrap might ask to resend this request. */
	if (req->rq_resend)
		return 0;

	rc = unpack_reply(req);
	if (rc)
		return rc;

	/* retry indefinitely on EINPROGRESS */
	if (lustre_msg_get_status(req->rq_repmsg) == -EINPROGRESS &&
	    ptlrpc_no_resend(req) == 0 && !req->rq_no_retry_einprogress) {
		time64_t now = ktime_get_real_seconds();

		DEBUG_REQ(D_RPCTRACE, req, "Resending request on EINPROGRESS");
		spin_lock(&req->rq_lock);
		req->rq_resend = 1;
		spin_unlock(&req->rq_lock);
		req->rq_nr_resend++;

		/* allocate new xid to avoid reply reconstruction */
		if (!req->rq_bulk) {
			/* new xid is already allocated for bulk in ptlrpc_check_set() */
			req->rq_xid = ptlrpc_next_xid();
			DEBUG_REQ(D_RPCTRACE, req, "Allocating new xid for resend on EINPROGRESS");
		}

		/* Readjust the timeout for current conditions */
		ptlrpc_at_set_req_timeout(req);
		/*
		 * delay resend to give a chance to the server to get ready.
		 * The delay is increased by 1s on every resend and is capped to
		 * the current request timeout (i.e. obd_timeout if AT is off,
		 * or AT service time x 125% + 5s, see at_est2timeout)
		 */
		if (req->rq_nr_resend > req->rq_timeout)
			req->rq_sent = now + req->rq_timeout;
		else
			req->rq_sent = now + req->rq_nr_resend;

		return 0;
	}

	ktime_get_real_ts64(&work_start);
	timediff = (work_start.tv_sec - req->rq_sent_tv.tv_sec) * USEC_PER_SEC +
		   (work_start.tv_nsec - req->rq_sent_tv.tv_nsec) /
								NSEC_PER_USEC;
	if (obd->obd_svc_stats) {
		lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR,
				    timediff);
		ptlrpc_lprocfs_rpc_sent(req, timediff);
	}

	if (lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_REPLY &&
	    lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_ERR) {
		DEBUG_REQ(D_ERROR, req, "invalid packet received (type=%u)",
			  lustre_msg_get_type(req->rq_repmsg));
		return -EPROTO;
	}

	if (lustre_msg_get_opc(req->rq_reqmsg) != OBD_PING)
		CFS_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_PAUSE_REP, cfs_fail_val);
	ptlrpc_at_adj_service(req, lustre_msg_get_timeout(req->rq_repmsg));
	ptlrpc_at_adj_net_latency(req,
				  lustre_msg_get_service_time(req->rq_repmsg));

	rc = ptlrpc_check_status(req);
	imp->imp_connect_error = rc;

	if (rc) {
		/*
		 * Either we've been evicted, or the server has failed for
		 * some reason. Try to reconnect, and if that fails, punt to
		 * the upcall.
		 */
		if (ll_rpc_recoverable_error(rc)) {
			if (req->rq_send_state != LUSTRE_IMP_FULL ||
			    imp->imp_obd->obd_no_recov || imp->imp_dlm_fake) {
				return rc;
			}
			ptlrpc_request_handle_notconn(req);
			return rc;
		}
	} else {
		/*
		 * Let's look if server sent slv. Do it only for RPC with
		 * rc == 0.
		 */
		ldlm_cli_update_pool(req);
	}

	/* Store transno in reqmsg for replay. */
	if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)) {
		req->rq_transno = lustre_msg_get_transno(req->rq_repmsg);
		lustre_msg_set_transno(req->rq_reqmsg, req->rq_transno);
	}

	if (imp->imp_replayable) {
		spin_lock(&imp->imp_lock);
		/*
		 * No point in adding already-committed requests to the replay
		 * list, we will just remove them immediately. b=9829
		 */
		if (req->rq_transno != 0 &&
		    (req->rq_transno >
		     lustre_msg_get_last_committed(req->rq_repmsg) ||
		     req->rq_replay)) {
			/* version recovery */
			ptlrpc_save_versions(req);
			ptlrpc_retain_replayable_request(req, imp);
		} else if (req->rq_commit_cb &&
			   list_empty(&req->rq_replay_list)) {
			/*
			 * NB: don't call rq_commit_cb if it's already on
			 * rq_replay_list, ptlrpc_free_committed() will call
			 * it later, see LU-3618 for details
			 */
			spin_unlock(&imp->imp_lock);
			req->rq_commit_cb(req);
			spin_lock(&imp->imp_lock);
		}

		/* Replay-enabled imports return commit-status information. */
		if (lustre_msg_get_last_committed(req->rq_repmsg)) {
			imp->imp_peer_committed_transno =
				lustre_msg_get_last_committed(req->rq_repmsg);
		}

		ptlrpc_free_committed(imp);

		if (!list_empty(&imp->imp_replay_list)) {
			struct ptlrpc_request *last;

			last = list_entry(imp->imp_replay_list.prev,
					  struct ptlrpc_request,
					  rq_replay_list);
			/*
			 * Requests with rq_replay stay on the list even if no
			 * commit is expected.
			 */
			if (last->rq_transno > imp->imp_peer_committed_transno)
				ptlrpc_pinger_commit_expected(imp);
		}

		spin_unlock(&imp->imp_lock);
	}

	return rc;
}
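
/*
 * Worked example for the EINPROGRESS backoff above: the first resend is
 * delayed 1s, the second 2s, and so on, until rq_nr_resend exceeds
 * rq_timeout; with a 30s timeout the delay grows 1s, 2s, ... 30s and then
 * stays pinned at 30s for every further retry.
 */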
/**
 * Helper function to send request \a req over the network for the first time.
 * Also adjusts request phase.
 * Returns 0 on success or error code.
 */
static int ptlrpc_send_new_req(struct ptlrpc_request *req)
{
	struct obd_import *imp = req->rq_import;
	int rc;

	LASSERT(req->rq_phase == RQ_PHASE_NEW);
	if (req->rq_sent && (req->rq_sent > ktime_get_real_seconds()) &&
	    (!req->rq_generation_set ||
	     req->rq_import_generation == imp->imp_generation))
		return 0;

	ptlrpc_rqphase_move(req, RQ_PHASE_RPC);

	spin_lock(&imp->imp_lock);

	if (!req->rq_generation_set)
		req->rq_import_generation = imp->imp_generation;

	if (ptlrpc_import_delay_req(imp, req, &rc)) {
		spin_lock(&req->rq_lock);
		req->rq_waiting = 1;
		spin_unlock(&req->rq_lock);

		DEBUG_REQ(D_HA, req, "req from PID %d waiting for recovery: (%s != %s)",
			  lustre_msg_get_status(req->rq_reqmsg),
			  ptlrpc_import_state_name(req->rq_send_state),
			  ptlrpc_import_state_name(imp->imp_state));
		LASSERT(list_empty(&req->rq_list));
		list_add_tail(&req->rq_list, &imp->imp_delayed_list);
		atomic_inc(&req->rq_import->imp_inflight);
		spin_unlock(&imp->imp_lock);
		return 0;
	}

	if (rc != 0) {
		spin_unlock(&imp->imp_lock);
		req->rq_status = rc;
		ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
		return rc;
	}

	LASSERT(list_empty(&req->rq_list));
	list_add_tail(&req->rq_list, &imp->imp_sending_list);
	atomic_inc(&req->rq_import->imp_inflight);
	spin_unlock(&imp->imp_lock);

	lustre_msg_set_status(req->rq_reqmsg, current_pid());

	rc = sptlrpc_req_refresh_ctx(req, -1);
	if (rc) {
		if (req->rq_err) {
			req->rq_status = rc;
			return 1;
		}
		spin_lock(&req->rq_lock);
		req->rq_wait_ctx = 1;
		spin_unlock(&req->rq_lock);
		return 0;
	}

	CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:nid:opc %s:%s:%d:%llu:%s:%d\n",
	       current_comm(),
	       imp->imp_obd->obd_uuid.uuid,
	       lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
	       libcfs_nid2str(imp->imp_connection->c_peer.nid),
	       lustre_msg_get_opc(req->rq_reqmsg));

	rc = ptl_send_rpc(req, 0);
	if (rc) {
		DEBUG_REQ(D_HA, req, "send failed (%d); expect timeout", rc);
		spin_lock(&req->rq_lock);
		req->rq_net_err = 1;
		spin_unlock(&req->rq_lock);
		return rc;
	}
	return 0;
}
static inline int ptlrpc_set_producer(struct ptlrpc_request_set *set)
{
	int remaining, rc;

	LASSERT(set->set_producer);

	remaining = atomic_read(&set->set_remaining);

	/*
	 * populate the ->set_requests list with requests until we
	 * reach the maximum number of RPCs in flight for this set
	 */
	while (atomic_read(&set->set_remaining) < set->set_max_inflight) {
		rc = set->set_producer(set, set->set_producer_arg);
		if (rc == -ENOENT) {
			/* no more RPC to produce */
			set->set_producer = NULL;
			set->set_producer_arg = NULL;
			return 0;
		}
	}

	return (atomic_read(&set->set_remaining) - remaining);
}
/**
 * This sends any unsent RPCs in \a set and returns 1 if all are sent
 * and no more replies are expected.
 * (it is possible to get fewer replies than requests sent, e.g. due to
 * timed out requests or requests that we had trouble sending out)
 *
 * NOTE: This function contains a potential schedule point (cond_resched()).
 */
int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
{
	struct list_head *tmp, *next;
	struct list_head comp_reqs;
	int force_timer_recalc = 0;

	if (atomic_read(&set->set_remaining) == 0)
		return 1;

	INIT_LIST_HEAD(&comp_reqs);
	list_for_each_safe(tmp, next, &set->set_requests) {
		struct ptlrpc_request *req =
			list_entry(tmp, struct ptlrpc_request, rq_set_chain);
		struct obd_import *imp = req->rq_import;
		int unregistered = 0;
		int rc = 0;

		/*
		 * This schedule point is mainly for the ptlrpcd caller of this
		 * function. Most ptlrpc sets are not long-lived and unbounded
		 * in length, but at the least the set used by the ptlrpcd is.
		 * Since the processing time is unbounded, we need to insert an
		 * explicit schedule point to make the thread well-behaved.
		 */
		cond_resched();

		if (req->rq_phase == RQ_PHASE_NEW &&
		    ptlrpc_send_new_req(req)) {
			force_timer_recalc = 1;
		}

		/* delayed send - skip */
		if (req->rq_phase == RQ_PHASE_NEW && req->rq_sent)
			continue;

		/* delayed resend - skip */
		if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend &&
		    req->rq_sent > ktime_get_real_seconds())
			continue;

		if (!(req->rq_phase == RQ_PHASE_RPC ||
		      req->rq_phase == RQ_PHASE_BULK ||
		      req->rq_phase == RQ_PHASE_INTERPRET ||
		      req->rq_phase == RQ_PHASE_UNREG_RPC ||
		      req->rq_phase == RQ_PHASE_UNREG_BULK ||
		      req->rq_phase == RQ_PHASE_COMPLETE)) {
			DEBUG_REQ(D_ERROR, req, "bad phase %x", req->rq_phase);
			LBUG();
		}

		if (req->rq_phase == RQ_PHASE_UNREG_RPC ||
		    req->rq_phase == RQ_PHASE_UNREG_BULK) {
			LASSERT(req->rq_next_phase != req->rq_phase);
			LASSERT(req->rq_next_phase != RQ_PHASE_UNDEFINED);

			if (req->rq_req_deadline &&
			    !OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REQ_UNLINK))
				req->rq_req_deadline = 0;
			if (req->rq_reply_deadline &&
			    !OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK))
				req->rq_reply_deadline = 0;
			if (req->rq_bulk_deadline &&
			    !OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK))
				req->rq_bulk_deadline = 0;

			/*
			 * Skip processing until reply is unlinked. We
			 * can't return to pool before that and we can't
			 * call interpret before that. We need to make
			 * sure that all rdma transfers finished and will
			 * not corrupt any data.
			 */
			if (req->rq_phase == RQ_PHASE_UNREG_RPC &&
			    ptlrpc_client_recv_or_unlink(req))
				continue;
			if (req->rq_phase == RQ_PHASE_UNREG_BULK &&
			    ptlrpc_client_bulk_active(req))
				continue;

			/*
			 * Turn fail_loc off to prevent it from looping
			 * forever.
			 */
			if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) {
				OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK,
						     OBD_FAIL_ONCE);
			}
			if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK)) {
				OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK,
						     OBD_FAIL_ONCE);
			}

			/*
			 * Move to next phase if reply was successfully
			 * unlinked.
			 */
			ptlrpc_rqphase_move(req, req->rq_next_phase);
		}

		if (req->rq_phase == RQ_PHASE_COMPLETE) {
			list_move_tail(&req->rq_set_chain, &comp_reqs);
			continue;
		}

		if (req->rq_phase == RQ_PHASE_INTERPRET)
			goto interpret;

		/* Note that this also will start async reply unlink. */
		if (req->rq_net_err && !req->rq_timedout) {
			ptlrpc_expire_one_request(req, 1);

			/* Check if we still need to wait for unlink. */
			if (ptlrpc_client_recv_or_unlink(req) ||
			    ptlrpc_client_bulk_active(req))
				continue;
			/* If there is no need to resend, fail it now. */
			if (req->rq_no_resend) {
				if (req->rq_status == 0)
					req->rq_status = -EIO;
				ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
				goto interpret;
			} else {
				continue;
			}
		}

		if (req->rq_err) {
			spin_lock(&req->rq_lock);
			req->rq_replied = 0;
			spin_unlock(&req->rq_lock);
			if (req->rq_status == 0)
				req->rq_status = -EIO;
			ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
			goto interpret;
		}

		/*
		 * ptlrpc_set_wait->l_wait_event sets lwi_allow_intr
		 * so it sets rq_intr regardless of individual rpc
		 * timeouts. The synchronous IO waiting path sets
		 * rq_intr irrespective of whether ptlrpcd
		 * has seen a timeout. Our policy is to only interpret
		 * interrupted rpcs after they have timed out, so we
		 * need to enforce that here.
		 */
		if (req->rq_intr && (req->rq_timedout || req->rq_waiting ||
				     req->rq_wait_ctx)) {
			req->rq_status = -EINTR;
			ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
			goto interpret;
		}

		if (req->rq_phase == RQ_PHASE_RPC) {
			if (req->rq_timedout || req->rq_resend ||
			    req->rq_waiting || req->rq_wait_ctx) {
				int status;

				if (!ptlrpc_unregister_reply(req, 1)) {
					ptlrpc_unregister_bulk(req, 1);
					continue;
				}

				spin_lock(&imp->imp_lock);
				if (ptlrpc_import_delay_req(imp, req,
							    &status)) {
					/*
					 * put on delay list - only if we wait
					 * recovery finished - before send
					 */
					list_del_init(&req->rq_list);
					list_add_tail(&req->rq_list,
						      &imp->imp_delayed_list);
					spin_unlock(&imp->imp_lock);
					continue;
				}

				if (status != 0) {
					req->rq_status = status;
					ptlrpc_rqphase_move(req,
							    RQ_PHASE_INTERPRET);
					spin_unlock(&imp->imp_lock);
					goto interpret;
				}
				if (ptlrpc_no_resend(req) &&
				    !req->rq_wait_ctx) {
					req->rq_status = -ENOTCONN;
					ptlrpc_rqphase_move(req,
							    RQ_PHASE_INTERPRET);
					spin_unlock(&imp->imp_lock);
					goto interpret;
				}

				list_del_init(&req->rq_list);
				list_add_tail(&req->rq_list,
					      &imp->imp_sending_list);

				spin_unlock(&imp->imp_lock);

				spin_lock(&req->rq_lock);
				req->rq_waiting = 0;
				spin_unlock(&req->rq_lock);

				if (req->rq_timedout || req->rq_resend) {
					/* This is re-sending anyway, let's mark req as resend. */
					spin_lock(&req->rq_lock);
					req->rq_resend = 1;
					spin_unlock(&req->rq_lock);
					if (req->rq_bulk) {
						__u64 old_xid;

						if (!ptlrpc_unregister_bulk(req, 1))
							continue;

						/* ensure previous bulk fails */
						old_xid = req->rq_xid;
						req->rq_xid = ptlrpc_next_xid();
						CDEBUG(D_HA, "resend bulk old x%llu new x%llu\n",
						       old_xid, req->rq_xid);
					}
				}
				/*
				 * rq_wait_ctx is only touched by ptlrpcd,
				 * so no lock is needed here.
				 */
				status = sptlrpc_req_refresh_ctx(req, -1);
				if (status) {
					if (req->rq_err) {
						req->rq_status = status;
						spin_lock(&req->rq_lock);
						req->rq_wait_ctx = 0;
						spin_unlock(&req->rq_lock);
						force_timer_recalc = 1;
					} else {
						spin_lock(&req->rq_lock);
						req->rq_wait_ctx = 1;
						spin_unlock(&req->rq_lock);
					}

					continue;
				} else {
					spin_lock(&req->rq_lock);
					req->rq_wait_ctx = 0;
					spin_unlock(&req->rq_lock);
				}

				rc = ptl_send_rpc(req, 0);
				if (rc) {
					DEBUG_REQ(D_HA, req,
						  "send failed: rc = %d", rc);
					force_timer_recalc = 1;
					spin_lock(&req->rq_lock);
					req->rq_net_err = 1;
					spin_unlock(&req->rq_lock);
					continue;
				}
				/* need to reset the timeout */
				force_timer_recalc = 1;
			}

			spin_lock(&req->rq_lock);

			if (ptlrpc_client_early(req)) {
				ptlrpc_at_recv_early_reply(req);
				spin_unlock(&req->rq_lock);
				continue;
			}

			/* Still waiting for a reply? */
			if (ptlrpc_client_recv(req)) {
				spin_unlock(&req->rq_lock);
				continue;
			}

			/* Did we actually receive a reply? */
			if (!ptlrpc_client_replied(req)) {
				spin_unlock(&req->rq_lock);
				continue;
			}

			spin_unlock(&req->rq_lock);

			/*
			 * unlink from net because we are going to
			 * swab in-place of reply buffer
			 */
			unregistered = ptlrpc_unregister_reply(req, 1);
			if (!unregistered)
				continue;

			req->rq_status = after_reply(req);
			if (req->rq_resend)
				continue;

			/*
			 * If there is no bulk associated with this request,
			 * then we're done and should let the interpreter
			 * process the reply. Similarly if the RPC returned
			 * an error, and therefore the bulk will never arrive.
			 */
			if (!req->rq_bulk || req->rq_status < 0) {
				ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
				goto interpret;
			}

			ptlrpc_rqphase_move(req, RQ_PHASE_BULK);
		}

		LASSERT(req->rq_phase == RQ_PHASE_BULK);
		if (ptlrpc_client_bulk_active(req))
			continue;

		if (req->rq_bulk->bd_failure) {
			/*
			 * The RPC reply arrived OK, but the bulk screwed
			 * up! Dead weird since the server told us the RPC
			 * was good after getting the REPLY for her GET or
			 * the ACK for her PUT.
			 */
			DEBUG_REQ(D_ERROR, req, "bulk transfer failed");
			req->rq_status = -EIO;
		}

		ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);

interpret:
		LASSERT(req->rq_phase == RQ_PHASE_INTERPRET);

		/*
		 * This moves to "unregistering" phase we need to wait for
		 * reply unlink.
		 */
		if (!unregistered && !ptlrpc_unregister_reply(req, 1)) {
			/* start async bulk unlink too */
			ptlrpc_unregister_bulk(req, 1);
			continue;
		}

		if (!ptlrpc_unregister_bulk(req, 1))
			continue;

		/* When calling interpret receive should already be finished. */
		LASSERT(!req->rq_receiving_reply);

		ptlrpc_req_interpret(env, req, req->rq_status);

		if (ptlrpcd_check_work(req)) {
			atomic_dec(&set->set_remaining);
			continue;
		}
		ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);

		CDEBUG(req->rq_reqmsg ? D_RPCTRACE : 0,
		       "Completed RPC pname:cluuid:pid:xid:nid:opc %s:%s:%d:%llu:%s:%d\n",
		       current_comm(), imp->imp_obd->obd_uuid.uuid,
		       lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
		       libcfs_nid2str(imp->imp_connection->c_peer.nid),
		       lustre_msg_get_opc(req->rq_reqmsg));

		spin_lock(&imp->imp_lock);
		/*
		 * Request already may be not on sending or delaying list. This
		 * may happen in the case of marking it erroneous for the case
		 * ptlrpc_import_delay_req(req, status) find it impossible to
		 * allow sending this rpc and returns *status != 0.
		 */
		if (!list_empty(&req->rq_list)) {
			list_del_init(&req->rq_list);
			atomic_dec(&imp->imp_inflight);
		}
		spin_unlock(&imp->imp_lock);

		atomic_dec(&set->set_remaining);
		wake_up_all(&imp->imp_recovery_waitq);

		if (set->set_producer) {
			/* produce a new request if possible */
			if (ptlrpc_set_producer(set) > 0)
				force_timer_recalc = 1;

			/*
			 * free the request that has just been completed
			 * in order not to pollute set->set_requests
			 */
			list_del_init(&req->rq_set_chain);
			spin_lock(&req->rq_lock);
			req->rq_set = NULL;
			req->rq_invalid_rqset = 0;
			spin_unlock(&req->rq_lock);

			/* record rq_status to compute the final status later */
			if (req->rq_status != 0)
				set->set_rc = req->rq_status;
			ptlrpc_req_finished(req);
		} else {
			list_move_tail(&req->rq_set_chain, &comp_reqs);
		}
	}

	/*
	 * move completed request at the head of list so it's easier for
	 * caller to find them
	 */
	list_splice(&comp_reqs, &set->set_requests);

	/* If we hit an error, we want to recover promptly. */
	return atomic_read(&set->set_remaining) == 0 || force_timer_recalc;
}
EXPORT_SYMBOL(ptlrpc_check_set);
/**
 * Time out request \a req. If \a async_unlink is set, that means do not wait
 * until LNet actually confirms network buffer unlinking.
 * Return 1 if we should give up further retrying attempts or 0 otherwise.
 */
int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink)
{
	struct obd_import *imp = req->rq_import;
	int rc = 0;

	spin_lock(&req->rq_lock);
	req->rq_timedout = 1;
	spin_unlock(&req->rq_lock);

	DEBUG_REQ(D_WARNING, req, "Request sent has %s: [sent %lld/real %lld]",
		  req->rq_net_err ? "failed due to network error" :
		     ((req->rq_real_sent == 0 ||
		       req->rq_real_sent < req->rq_sent ||
		       req->rq_real_sent >= req->rq_deadline) ?
		      "timed out for sent delay" : "timed out for slow reply"),
		  (s64)req->rq_sent, (s64)req->rq_real_sent);

	if (imp && obd_debug_peer_on_timeout)
		LNetDebugPeer(imp->imp_connection->c_peer);

	ptlrpc_unregister_reply(req, async_unlink);
	ptlrpc_unregister_bulk(req, async_unlink);

	if (obd_dump_on_timeout)
		libcfs_debug_dumplog();

	if (!imp) {
		DEBUG_REQ(D_HA, req, "NULL import: already cleaned up?");
		return 1;
	}

	atomic_inc(&imp->imp_timeouts);

	/* The DLM server doesn't want recovery run on its imports. */
	if (imp->imp_dlm_fake)
		return 1;

	/*
	 * If this request is for recovery or other primordial tasks,
	 * then error it out here.
	 */
	if (req->rq_ctx_init || req->rq_ctx_fini ||
	    req->rq_send_state != LUSTRE_IMP_FULL ||
	    imp->imp_obd->obd_no_recov) {
		DEBUG_REQ(D_RPCTRACE, req, "err -110, sent_state=%s (now=%s)",
			  ptlrpc_import_state_name(req->rq_send_state),
			  ptlrpc_import_state_name(imp->imp_state));
		spin_lock(&req->rq_lock);
		req->rq_status = -ETIMEDOUT;
		req->rq_err = 1;
		spin_unlock(&req->rq_lock);
		return 1;
	}

	/*
	 * if a request can't be resent we can't wait for an answer after
	 * the timeout
	 */
	if (ptlrpc_no_resend(req)) {
		DEBUG_REQ(D_RPCTRACE, req, "TIMEOUT-NORESEND:");
		rc = 1;
	}

	ptlrpc_fail_import(imp, lustre_msg_get_conn_cnt(req->rq_reqmsg));

	return rc;
}
/**
 * Time out all uncompleted requests in request set pointed by \a data
 * Callback used when waiting on sets with l_wait_event.
 * Always returns 1.
 */
int ptlrpc_expired_set(void *data)
{
	struct ptlrpc_request_set *set = data;
	struct list_head *tmp;
	time64_t now = ktime_get_real_seconds();

	/* A timeout expired. See which reqs it applies to... */
	list_for_each(tmp, &set->set_requests) {
		struct ptlrpc_request *req =
			list_entry(tmp, struct ptlrpc_request, rq_set_chain);

		/* don't expire request waiting for context */
		if (req->rq_wait_ctx)
			continue;

		/* Request in-flight? */
		if (!((req->rq_phase == RQ_PHASE_RPC &&
		       !req->rq_waiting && !req->rq_resend) ||
		      (req->rq_phase == RQ_PHASE_BULK)))
			continue;

		if (req->rq_timedout ||	/* already dealt with */
		    req->rq_deadline > now) /* not expired */
			continue;

		/*
		 * Deal with this guy. Do it asynchronously to not block
		 * ptlrpcd thread.
		 */
		ptlrpc_expire_one_request(req, 1);
	}

	/*
	 * When waiting for a whole set, we always break out of the
	 * sleep so we can recalculate the timeout, or enable interrupts
	 * if everyone's timed out.
	 */
	return 1;
}
EXPORT_SYMBOL(ptlrpc_expired_set);
/**
 * Sets rq_intr flag in \a req under spinlock.
 */
void ptlrpc_mark_interrupted(struct ptlrpc_request *req)
{
	spin_lock(&req->rq_lock);
	req->rq_intr = 1;
	spin_unlock(&req->rq_lock);
}
EXPORT_SYMBOL(ptlrpc_mark_interrupted);
/**
 * Interrupts (sets interrupted flag) all uncompleted requests in
 * a set \a data. Callback for l_wait_event for interruptible waits.
 */
void ptlrpc_interrupted_set(void *data)
{
	struct ptlrpc_request_set *set = data;
	struct list_head *tmp;

	CDEBUG(D_RPCTRACE, "INTERRUPTED SET %p\n", set);

	list_for_each(tmp, &set->set_requests) {
		struct ptlrpc_request *req =
			list_entry(tmp, struct ptlrpc_request, rq_set_chain);

		if (req->rq_phase != RQ_PHASE_RPC &&
		    req->rq_phase != RQ_PHASE_UNREG_RPC)
			continue;

		ptlrpc_mark_interrupted(req);
	}
}
EXPORT_SYMBOL(ptlrpc_interrupted_set);
/**
 * Get the smallest timeout in the set; this does NOT set a timeout.
 */
int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set)
{
	struct list_head *tmp;
	time64_t now = ktime_get_real_seconds();
	int timeout = 0;
	struct ptlrpc_request *req;
	time64_t deadline;

	list_for_each(tmp, &set->set_requests) {
		req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);

		/* Request in-flight? */
		if (!(((req->rq_phase == RQ_PHASE_RPC) && !req->rq_waiting) ||
		      (req->rq_phase == RQ_PHASE_BULK) ||
		      (req->rq_phase == RQ_PHASE_NEW)))
			continue;

		/* Already timed out. */
		if (req->rq_timedout)
			continue;

		/* Waiting for ctx. */
		if (req->rq_wait_ctx)
			continue;

		if (req->rq_phase == RQ_PHASE_NEW)
			deadline = req->rq_sent;
		else if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend)
			deadline = req->rq_sent;
		else
			deadline = req->rq_sent + req->rq_timeout;

		if (deadline <= now)	/* actually expired already */
			timeout = 1;	/* ASAP */
		else if (timeout == 0 || timeout > deadline - now)
			timeout = deadline - now;
	}
	return timeout;
}
EXPORT_SYMBOL(ptlrpc_set_next_timeout);
/**
 * Send all unsent requests from the set and then wait until all
 * requests in the set complete (either get a reply, timeout, get an
 * error or otherwise be interrupted).
 * Returns 0 on success or error code otherwise.
 */
int ptlrpc_set_wait(struct ptlrpc_request_set *set)
{
	struct list_head *tmp;
	struct ptlrpc_request *req;
	struct l_wait_info lwi;
	int rc, timeout;

	if (set->set_producer)
		(void)ptlrpc_set_producer(set);
	else
		list_for_each(tmp, &set->set_requests) {
			req = list_entry(tmp, struct ptlrpc_request,
					 rq_set_chain);
			if (req->rq_phase == RQ_PHASE_NEW)
				(void)ptlrpc_send_new_req(req);
		}

	if (list_empty(&set->set_requests))
		return 0;

	do {
		timeout = ptlrpc_set_next_timeout(set);

		/*
		 * wait until all complete, interrupted, or an in-flight
		 * req times out
		 */
		CDEBUG(D_RPCTRACE, "set %p going to sleep for %d seconds\n",
		       set, timeout);

		if (timeout == 0 && !signal_pending(current))
			/*
			 * No requests are in-flight (either timed out
			 * or delayed), so we can allow interrupts.
			 * We still want to block for a limited time,
			 * so we allow interrupts during the timeout.
			 */
			lwi = LWI_TIMEOUT_INTR_ALL(cfs_time_seconds(1),
						   ptlrpc_expired_set,
						   ptlrpc_interrupted_set,
						   set);
		else
			/*
			 * At least one request is in flight, so no
			 * interrupts are allowed. Wait until all
			 * complete, or an in-flight req times out.
			 */
			lwi = LWI_TIMEOUT(cfs_time_seconds(timeout ? timeout : 1),
					  ptlrpc_expired_set, set);

		rc = l_wait_event(set->set_waitq,
				  ptlrpc_check_set(NULL, set), &lwi);

		/*
		 * LU-769 - if we ignored the signal because it was already
		 * pending when we started, we need to handle it now or we risk
		 * it being ignored forever
		 */
		if (rc == -ETIMEDOUT && !lwi.lwi_allow_intr &&
		    signal_pending(current)) {
			sigset_t blocked_sigs =
				cfs_block_sigsinv(LUSTRE_FATAL_SIGS);

			/*
			 * In fact we only interrupt for the "fatal" signals
			 * like SIGINT or SIGKILL. We still ignore less
			 * important signals since ptlrpc set is not easily
			 * reentrant from userspace again
			 */
			if (signal_pending(current))
				ptlrpc_interrupted_set(set);
			cfs_restore_sigs(blocked_sigs);
		}

		LASSERT(rc == 0 || rc == -EINTR || rc == -ETIMEDOUT);

		/*
		 * -EINTR => all requests have been flagged rq_intr so next
		 * check completes.
		 * -ETIMEDOUT => someone timed out.  When all reqs have
		 * timed out, signals are enabled, allowing completion with
		 * EINTR.
		 * I don't really care if we go once more round the loop in
		 * the error cases -eeb.
		 */
		if (rc == 0 && atomic_read(&set->set_remaining) == 0) {
			list_for_each(tmp, &set->set_requests) {
				req = list_entry(tmp, struct ptlrpc_request,
						 rq_set_chain);
				spin_lock(&req->rq_lock);
				req->rq_invalid_rqset = 1;
				spin_unlock(&req->rq_lock);
			}
		}
	} while (rc != 0 || atomic_read(&set->set_remaining) != 0);

	LASSERT(atomic_read(&set->set_remaining) == 0);

	rc = set->set_rc; /* rq_status of already freed requests if any */
	list_for_each(tmp, &set->set_requests) {
		req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);

		LASSERT(req->rq_phase == RQ_PHASE_COMPLETE);
		if (req->rq_status != 0)
			rc = req->rq_status;
	}

	if (set->set_interpret) {
		int (*interpreter)(struct ptlrpc_request_set *set,
				   void *, int) = set->set_interpret;

		rc = interpreter(set, set->set_arg, rc);
	} else {
		struct ptlrpc_set_cbdata *cbdata, *n;
		int err;

		list_for_each_entry_safe(cbdata, n,
					 &set->set_cblist, psc_item) {
			list_del_init(&cbdata->psc_item);
			err = cbdata->psc_interpret(set, cbdata->psc_data, rc);
			if (err && !rc)
				rc = err;
			kfree(cbdata);
		}
	}

	return rc;
}
EXPORT_SYMBOL(ptlrpc_set_wait);

/**
 * Helper function for request freeing.
 * Called when request count reached zero and request needs to be freed.
 * Removes request from all sorts of sending/replay lists it might be on,
 * frees network buffers if any are present.
 * If \a locked is set, that means caller is already holding import imp_lock
 * and so we no longer need to reobtain it (for certain lists manipulations)
 */
static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
{
	if (!request)
		return;

	LASSERT(!request->rq_srv_req);
	LASSERT(!request->rq_export);
	LASSERTF(!request->rq_receiving_reply, "req %p\n", request);
	LASSERTF(list_empty(&request->rq_list), "req %p\n", request);
	LASSERTF(list_empty(&request->rq_set_chain), "req %p\n", request);
	LASSERTF(!request->rq_replay, "req %p\n", request);

	req_capsule_fini(&request->rq_pill);

	/*
	 * We must take it off the imp_replay_list first.  Otherwise, we'll set
	 * request->rq_reqmsg to NULL while osc_close is dereferencing it.
	 */
	if (request->rq_import) {
		if (!locked)
			spin_lock(&request->rq_import->imp_lock);
		list_del_init(&request->rq_replay_list);
		if (!locked)
			spin_unlock(&request->rq_import->imp_lock);
	}
	LASSERTF(list_empty(&request->rq_replay_list), "req %p\n", request);

	if (atomic_read(&request->rq_refcount) != 0) {
		DEBUG_REQ(D_ERROR, request,
			  "freeing request with nonzero refcount");
		LBUG();
	}

	if (request->rq_repbuf)
		sptlrpc_cli_free_repbuf(request);
	if (request->rq_import) {
		class_import_put(request->rq_import);
		request->rq_import = NULL;
	}
	if (request->rq_bulk)
		ptlrpc_free_bulk_pin(request->rq_bulk);

	if (request->rq_reqbuf || request->rq_clrbuf)
		sptlrpc_cli_free_reqbuf(request);

	if (request->rq_cli_ctx)
		sptlrpc_req_put_ctx(request, !locked);

	if (request->rq_pool)
		__ptlrpc_free_req_to_pool(request);
	else
		ptlrpc_request_cache_free(request);
}

/**
 * Drops one reference count for request \a request.
 * \a locked set indicates that caller holds import imp_lock.
 * Frees the request when reference count reaches zero.
 */
static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked)
{
	if (!request)
		return 1;

	if (request == LP_POISON ||
	    request->rq_reqmsg == LP_POISON) {
		CERROR("dereferencing freed request (bug 575)\n");
		LBUG();
		return 1;
	}

	DEBUG_REQ(D_INFO, request, "refcount now %u",
		  atomic_read(&request->rq_refcount) - 1);

	if (atomic_dec_and_test(&request->rq_refcount)) {
		__ptlrpc_free_req(request, locked);
		return 1;
	}

	return 0;
}

/**
 * Drops one reference count for a request.
 */
void ptlrpc_req_finished(struct ptlrpc_request *request)
{
	__ptlrpc_req_finished(request, 0);
}
EXPORT_SYMBOL(ptlrpc_req_finished);

/**
 * Returns xid of a \a request
 */
__u64 ptlrpc_req_xid(struct ptlrpc_request *request)
{
	return request->rq_xid;
}
EXPORT_SYMBOL(ptlrpc_req_xid);

/**
 * Disengage the client's reply buffer from the network
 * NB does _NOT_ unregister any client-side bulk.
 * IDEMPOTENT, but _not_ safe against concurrent callers.
 * The request owner (i.e. the thread doing the I/O) must call...
 * Returns 1 when the reply is fully unlinked, 0 if unlinking is still
 * in progress (only possible in the \a async case).
 */
int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
{
	int rc;
	wait_queue_head_t *wq;
	struct l_wait_info lwi;

	/* Might sleep. */
	LASSERT(!in_interrupt());

	/* Let's setup deadline for reply unlink. */
	if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
	    async && request->rq_reply_deadline == 0 && cfs_fail_val == 0)
		request->rq_reply_deadline =
			ktime_get_real_seconds() + LONG_UNLINK;

	/* Nothing left to do. */
	if (!ptlrpc_client_recv_or_unlink(request))
		return 1;

	LNetMDUnlink(request->rq_reply_md_h);

	/* Let's check it once again. */
	if (!ptlrpc_client_recv_or_unlink(request))
		return 1;

	/* Move to "Unregistering" phase as reply was not unlinked yet. */
	ptlrpc_rqphase_move(request, RQ_PHASE_UNREG_RPC);

	/* Do not wait for unlink to finish. */
	if (async)
		return 0;

	/*
	 * We have to l_wait_event() whatever the result, to give liblustre
	 * a chance to run reply_in_callback(), and to make sure we've
	 * unlinked before returning a req to the pool.
	 */
	if (request->rq_set)
		wq = &request->rq_set->set_waitq;
	else
		wq = &request->rq_reply_waitq;

	for (;;) {
		/*
		 * Network access will complete in finite time but the HUGE
		 * timeout lets us CWARN for visibility of sluggish NALs
		 */
		lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
					   cfs_time_seconds(1), NULL, NULL);
		rc = l_wait_event(*wq, !ptlrpc_client_recv_or_unlink(request),
				  &lwi);
		if (rc == 0) {
			ptlrpc_rqphase_move(request, request->rq_next_phase);
			return 1;
		}

		LASSERT(rc == -ETIMEDOUT);
		DEBUG_REQ(D_WARNING, request,
			  "Unexpectedly long timeout receiving_reply=%d req_unlinked=%d reply_unlinked=%d",
			  request->rq_receiving_reply,
			  request->rq_req_unlinked,
			  request->rq_reply_unlinked);
	}
}
EXPORT_SYMBOL(ptlrpc_unregister_reply);

static void ptlrpc_free_request(struct ptlrpc_request *req)
{
	spin_lock(&req->rq_lock);
	req->rq_replay = 0;
	spin_unlock(&req->rq_lock);

	if (req->rq_commit_cb)
		req->rq_commit_cb(req);
	list_del_init(&req->rq_replay_list);

	__ptlrpc_req_finished(req, 1);
}

/**
 * the request is committed and dropped from the replay list of its import
 */
void ptlrpc_request_committed(struct ptlrpc_request *req, int force)
{
	struct obd_import *imp = req->rq_import;

	spin_lock(&imp->imp_lock);
	if (list_empty(&req->rq_replay_list)) {
		spin_unlock(&imp->imp_lock);
		return;
	}

	if (force || req->rq_transno <= imp->imp_peer_committed_transno)
		ptlrpc_free_request(req);

	spin_unlock(&imp->imp_lock);
}
EXPORT_SYMBOL(ptlrpc_request_committed);

/**
 * Iterates through replay_list on import and prunes
 * all requests that have a transno smaller than last_committed for the
 * import and don't have rq_replay set.
 * Since requests are sorted in transno order, stops when meeting first
 * transno bigger than last_committed.
 * caller must hold imp->imp_lock
 */
void ptlrpc_free_committed(struct obd_import *imp)
{
	struct ptlrpc_request *req, *saved;
	struct ptlrpc_request *last_req = NULL; /* temporary fire escape */
	bool skip_committed_list = true;

	assert_spin_locked(&imp->imp_lock);

	if (imp->imp_peer_committed_transno == imp->imp_last_transno_checked &&
	    imp->imp_generation == imp->imp_last_generation_checked) {
		CDEBUG(D_INFO, "%s: skip recheck: last_committed %llu\n",
		       imp->imp_obd->obd_name, imp->imp_peer_committed_transno);
		return;
	}
	CDEBUG(D_RPCTRACE, "%s: committing for last_committed %llu gen %d\n",
	       imp->imp_obd->obd_name, imp->imp_peer_committed_transno,
	       imp->imp_generation);

	if (imp->imp_generation != imp->imp_last_generation_checked)
		skip_committed_list = false;

	imp->imp_last_transno_checked = imp->imp_peer_committed_transno;
	imp->imp_last_generation_checked = imp->imp_generation;

	list_for_each_entry_safe(req, saved, &imp->imp_replay_list,
				 rq_replay_list) {
		/* XXX ok to remove when 1357 resolved - rread 05/29/03  */
		LASSERT(req != last_req);
		last_req = req;

		if (req->rq_transno == 0) {
			DEBUG_REQ(D_EMERG, req, "zero transno during replay");
			LBUG();
		}
		if (req->rq_import_generation < imp->imp_generation) {
			DEBUG_REQ(D_RPCTRACE, req, "free request with old gen");
			goto free_req;
		}

		/* not yet committed */
		if (req->rq_transno > imp->imp_peer_committed_transno) {
			DEBUG_REQ(D_RPCTRACE, req, "stopping search");
			break;
		}

		if (req->rq_replay) {
			DEBUG_REQ(D_RPCTRACE, req, "keeping (FL_REPLAY)");
			list_move_tail(&req->rq_replay_list,
				       &imp->imp_committed_list);
			continue;
		}

		DEBUG_REQ(D_INFO, req, "commit (last_committed %llu)",
			  imp->imp_peer_committed_transno);
free_req:
		ptlrpc_free_request(req);
	}
	if (skip_committed_list)
		return;

	list_for_each_entry_safe(req, saved, &imp->imp_committed_list,
				 rq_replay_list) {
		LASSERT(req->rq_transno != 0);
		if (req->rq_import_generation < imp->imp_generation) {
			DEBUG_REQ(D_RPCTRACE, req, "free stale open request");
			ptlrpc_free_request(req);
		}
	}
}

/**
 * Schedule previously sent request for resend.
 * For bulk requests we assign new xid (to avoid problems with
 * lost replies and therefore several transfers landing into same buffer
 * from different sending attempts).
 */
void ptlrpc_resend_req(struct ptlrpc_request *req)
{
	DEBUG_REQ(D_HA, req, "going to resend");
	spin_lock(&req->rq_lock);

	/*
	 * Request got reply but linked to the import list still.
	 * Let ptlrpc_check_set() process it.
	 */
	if (ptlrpc_client_replied(req)) {
		spin_unlock(&req->rq_lock);
		DEBUG_REQ(D_HA, req, "it has reply, so skip it");
		return;
	}

	lustre_msg_set_handle(req->rq_reqmsg, &(struct lustre_handle){ 0 });
	req->rq_status = -EAGAIN;

	req->rq_resend = 1;
	req->rq_net_err = 0;
	req->rq_timedout = 0;
	if (req->rq_bulk) {
		__u64 old_xid = req->rq_xid;

		/* ensure previous bulk fails */
		req->rq_xid = ptlrpc_next_xid();
		CDEBUG(D_HA, "resend bulk old x%llu new x%llu\n",
		       old_xid, req->rq_xid);
	}
	ptlrpc_client_wake_req(req);
	spin_unlock(&req->rq_lock);
}
EXPORT_SYMBOL(ptlrpc_resend_req);

/**
 * Grab additional reference on a request \a req
 */
struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req)
{
	atomic_inc(&req->rq_refcount);
	return req;
}
EXPORT_SYMBOL(ptlrpc_request_addref);

/**
 * Add a request to import replay_list.
 * Must be called under imp_lock
 */
void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
				      struct obd_import *imp)
{
	struct list_head *tmp;

	assert_spin_locked(&imp->imp_lock);

	if (req->rq_transno == 0) {
		DEBUG_REQ(D_EMERG, req, "saving request with zero transno");
		LBUG();
	}

	/*
	 * clear this for new requests that were resent as well
	 * as resent replayed requests.
	 */
	lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);

	/* don't re-add requests that have been replayed */
	if (!list_empty(&req->rq_replay_list))
		return;

	lustre_msg_add_flags(req->rq_reqmsg, MSG_REPLAY);

	LASSERT(imp->imp_replayable);
	/* Balanced in ptlrpc_free_committed, usually. */
	ptlrpc_request_addref(req);
	list_for_each_prev(tmp, &imp->imp_replay_list) {
		struct ptlrpc_request *iter =
			list_entry(tmp, struct ptlrpc_request, rq_replay_list);

		/*
		 * We may have duplicate transnos if we create and then
		 * open a file, or for closes retained to match creating
		 * opens, so use req->rq_xid as a secondary key.
		 * (See bugs 684, 685, and 428.)
		 * XXX no longer needed, but all opens need transnos!
		 */
		if (iter->rq_transno > req->rq_transno)
			continue;

		if (iter->rq_transno == req->rq_transno) {
			LASSERT(iter->rq_xid != req->rq_xid);
			if (iter->rq_xid > req->rq_xid)
				continue;
		}

		list_add(&req->rq_replay_list, &iter->rq_replay_list);
		return;
	}

	list_add(&req->rq_replay_list, &imp->imp_replay_list);
}
EXPORT_SYMBOL(ptlrpc_retain_replayable_request);

/**
 * Send request and wait until it completes.
 * Returns request processing status.
 */
int ptlrpc_queue_wait(struct ptlrpc_request *req)
{
	struct ptlrpc_request_set *set;
	int rc;

	LASSERT(!req->rq_set);
	LASSERT(!req->rq_receiving_reply);

	set = ptlrpc_prep_set();
	if (!set) {
		CERROR("cannot allocate ptlrpc set: rc = %d\n", -ENOMEM);
		return -ENOMEM;
	}

	/* for distributed debugging */
	lustre_msg_set_status(req->rq_reqmsg, current_pid());

	/* add a ref for the set (see comment in ptlrpc_set_add_req) */
	ptlrpc_request_addref(req);
	ptlrpc_set_add_req(set, req);
	rc = ptlrpc_set_wait(set);
	ptlrpc_set_destroy(set);

	return rc;
}
EXPORT_SYMBOL(ptlrpc_queue_wait);

/**
 * Callback used for replayed requests reply processing.
 * In case of successful reply calls registered request replay callback.
 * In case of error restart replay process.
 */
static int ptlrpc_replay_interpret(const struct lu_env *env,
				   struct ptlrpc_request *req,
				   void *data, int rc)
{
	struct ptlrpc_replay_async_args *aa = data;
	struct obd_import *imp = req->rq_import;

	atomic_dec(&imp->imp_replay_inflight);

	if (!ptlrpc_client_replied(req)) {
		CERROR("request replay timed out, restarting recovery\n");
		rc = -ETIMEDOUT;
		goto out;
	}

	if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR &&
	    (lustre_msg_get_status(req->rq_repmsg) == -ENOTCONN ||
	     lustre_msg_get_status(req->rq_repmsg) == -ENODEV)) {
		rc = lustre_msg_get_status(req->rq_repmsg);
		goto out;
	}

	/** VBR: check version failure */
	if (lustre_msg_get_status(req->rq_repmsg) == -EOVERFLOW) {
		/** replay failed due to version mismatch */
		DEBUG_REQ(D_WARNING, req, "Version mismatch during replay\n");
		spin_lock(&imp->imp_lock);
		imp->imp_vbr_failed = 1;
		imp->imp_no_lock_replay = 1;
		spin_unlock(&imp->imp_lock);
		lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status);
	} else {
		/** The transno had better not change over replay. */
		LASSERTF(lustre_msg_get_transno(req->rq_reqmsg) ==
			 lustre_msg_get_transno(req->rq_repmsg) ||
			 lustre_msg_get_transno(req->rq_repmsg) == 0,
			 "%#llx/%#llx\n",
			 lustre_msg_get_transno(req->rq_reqmsg),
			 lustre_msg_get_transno(req->rq_repmsg));
	}

	spin_lock(&imp->imp_lock);
	/** if replays by version then gap occur on server, no trust to locks */
	if (lustre_msg_get_flags(req->rq_repmsg) & MSG_VERSION_REPLAY)
		imp->imp_no_lock_replay = 1;
	imp->imp_last_replay_transno = lustre_msg_get_transno(req->rq_reqmsg);
	spin_unlock(&imp->imp_lock);
	LASSERT(imp->imp_last_replay_transno);

	/* transaction number shouldn't be bigger than the latest replayed */
	if (req->rq_transno > lustre_msg_get_transno(req->rq_reqmsg)) {
		DEBUG_REQ(D_ERROR, req,
			  "Reported transno %llu is bigger than the replayed one: %llu",
			  req->rq_transno,
			  lustre_msg_get_transno(req->rq_reqmsg));
		rc = -EINVAL;
		goto out;
	}

	DEBUG_REQ(D_HA, req, "got rep");

	/* let the callback do fixups, possibly including in the request */
	if (req->rq_replay_cb)
		req->rq_replay_cb(req);

	if (ptlrpc_client_replied(req) &&
	    lustre_msg_get_status(req->rq_repmsg) != aa->praa_old_status) {
		DEBUG_REQ(D_ERROR, req, "status %d, old was %d",
			  lustre_msg_get_status(req->rq_repmsg),
			  aa->praa_old_status);
	} else {
		/* Put it back for re-replay. */
		lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status);
	}

	/*
	 * Errors while replay can set transno to 0, but
	 * imp_last_replay_transno shouldn't be set to 0 anyway
	 */
	if (req->rq_transno == 0)
		CERROR("Transno is 0 during replay!\n");

	/* continue with recovery */
	rc = ptlrpc_import_recovery_state_machine(imp);
out:
	req->rq_send_state = aa->praa_old_state;

	if (rc != 0)
		/* this replay failed, so restart recovery */
		ptlrpc_connect_import(imp);

	return rc;
}

/**
 * Prepares and queues request for replay.
 * Adds it to ptlrpcd queue for actual sending.
 * Returns 0 on success.
 */
int ptlrpc_replay_req(struct ptlrpc_request *req)
{
	struct ptlrpc_replay_async_args *aa;

	LASSERT(req->rq_import->imp_state == LUSTRE_IMP_REPLAY);

	LASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
	aa = ptlrpc_req_async_args(req);
	memset(aa, 0, sizeof(*aa));

	/* Prepare request to be resent with ptlrpcd */
	aa->praa_old_state = req->rq_send_state;
	req->rq_send_state = LUSTRE_IMP_REPLAY;
	req->rq_phase = RQ_PHASE_NEW;
	req->rq_next_phase = RQ_PHASE_UNDEFINED;
	if (req->rq_repmsg)
		aa->praa_old_status = lustre_msg_get_status(req->rq_repmsg);
	req->rq_status = 0;
	req->rq_interpret_reply = ptlrpc_replay_interpret;
	/* Readjust the timeout for current conditions */
	ptlrpc_at_set_req_timeout(req);

	/*
	 * Tell server the net_latency, so the server can calculate how long
	 * it should wait for next replay
	 */
	lustre_msg_set_service_time(req->rq_reqmsg,
				    ptlrpc_at_get_net_latency(req));
	DEBUG_REQ(D_HA, req, "REPLAY");

	atomic_inc(&req->rq_import->imp_replay_inflight);
	ptlrpc_request_addref(req); /* ptlrpcd needs a ref */

	ptlrpcd_add_req(req);
	return 0;
}
EXPORT_SYMBOL(ptlrpc_replay_req);

/**
 * Aborts all in-flight request on import \a imp sending and delayed lists
 */
void ptlrpc_abort_inflight(struct obd_import *imp)
{
	struct list_head *tmp, *n;

	/*
	 * Make sure that no new requests get processed for this import.
	 * ptlrpc_{queue,set}_wait must (and does) hold imp_lock while testing
	 * this flag and then putting requests on sending_list or delayed_list.
	 */
	spin_lock(&imp->imp_lock);

	/*
	 * XXX locking?  Maybe we should remove each request with the list
	 * locked?  Also, how do we know if the requests on the list are
	 * being freed at this time?
	 */
	list_for_each_safe(tmp, n, &imp->imp_sending_list) {
		struct ptlrpc_request *req =
			list_entry(tmp, struct ptlrpc_request, rq_list);

		DEBUG_REQ(D_RPCTRACE, req, "inflight");

		spin_lock(&req->rq_lock);
		if (req->rq_import_generation < imp->imp_generation) {
			req->rq_err = 1;
			req->rq_status = -EIO;
			ptlrpc_client_wake_req(req);
		}
		spin_unlock(&req->rq_lock);
	}

	list_for_each_safe(tmp, n, &imp->imp_delayed_list) {
		struct ptlrpc_request *req =
			list_entry(tmp, struct ptlrpc_request, rq_list);

		DEBUG_REQ(D_RPCTRACE, req, "aborting waiting req");

		spin_lock(&req->rq_lock);
		if (req->rq_import_generation < imp->imp_generation) {
			req->rq_err = 1;
			req->rq_status = -EIO;
			ptlrpc_client_wake_req(req);
		}
		spin_unlock(&req->rq_lock);
	}

	/*
	 * Last chance to free reqs left on the replay list, but we
	 * will still leak reqs that haven't committed.
	 */
	if (imp->imp_replayable)
		ptlrpc_free_committed(imp);

	spin_unlock(&imp->imp_lock);
}
EXPORT_SYMBOL(ptlrpc_abort_inflight);

/**
 * Abort all uncompleted requests in request set \a set
 */
void ptlrpc_abort_set(struct ptlrpc_request_set *set)
{
	struct list_head *tmp, *pos;

	list_for_each_safe(pos, tmp, &set->set_requests) {
		struct ptlrpc_request *req =
			list_entry(pos, struct ptlrpc_request, rq_set_chain);

		spin_lock(&req->rq_lock);
		if (req->rq_phase != RQ_PHASE_RPC) {
			spin_unlock(&req->rq_lock);
			continue;
		}

		req->rq_err = 1;
		req->rq_status = -EINTR;
		ptlrpc_client_wake_req(req);
		spin_unlock(&req->rq_lock);
	}
}

static __u64 ptlrpc_last_xid;
static spinlock_t ptlrpc_last_xid_lock;

/**
 * Initialize the XID for the node.  This is common among all requests on
 * this node, and only requires the property that it is monotonically
 * increasing.  It does not need to be sequential.  Since this is also used
 * as the RDMA match bits, it is important that a single client NOT have
 * the same match bits for two different in-flight requests, hence we do
 * NOT want to have an XID per target or similar.
 *
 * To avoid an unlikely collision between match bits after a client reboot
 * (which would deliver old data into the wrong RDMA buffer) initialize
 * the XID based on the current time, assuming a maximum RPC rate of 1M RPC/s.
 * If the time is clearly incorrect, we instead use a 62-bit random number.
 * In the worst case the random number will overflow 1M RPCs per second in
 * 9133 years, or permutations thereof.
 */
#define YEAR_2004 (1ULL << 30)
void ptlrpc_init_xid(void)
{
	time64_t now = ktime_get_real_seconds();

	spin_lock_init(&ptlrpc_last_xid_lock);
	if (now < YEAR_2004) {
		cfs_get_random_bytes(&ptlrpc_last_xid, sizeof(ptlrpc_last_xid));
		ptlrpc_last_xid >>= 2;
		ptlrpc_last_xid |= (1ULL << 61);
	} else {
		ptlrpc_last_xid = (__u64)now << 20;
	}

	/* Always need to be aligned to a power-of-two for multi-bulk BRW */
	CLASSERT(((PTLRPC_BULK_OPS_COUNT - 1) & PTLRPC_BULK_OPS_COUNT) == 0);
	ptlrpc_last_xid &= PTLRPC_BULK_OPS_MASK;
}

/**
 * Increase xid and returns resulting new value to the caller.
 *
 * Multi-bulk BRW RPCs consume multiple XIDs for each bulk transfer, starting
 * at the returned xid, up to xid + PTLRPC_BULK_OPS_COUNT - 1. The BRW RPC
 * itself uses the last bulk xid needed, so the server can determine the
 * number of bulk transfers from the RPC XID and a bitmask.  The starting
 * xid must align to a power-of-two value.
 *
 * This is assumed to be true due to the initial ptlrpc_last_xid
 * value also being initialized to a power-of-two value. LU-1431
 */
__u64 ptlrpc_next_xid(void)
{
	__u64 next;

	spin_lock(&ptlrpc_last_xid_lock);
	next = ptlrpc_last_xid + PTLRPC_BULK_OPS_COUNT;
	ptlrpc_last_xid = next;
	spin_unlock(&ptlrpc_last_xid_lock);

	return next;
}
EXPORT_SYMBOL(ptlrpc_next_xid);

/**
 * Get a glimpse at what next xid value might have been.
 * Returns possible next xid.
 */
__u64 ptlrpc_sample_next_xid(void)
{
#if BITS_PER_LONG == 32
	/* need to avoid possible word tearing on 32-bit systems */
	__u64 next;

	spin_lock(&ptlrpc_last_xid_lock);
	next = ptlrpc_last_xid + PTLRPC_BULK_OPS_COUNT;
	spin_unlock(&ptlrpc_last_xid_lock);

	return next;
#else
	/* No need to lock, since returned value is racy anyways */
	return ptlrpc_last_xid + PTLRPC_BULK_OPS_COUNT;
#endif
}
EXPORT_SYMBOL(ptlrpc_sample_next_xid);

/**
 * Functions for operating ptlrpc workers.
 *
 * A ptlrpc work is a function which will be running inside ptlrpc context.
 * The callback shouldn't sleep otherwise it will block that ptlrpcd thread.
 *
 * 1. after a work is created, it can be used many times, that is:
 *         handler = ptlrpcd_alloc_work();
 *         ptlrpcd_queue_work();
 *
 *    queue it again when necessary:
 *         ptlrpcd_queue_work();
 *         ptlrpcd_destroy_work();
 * 2. ptlrpcd_queue_work() can be called by multiple processes concurrently,
 *    but the work will only be queued once at any time. Also, as its name
 *    implies, there may be a delay before the work actually runs in a
 *    ptlrpcd thread.
 */
struct ptlrpc_work_async_args {
	int (*cb)(const struct lu_env *, void *);
	void *cbdata;
};

static void ptlrpcd_add_work_req(struct ptlrpc_request *req)
{
	/* re-initialize the req */
	req->rq_timeout = obd_timeout;
	req->rq_sent = ktime_get_real_seconds();
	req->rq_deadline = req->rq_sent + req->rq_timeout;
	req->rq_phase = RQ_PHASE_INTERPRET;
	req->rq_next_phase = RQ_PHASE_COMPLETE;
	req->rq_xid = ptlrpc_next_xid();
	req->rq_import_generation = req->rq_import->imp_generation;

	ptlrpcd_add_req(req);
}

static int work_interpreter(const struct lu_env *env,
			    struct ptlrpc_request *req, void *data, int rc)
{
	struct ptlrpc_work_async_args *arg = data;

	LASSERT(ptlrpcd_check_work(req));

	rc = arg->cb(env, arg->cbdata);

	list_del_init(&req->rq_set_chain);
	req->rq_set = NULL;

	if (atomic_dec_return(&req->rq_refcount) > 1) {
		atomic_set(&req->rq_refcount, 2);
		ptlrpcd_add_work_req(req);
	}
	return rc;
}

static int worker_format;

static int ptlrpcd_check_work(struct ptlrpc_request *req)
{
	return req->rq_pill.rc_fmt == (void *)&worker_format;
}

3025 void *ptlrpcd_alloc_work(struct obd_import
*imp
,
3026 int (*cb
)(const struct lu_env
*, void *), void *cbdata
)
3028 struct ptlrpc_request
*req
= NULL
;
3029 struct ptlrpc_work_async_args
*args
;
3034 return ERR_PTR(-EINVAL
);
3036 /* copy some code from deprecated fakereq. */
3037 req
= ptlrpc_request_cache_alloc(GFP_NOFS
);
3039 CERROR("ptlrpc: run out of memory!\n");
3040 return ERR_PTR(-ENOMEM
);
3043 ptlrpc_cli_req_init(req
);
3045 req
->rq_send_state
= LUSTRE_IMP_FULL
;
3046 req
->rq_type
= PTL_RPC_MSG_REQUEST
;
3047 req
->rq_import
= class_import_get(imp
);
3048 req
->rq_interpret_reply
= work_interpreter
;
3049 /* don't want reply */
3050 req
->rq_no_delay
= 1;
3051 req
->rq_no_resend
= 1;
3052 req
->rq_pill
.rc_fmt
= (void *)&worker_format
;
3054 CLASSERT(sizeof(*args
) <= sizeof(req
->rq_async_args
));
3055 args
= ptlrpc_req_async_args(req
);
3057 args
->cbdata
= cbdata
;
3061 EXPORT_SYMBOL(ptlrpcd_alloc_work
);
void ptlrpcd_destroy_work(void *handler)
{
	struct ptlrpc_request *req = handler;

	if (req)
		ptlrpc_req_finished(req);
}
EXPORT_SYMBOL(ptlrpcd_destroy_work);

int ptlrpcd_queue_work(void *handler)
{
	struct ptlrpc_request *req = handler;

	/*
	 * Check if the req is already being queued.
	 *
	 * Here comes a trick: it lacks a way of checking if a req is being
	 * processed reliably in ptlrpc. Here I have to use refcount of req
	 * for this purpose. This is okay because the caller should use this
	 * req as opaque data. - Jinshan
	 */
	LASSERT(atomic_read(&req->rq_refcount) > 0);
	if (atomic_inc_return(&req->rq_refcount) == 2)
		ptlrpcd_add_work_req(req);
	return 0;
}
EXPORT_SYMBOL(ptlrpcd_queue_work);