/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
/** Implementation of client-side PortalRPC interfaces */

#define DEBUG_SUBSYSTEM S_RPC

#include "../include/obd_support.h"
#include "../include/obd_class.h"
#include "../include/lustre_lib.h"
#include "../include/lustre_ha.h"
#include "../include/lustre_import.h"
#include "../include/lustre_req_layout.h"

#include "ptlrpc_internal.h"
static int ptlrpc_send_new_req(struct ptlrpc_request *req);
static int ptlrpcd_check_work(struct ptlrpc_request *req);
/**
 * Initialize passed in client structure \a cl.
 */
void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
			struct ptlrpc_client *cl)
{
	cl->cli_request_portal = req_portal;
	cl->cli_reply_portal = rep_portal;
	cl->cli_name = name;
}
EXPORT_SYMBOL(ptlrpc_init_client);
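
/*
 * Usage sketch (editorial addition, not part of the original file): a client
 * obd typically wires up its portals once at setup time, for example the
 * LDLM callback client.  The exact portal constants and field name below are
 * illustrative of that pattern, not prescribed by this file:
 *
 *	ptlrpc_init_client(LDLM_CB_REQUEST_PORTAL, LDLM_CB_REPLY_PORTAL,
 *			   "ldlm_cb_client", &obd->obd_ldlm_client);
 */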
/**
 * Return PortalRPC connection for remote uuid \a uuid
 */
struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid)
{
	struct ptlrpc_connection *c;
	lnet_nid_t self;
	lnet_process_id_t peer;
	int err;

	/*
	 * ptlrpc_uuid_to_peer() initializes its 2nd parameter
	 * before accessing its values.
	 */
	/* coverity[uninit_use_in_call] */
	err = ptlrpc_uuid_to_peer(uuid, &peer, &self);
	if (err != 0) {
		CNETERR("cannot find peer %s!\n", uuid->uuid);
		return NULL;
	}

	c = ptlrpc_connection_get(peer, self, uuid);
	if (c) {
		memcpy(c->c_remote_uuid.uuid,
		       uuid->uuid, sizeof(c->c_remote_uuid.uuid));
	}

	CDEBUG(D_INFO, "%s -> %p\n", uuid->uuid, c);

	return c;
}
EXPORT_SYMBOL(ptlrpc_uuid_to_connection);
/**
 * Allocate and initialize new bulk descriptor on the sender.
 * Returns pointer to the descriptor or NULL on error.
 */
struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned npages, unsigned max_brw,
					 unsigned type, unsigned portal)
{
	struct ptlrpc_bulk_desc *desc;
	int i;

	desc = kzalloc(offsetof(struct ptlrpc_bulk_desc, bd_iov[npages]),
		       GFP_NOFS);
	if (!desc)
		return NULL;

	spin_lock_init(&desc->bd_lock);
	init_waitqueue_head(&desc->bd_waitq);
	desc->bd_max_iov = npages;
	desc->bd_iov_count = 0;
	desc->bd_portal = portal;
	desc->bd_type = type;
	desc->bd_md_count = 0;
	LASSERT(max_brw > 0);
	desc->bd_md_max_brw = min(max_brw, PTLRPC_BULK_OPS_COUNT);
	/*
	 * PTLRPC_BULK_OPS_COUNT is the compile-time transfer limit for this
	 * node. Negotiated ocd_brw_size will always be <= this number.
	 */
	for (i = 0; i < PTLRPC_BULK_OPS_COUNT; i++)
		LNetInvalidateHandle(&desc->bd_mds[i]);

	return desc;
}
/**
 * Prepare bulk descriptor for specified outgoing request \a req that
 * can fit \a npages * pages. \a type is bulk type. \a portal is where
 * the bulk is to be sent. Used on client-side.
 * Returns pointer to newly allocated initialized bulk descriptor or NULL on
 * error.
 */
struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
					      unsigned npages, unsigned max_brw,
					      unsigned type, unsigned portal)
{
	struct obd_import *imp = req->rq_import;
	struct ptlrpc_bulk_desc *desc;

	LASSERT(type == BULK_PUT_SINK || type == BULK_GET_SOURCE);
	desc = ptlrpc_new_bulk(npages, max_brw, type, portal);
	if (!desc)
		return NULL;

	desc->bd_import_generation = req->rq_import_generation;
	desc->bd_import = class_import_get(imp);
	desc->bd_req = req;

	desc->bd_cbid.cbid_fn = client_bulk_callback;
	desc->bd_cbid.cbid_arg = desc;

	/* This makes req own desc, and free it when she frees herself */
	req->rq_bulk = desc;

	return desc;
}
EXPORT_SYMBOL(ptlrpc_prep_bulk_imp);
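
/*
 * Usage sketch (editorial addition): a sender sets up a GET-source bulk for
 * an outgoing request and then attaches its data pages one by one.  The page
 * count, page array and OST_BULK_PORTAL target below are illustrative:
 *
 *	struct ptlrpc_bulk_desc *desc;
 *	int i;
 *
 *	desc = ptlrpc_prep_bulk_imp(req, npages, 1, BULK_GET_SOURCE,
 *				    OST_BULK_PORTAL);
 *	if (!desc)
 *		return -ENOMEM;
 *	for (i = 0; i < npages; i++)
 *		ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE);
 *
 * After this, desc is owned by req and is freed together with it.
 */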
/**
 * Add a page \a page to the bulk descriptor \a desc.
 * Data to transfer in the page starts at offset \a pageoffset and
 * amount of data to transfer from the page is \a len
 */
void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
			     struct page *page, int pageoffset, int len, int pin)
{
	LASSERT(desc->bd_iov_count < desc->bd_max_iov);
	LASSERT(page);
	LASSERT(pageoffset >= 0);
	LASSERT(len > 0);
	LASSERT(pageoffset + len <= PAGE_CACHE_SIZE);

	desc->bd_nob += len;

	if (pin)
		page_cache_get(page);

	ptlrpc_add_bulk_page(desc, page, pageoffset, len);
}
EXPORT_SYMBOL(__ptlrpc_prep_bulk_page);
/**
 * Uninitialize and free bulk descriptor \a desc.
 * Works on bulk descriptors both from server and client side.
 */
void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc, int unpin)
{
	int i;

	LASSERT(desc);
	LASSERT(desc->bd_iov_count != LI_POISON); /* not freed already */
	LASSERT(desc->bd_md_count == 0);	  /* network hands off */
	LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL));

	sptlrpc_enc_pool_put_pages(desc);

	if (desc->bd_export)
		class_export_put(desc->bd_export);
	else
		class_import_put(desc->bd_import);

	if (unpin) {
		for (i = 0; i < desc->bd_iov_count; i++)
			page_cache_release(desc->bd_iov[i].kiov_page);
	}

	kfree(desc);
}
EXPORT_SYMBOL(__ptlrpc_free_bulk);
/**
 * Set server timelimit for this req, i.e. how long are we willing to wait
 * for reply before timing out this request.
 */
void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req)
{
	__u32 serv_est;
	int idx;
	struct imp_at *at;

	LASSERT(req->rq_import);

	if (AT_OFF) {
		/* non-AT settings */
		/*
		 * \a imp_server_timeout means this is a reverse import and
		 * we send (currently only) ASTs to the client and cannot afford
		 * to wait too long for the reply, otherwise the other client
		 * (because of which we are sending this request) would
		 * timeout waiting for us
		 */
		req->rq_timeout = req->rq_import->imp_server_timeout ?
				  obd_timeout / 2 : obd_timeout;
	} else {
		at = &req->rq_import->imp_at;
		idx = import_at_get_index(req->rq_import,
					  req->rq_request_portal);
		serv_est = at_get(&at->iat_service_estimate[idx]);
		req->rq_timeout = at_est2timeout(serv_est);
	}
	/*
	 * We could get even fancier here, using history to predict increased
	 * loading...
	 */

	/*
	 * Let the server know what this RPC timeout is by putting it in the
	 * reqmsg
	 */
	lustre_msg_set_timeout(req->rq_reqmsg, req->rq_timeout);
}
EXPORT_SYMBOL(ptlrpc_at_set_req_timeout);
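
/*
 * Worked example (editorial addition): with adaptive timeouts enabled,
 * at_est2timeout() pads the measured service estimate by 25% plus 5 seconds
 * (see the "AT service time x 125% + 5s" note in after_reply() below), so a
 * 16s estimate yields rq_timeout = 16 + 16/4 + 5 = 25 seconds.  With AT off,
 * a reverse import simply gets obd_timeout / 2.
 */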
/* Adjust max service estimate based on server value */
static void ptlrpc_at_adj_service(struct ptlrpc_request *req,
				  unsigned int serv_est)
{
	int idx;
	unsigned int oldse;
	struct imp_at *at;

	LASSERT(req->rq_import);
	at = &req->rq_import->imp_at;

	idx = import_at_get_index(req->rq_import, req->rq_request_portal);
	/*
	 * max service estimates are tracked on the server side,
	 * so just keep minimal history here
	 */
	oldse = at_measured(&at->iat_service_estimate[idx], serv_est);
	if (oldse != 0)
		CDEBUG(D_ADAPTTO, "The RPC service estimate for %s ptl %d has changed from %d to %d\n",
		       req->rq_import->imp_obd->obd_name, req->rq_request_portal,
		       oldse, at_get(&at->iat_service_estimate[idx]));
}
/* Expected network latency per remote node (secs) */
int ptlrpc_at_get_net_latency(struct ptlrpc_request *req)
{
	return AT_OFF ? 0 : at_get(&req->rq_import->imp_at.iat_net_latency);
}
/* Adjust expected network latency */
static void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req,
				      unsigned int service_time)
{
	unsigned int nl, oldnl;
	struct imp_at *at;
	time64_t now = ktime_get_real_seconds();

	LASSERT(req->rq_import);

	if (service_time > now - req->rq_sent + 3) {
		/*
		 * bz16408, however, this can also happen if early reply
		 * is lost and client RPC is expired and resent, early reply
		 * or reply of original RPC can still be fit in reply buffer
		 * of resent RPC, now client is measuring time from the
		 * resent time, but server sent back service time of original
		 * RPC.
		 */
		CDEBUG((lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) ?
		       D_ADAPTTO : D_WARNING,
		       "Reported service time %u > total measured time "
		       CFS_DURATION_T"\n", service_time,
		       (long)(now - req->rq_sent));
		return;
	}

	/* Network latency is total time less server processing time */
	nl = max_t(int, now - req->rq_sent -
		   service_time, 0) + 1; /* st rounding */
	at = &req->rq_import->imp_at;

	oldnl = at_measured(&at->iat_net_latency, nl);
	if (oldnl != 0)
		CDEBUG(D_ADAPTTO, "The network latency for %s (nid %s) has changed from %d to %d\n",
		       req->rq_import->imp_obd->obd_name,
		       obd_uuid2str(
			       &req->rq_import->imp_connection->c_remote_uuid),
		       oldnl, at_get(&at->iat_net_latency));
}
static int unpack_reply(struct ptlrpc_request *req)
{
	int rc;

	if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL) {
		rc = ptlrpc_unpack_rep_msg(req, req->rq_replen);
		if (rc) {
			DEBUG_REQ(D_ERROR, req, "unpack_rep failed: %d", rc);
			return -EPROTO;
		}
	}

	rc = lustre_unpack_rep_ptlrpc_body(req, MSG_PTLRPC_BODY_OFF);
	if (rc) {
		DEBUG_REQ(D_ERROR, req, "unpack ptlrpc body failed: %d", rc);
		return -EPROTO;
	}
	return 0;
}
/**
 * Handle an early reply message, called with the rq_lock held.
 * If anything goes wrong just ignore it - same as if it never happened
 */
static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req)
	__must_hold(&req->rq_lock)
{
	struct ptlrpc_request *early_req;
	time64_t olddl;
	int rc;

	req->rq_early = 0;
	spin_unlock(&req->rq_lock);

	rc = sptlrpc_cli_unwrap_early_reply(req, &early_req);
	if (rc) {
		spin_lock(&req->rq_lock);
		return rc;
	}

	rc = unpack_reply(early_req);
	if (rc == 0) {
		/* Expecting to increase the service time estimate here */
		ptlrpc_at_adj_service(req,
				      lustre_msg_get_timeout(early_req->rq_repmsg));
		ptlrpc_at_adj_net_latency(req,
					  lustre_msg_get_service_time(early_req->rq_repmsg));
	}

	sptlrpc_cli_finish_early_reply(early_req);

	if (rc != 0) {
		spin_lock(&req->rq_lock);
		return rc;
	}

	/* Adjust the local timeout for this req */
	ptlrpc_at_set_req_timeout(req);

	spin_lock(&req->rq_lock);
	olddl = req->rq_deadline;
	/*
	 * server assumes it now has rq_timeout from when it sent the
	 * early reply, so client should give it at least that long.
	 */
	req->rq_deadline = ktime_get_real_seconds() + req->rq_timeout +
			   ptlrpc_at_get_net_latency(req);

	DEBUG_REQ(D_ADAPTTO, req,
		  "Early reply #%d, new deadline in %lds (%lds)",
		  req->rq_early_count,
		  (long)(req->rq_deadline - ktime_get_real_seconds()),
		  (long)(req->rq_deadline - olddl));

	return rc;
}
static struct kmem_cache *request_cache;
int ptlrpc_request_cache_init(void)
{
	request_cache = kmem_cache_create("ptlrpc_cache",
					  sizeof(struct ptlrpc_request),
					  0, SLAB_HWCACHE_ALIGN, NULL);
	return !request_cache ? -ENOMEM : 0;
}
void ptlrpc_request_cache_fini(void)
{
	kmem_cache_destroy(request_cache);
}
struct ptlrpc_request *ptlrpc_request_cache_alloc(gfp_t flags)
{
	struct ptlrpc_request *req;

	req = kmem_cache_zalloc(request_cache, flags);
	return req;
}
void ptlrpc_request_cache_free(struct ptlrpc_request *req)
{
	kmem_cache_free(request_cache, req);
}
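
/*
 * Lifecycle sketch (editorial addition): the slab-backed request cache is
 * created once at module init and torn down at module exit; individual
 * requests then come and go through the alloc/free pair:
 *
 *	if (ptlrpc_request_cache_init())
 *		return -ENOMEM;
 *	req = ptlrpc_request_cache_alloc(GFP_NOFS);
 *	...
 *	ptlrpc_request_cache_free(req);
 *	ptlrpc_request_cache_fini();
 */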
/**
 * Wind down request pool \a pool.
 * Frees all requests from the pool too
 */
void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool)
{
	struct list_head *l, *tmp;
	struct ptlrpc_request *req;

	spin_lock(&pool->prp_lock);
	list_for_each_safe(l, tmp, &pool->prp_req_list) {
		req = list_entry(l, struct ptlrpc_request, rq_list);
		list_del(&req->rq_list);
		LASSERT(req->rq_reqbuf);
		LASSERT(req->rq_reqbuf_len == pool->prp_rq_size);
		kvfree(req->rq_reqbuf);
		ptlrpc_request_cache_free(req);
	}
	spin_unlock(&pool->prp_lock);
	kfree(pool);
}
EXPORT_SYMBOL(ptlrpc_free_rq_pool);
/**
 * Allocates, initializes and adds \a num_rq requests to the pool \a pool
 */
int ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq)
{
	int i;
	int size = 1;

	while (size < pool->prp_rq_size)
		size <<= 1;

	LASSERTF(list_empty(&pool->prp_req_list) ||
		 size == pool->prp_rq_size,
		 "Trying to change pool size with nonempty pool from %d to %d bytes\n",
		 pool->prp_rq_size, size);

	spin_lock(&pool->prp_lock);
	pool->prp_rq_size = size;
	for (i = 0; i < num_rq; i++) {
		struct ptlrpc_request *req;
		struct lustre_msg *msg;

		spin_unlock(&pool->prp_lock);
		req = ptlrpc_request_cache_alloc(GFP_NOFS);
		if (!req)
			return i;
		msg = libcfs_kvzalloc(size, GFP_NOFS);
		if (!msg) {
			ptlrpc_request_cache_free(req);
			return i;
		}
		req->rq_reqbuf = msg;
		req->rq_reqbuf_len = size;
		req->rq_pool = pool;
		spin_lock(&pool->prp_lock);
		list_add_tail(&req->rq_list, &pool->prp_req_list);
	}
	spin_unlock(&pool->prp_lock);
	return num_rq;
}
EXPORT_SYMBOL(ptlrpc_add_rqs_to_pool);
/**
 * Create and initialize new request pool with given attributes:
 * \a num_rq - initial number of requests to create for the pool
 * \a msgsize - maximum message size possible for requests in this pool
 * \a populate_pool - function to be called when more requests need to be added
 *                    to the pool
 * Returns pointer to newly created pool or NULL on error.
 */
struct ptlrpc_request_pool *
ptlrpc_init_rq_pool(int num_rq, int msgsize,
		    int (*populate_pool)(struct ptlrpc_request_pool *, int))
{
	struct ptlrpc_request_pool *pool;

	pool = kzalloc(sizeof(struct ptlrpc_request_pool), GFP_NOFS);
	if (!pool)
		return NULL;

	/*
	 * Request next power of two for the allocation, because internally
	 * the kernel would do exactly this
	 */
	spin_lock_init(&pool->prp_lock);
	INIT_LIST_HEAD(&pool->prp_req_list);
	pool->prp_rq_size = msgsize + SPTLRPC_MAX_PAYLOAD;
	pool->prp_populate = populate_pool;

	populate_pool(pool, num_rq);

	return pool;
}
EXPORT_SYMBOL(ptlrpc_init_rq_pool);
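
/*
 * Usage sketch (editorial addition): callers that must make progress under
 * memory pressure (the writeout path mentioned below) pre-populate a pool
 * sized for their largest message, usually passing ptlrpc_add_rqs_to_pool()
 * itself as the population callback.  The counts, sizes and request format
 * here are illustrative:
 *
 *	struct ptlrpc_request_pool *pool;
 *
 *	pool = ptlrpc_init_rq_pool(4, OST_MAXREQSIZE,
 *				   ptlrpc_add_rqs_to_pool);
 *	if (!pool)
 *		return -ENOMEM;
 *	req = ptlrpc_request_alloc_pool(imp, pool, &RQF_OST_BRW_WRITE);
 */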
/**
 * Fetches one request from pool \a pool
 */
static struct ptlrpc_request *
ptlrpc_prep_req_from_pool(struct ptlrpc_request_pool *pool)
{
	struct ptlrpc_request *request;
	struct lustre_msg *reqbuf;

	if (!pool)
		return NULL;

	spin_lock(&pool->prp_lock);

	/*
	 * See if we have anything in a pool, and bail out if nothing.
	 * In the writeout path, where this matters, this is safe to do
	 * because nothing is lost in this case, and when some in-flight
	 * requests complete, this code will be called again.
	 */
	if (unlikely(list_empty(&pool->prp_req_list))) {
		spin_unlock(&pool->prp_lock);
		return NULL;
	}

	request = list_entry(pool->prp_req_list.next, struct ptlrpc_request,
			     rq_list);
	list_del_init(&request->rq_list);
	spin_unlock(&pool->prp_lock);

	LASSERT(request->rq_reqbuf);
	LASSERT(request->rq_pool);

	reqbuf = request->rq_reqbuf;
	memset(request, 0, sizeof(*request));
	request->rq_reqbuf = reqbuf;
	request->rq_reqbuf_len = pool->prp_rq_size;
	request->rq_pool = pool;

	return request;
}
/**
 * Returns freed \a request to pool.
 */
static void __ptlrpc_free_req_to_pool(struct ptlrpc_request *request)
{
	struct ptlrpc_request_pool *pool = request->rq_pool;

	spin_lock(&pool->prp_lock);
	LASSERT(list_empty(&request->rq_list));
	LASSERT(!request->rq_receiving_reply);
	list_add_tail(&request->rq_list, &pool->prp_req_list);
	spin_unlock(&pool->prp_lock);
}
static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
				      __u32 version, int opcode,
				      int count, __u32 *lengths, char **bufs,
				      struct ptlrpc_cli_ctx *ctx)
{
	struct obd_import *imp = request->rq_import;
	int rc;

	if (ctx) {
		request->rq_cli_ctx = sptlrpc_cli_ctx_get(ctx);
	} else {
		rc = sptlrpc_req_get_ctx(request);
		if (rc)
			goto out_free;
	}
	sptlrpc_req_set_flavor(request, opcode);

	rc = lustre_pack_request(request, imp->imp_msg_magic, count,
				 lengths, bufs);
	if (rc) {
		LASSERT(!request->rq_pool);
		goto out_ctx;
	}

	lustre_msg_add_version(request->rq_reqmsg, version);
	request->rq_send_state = LUSTRE_IMP_FULL;
	request->rq_type = PTL_RPC_MSG_REQUEST;
	request->rq_export = NULL;

	request->rq_req_cbid.cbid_fn = request_out_callback;
	request->rq_req_cbid.cbid_arg = request;

	request->rq_reply_cbid.cbid_fn = reply_in_callback;
	request->rq_reply_cbid.cbid_arg = request;

	request->rq_reply_deadline = 0;
	request->rq_phase = RQ_PHASE_NEW;
	request->rq_next_phase = RQ_PHASE_UNDEFINED;

	request->rq_request_portal = imp->imp_client->cli_request_portal;
	request->rq_reply_portal = imp->imp_client->cli_reply_portal;

	ptlrpc_at_set_req_timeout(request);

	spin_lock_init(&request->rq_lock);
	INIT_LIST_HEAD(&request->rq_list);
	INIT_LIST_HEAD(&request->rq_timed_list);
	INIT_LIST_HEAD(&request->rq_replay_list);
	INIT_LIST_HEAD(&request->rq_ctx_chain);
	INIT_LIST_HEAD(&request->rq_set_chain);
	INIT_LIST_HEAD(&request->rq_history_list);
	INIT_LIST_HEAD(&request->rq_exp_list);
	init_waitqueue_head(&request->rq_reply_waitq);
	init_waitqueue_head(&request->rq_set_waitq);
	request->rq_xid = ptlrpc_next_xid();
	atomic_set(&request->rq_refcount, 1);

	lustre_msg_set_opc(request->rq_reqmsg, opcode);

	return 0;
out_ctx:
	sptlrpc_cli_ctx_put(request->rq_cli_ctx, 1);
out_free:
	class_import_put(imp);
	return rc;
}
int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
			     __u32 version, int opcode, char **bufs,
			     struct ptlrpc_cli_ctx *ctx)
{
	int count;

	count = req_capsule_filled_sizes(&request->rq_pill, RCL_CLIENT);
	return __ptlrpc_request_bufs_pack(request, version, opcode, count,
					  request->rq_pill.rc_area[RCL_CLIENT],
					  bufs, ctx);
}
EXPORT_SYMBOL(ptlrpc_request_bufs_pack);
/**
 * Pack request buffers for network transfer, performing encryption steps
 * if necessary.
 */
int ptlrpc_request_pack(struct ptlrpc_request *request,
			__u32 version, int opcode)
{
	int rc;

	rc = ptlrpc_request_bufs_pack(request, version, opcode, NULL, NULL);
	if (rc)
		return rc;

	/*
	 * For some old 1.8 clients (< 1.8.7), they will LASSERT the size of
	 * ptlrpc_body sent from server equal to local ptlrpc_body size, so we
	 * have to send old ptlrpc_body to keep interoperability with these
	 * clients.
	 *
	 * Only three kinds of server->client RPCs so far:
	 *  - LDLM_BL_CALLBACK
	 *  - LDLM_CP_CALLBACK
	 *  - LDLM_GL_CALLBACK
	 *
	 * XXX This should be removed whenever we drop the interoperability
	 *     with these old clients.
	 */
	if (opcode == LDLM_BL_CALLBACK || opcode == LDLM_CP_CALLBACK ||
	    opcode == LDLM_GL_CALLBACK)
		req_capsule_shrink(&request->rq_pill, &RMF_PTLRPC_BODY,
				   sizeof(struct ptlrpc_body_v2), RCL_CLIENT);

	return rc;
}
EXPORT_SYMBOL(ptlrpc_request_pack);
/**
 * Helper function to allocate new request on import \a imp
 * and possibly using existing request from pool \a pool if provided.
 * Returns allocated request structure with import field filled or
 * NULL on error.
 */
static
struct ptlrpc_request *__ptlrpc_request_alloc(struct obd_import *imp,
					      struct ptlrpc_request_pool *pool)
{
	struct ptlrpc_request *request;

	request = ptlrpc_request_cache_alloc(GFP_NOFS);

	if (!request && pool)
		request = ptlrpc_prep_req_from_pool(pool);

	if (request) {
		LASSERTF((unsigned long)imp > 0x1000, "%p\n", imp);
		LASSERT(imp != LP_POISON);
		LASSERTF((unsigned long)imp->imp_client > 0x1000, "%p\n",
			 imp->imp_client);
		LASSERT(imp->imp_client != LP_POISON);

		request->rq_import = class_import_get(imp);
	} else {
		CERROR("request allocation out of memory\n");
	}

	return request;
}
/**
 * Helper function for creating a request.
 * Calls __ptlrpc_request_alloc to allocate new request structure and inits
 * buffer structures according to capsule template \a format.
 * Returns allocated request structure pointer or NULL on error.
 */
static struct ptlrpc_request *
ptlrpc_request_alloc_internal(struct obd_import *imp,
			      struct ptlrpc_request_pool *pool,
			      const struct req_format *format)
{
	struct ptlrpc_request *request;

	request = __ptlrpc_request_alloc(imp, pool);
	if (!request)
		return NULL;

	req_capsule_init(&request->rq_pill, request, RCL_CLIENT);
	req_capsule_set(&request->rq_pill, format);
	return request;
}
/**
 * Allocate new request structure for import \a imp and initialize its
 * buffer structure according to capsule template \a format.
 */
struct ptlrpc_request *ptlrpc_request_alloc(struct obd_import *imp,
					    const struct req_format *format)
{
	return ptlrpc_request_alloc_internal(imp, NULL, format);
}
EXPORT_SYMBOL(ptlrpc_request_alloc);
/**
 * Allocate new request structure for import \a imp from pool \a pool and
 * initialize its buffer structure according to capsule template \a format.
 */
struct ptlrpc_request *ptlrpc_request_alloc_pool(struct obd_import *imp,
						 struct ptlrpc_request_pool *pool,
						 const struct req_format *format)
{
	return ptlrpc_request_alloc_internal(imp, pool, format);
}
EXPORT_SYMBOL(ptlrpc_request_alloc_pool);
/**
 * For requests not from pool, free memory of the request structure.
 * For requests obtained from a pool earlier, return request back to pool.
 */
void ptlrpc_request_free(struct ptlrpc_request *request)
{
	if (request->rq_pool)
		__ptlrpc_free_req_to_pool(request);
	else
		ptlrpc_request_cache_free(request);
}
EXPORT_SYMBOL(ptlrpc_request_free);
/**
 * Allocate new request for operation \a opcode and immediately pack it for
 * network transfer.
 * Only used for simple requests like OBD_PING where the only important
 * part of the request is the operation itself.
 * Returns allocated request or NULL on error.
 */
struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *imp,
						 const struct req_format *format,
						 __u32 version, int opcode)
{
	struct ptlrpc_request *req = ptlrpc_request_alloc(imp, format);
	int rc;

	if (req) {
		rc = ptlrpc_request_pack(req, version, opcode);
		if (rc) {
			ptlrpc_request_free(req);
			req = NULL;
		}
	}
	return req;
}
EXPORT_SYMBOL(ptlrpc_request_alloc_pack);
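
/*
 * Usage sketch (editorial addition): an OBD_PING is the canonical "simple
 * request" mentioned above; a caller builds it in one step, sets the expected
 * reply length and waits for it synchronously.  Error handling is elided:
 *
 *	req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING,
 *					LUSTRE_OBD_VERSION, OBD_PING);
 *	if (!req)
 *		return -ENOMEM;
 *	ptlrpc_request_set_replen(req);
 *	rc = ptlrpc_queue_wait(req);
 *	ptlrpc_req_finished(req);
 */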
/**
 * Allocate and initialize new request set structure on the current CPT.
 * Returns a pointer to the newly allocated set structure or NULL on error.
 */
struct ptlrpc_request_set *ptlrpc_prep_set(void)
{
	struct ptlrpc_request_set *set;
	int cpt;

	cpt = cfs_cpt_current(cfs_cpt_table, 0);
	set = kzalloc_node(sizeof(*set), GFP_NOFS,
			   cfs_cpt_spread_node(cfs_cpt_table, cpt));
	if (!set)
		return NULL;
	atomic_set(&set->set_refcount, 1);
	INIT_LIST_HEAD(&set->set_requests);
	init_waitqueue_head(&set->set_waitq);
	atomic_set(&set->set_new_count, 0);
	atomic_set(&set->set_remaining, 0);
	spin_lock_init(&set->set_new_req_lock);
	INIT_LIST_HEAD(&set->set_new_requests);
	INIT_LIST_HEAD(&set->set_cblist);
	set->set_max_inflight = UINT_MAX;
	set->set_producer = NULL;
	set->set_producer_arg = NULL;
	set->set_rc = 0;

	return set;
}
EXPORT_SYMBOL(ptlrpc_prep_set);
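
/*
 * Usage sketch (editorial addition): the typical pattern is to batch several
 * requests on one set and wait for all of them together (see
 * ptlrpc_set_wait() and ptlrpc_set_destroy() below):
 *
 *	struct ptlrpc_request_set *set;
 *
 *	set = ptlrpc_prep_set();
 *	if (!set)
 *		return -ENOMEM;
 *	ptlrpc_set_add_req(set, req1);
 *	ptlrpc_set_add_req(set, req2);
 *	rc = ptlrpc_set_wait(set);
 *	ptlrpc_set_destroy(set);
 */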
/**
 * Allocate and initialize new request set structure with flow control
 * extension. This extension allows controlling the number of requests
 * in-flight for the whole set. A callback function to generate requests
 * must be provided and the request set will keep the number of requests
 * sent over the wire to \a max.
 * Returns a pointer to the newly allocated set structure or NULL on error.
 */
struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func,
					     void *arg)
{
	struct ptlrpc_request_set *set;

	set = ptlrpc_prep_set();
	if (!set)
		return NULL;

	set->set_max_inflight = max;
	set->set_producer = func;
	set->set_producer_arg = arg;

	return set;
}
EXPORT_SYMBOL(ptlrpc_prep_fcset);
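
/*
 * Producer sketch (editorial addition): the callback is polled from
 * ptlrpc_set_producer() below whenever the in-flight count drops under
 * \a max, and returns -ENOENT once it has nothing left to generate.  The
 * helper and queue type here are hypothetical:
 *
 *	static int my_producer(struct ptlrpc_request_set *set, void *arg)
 *	{
 *		struct my_queue *q = arg;
 *		struct ptlrpc_request *req = my_queue_next_req(q);
 *
 *		if (!req)
 *			return -ENOENT;
 *		ptlrpc_set_add_req(set, req);
 *		return 0;
 *	}
 *
 *	set = ptlrpc_prep_fcset(8, my_producer, q);
 */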
/**
 * Wind down and free request set structure previously allocated with
 * ptlrpc_prep_set.
 * Ensures that all requests on the set have completed and removes
 * all requests from the request list in a set.
 * If any unsent requests happen to be on the list, pretends that they got
 * an error in flight and calls their completion handler.
 */
void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
{
	struct list_head *tmp;
	struct list_head *next;
	int expected_phase;
	int n = 0;

	/* Requests on the set should either all be completed, or all be new */
	expected_phase = (atomic_read(&set->set_remaining) == 0) ?
			 RQ_PHASE_COMPLETE : RQ_PHASE_NEW;
	list_for_each(tmp, &set->set_requests) {
		struct ptlrpc_request *req =
			list_entry(tmp, struct ptlrpc_request, rq_set_chain);

		LASSERT(req->rq_phase == expected_phase);
		n++;
	}

	LASSERTF(atomic_read(&set->set_remaining) == 0 ||
		 atomic_read(&set->set_remaining) == n, "%d / %d\n",
		 atomic_read(&set->set_remaining), n);

	list_for_each_safe(tmp, next, &set->set_requests) {
		struct ptlrpc_request *req =
			list_entry(tmp, struct ptlrpc_request, rq_set_chain);
		list_del_init(&req->rq_set_chain);

		LASSERT(req->rq_phase == expected_phase);

		if (req->rq_phase == RQ_PHASE_NEW) {
			ptlrpc_req_interpret(NULL, req, -EBADR);
			atomic_dec(&set->set_remaining);
		}

		spin_lock(&req->rq_lock);
		req->rq_set = NULL;
		req->rq_invalid_rqset = 0;
		spin_unlock(&req->rq_lock);

		ptlrpc_req_finished(req);
	}

	LASSERT(atomic_read(&set->set_remaining) == 0);

	ptlrpc_reqset_put(set);
}
EXPORT_SYMBOL(ptlrpc_set_destroy);
/**
 * Add a new request to the general purpose request set.
 * Assumes request reference from the caller.
 */
void ptlrpc_set_add_req(struct ptlrpc_request_set *set,
			struct ptlrpc_request *req)
{
	LASSERT(list_empty(&req->rq_set_chain));

	/* The set takes over the caller's request reference */
	list_add_tail(&req->rq_set_chain, &set->set_requests);
	req->rq_set = set;
	atomic_inc(&set->set_remaining);
	req->rq_queued_time = cfs_time_current();

	if (req->rq_reqmsg)
		lustre_msg_set_jobid(req->rq_reqmsg, NULL);

	if (set->set_producer)
		/*
		 * If the request set has a producer callback, the RPC must be
		 * sent straight away
		 */
		ptlrpc_send_new_req(req);
}
EXPORT_SYMBOL(ptlrpc_set_add_req);
/**
 * Add a request to a request set with a dedicated server thread
 * and wake the thread for any necessary processing.
 * Currently only used for ptlrpcd.
 */
void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
			    struct ptlrpc_request *req)
{
	struct ptlrpc_request_set *set = pc->pc_set;
	int count, i;

	LASSERT(!req->rq_set);
	LASSERT(test_bit(LIOD_STOP, &pc->pc_flags) == 0);

	spin_lock(&set->set_new_req_lock);
	/* The set takes over the caller's request reference. */
	req->rq_set = set;
	req->rq_queued_time = cfs_time_current();
	list_add_tail(&req->rq_set_chain, &set->set_new_requests);
	count = atomic_inc_return(&set->set_new_count);
	spin_unlock(&set->set_new_req_lock);

	/* Only need to call wakeup once for the first entry. */
	if (count == 1) {
		wake_up(&set->set_waitq);

		/*
		 * XXX: It may be unnecessary to wake up all the partners, but
		 *      to guarantee that the async RPC can be processed ASAP
		 *      we have no better choice. It may be fixed in the future.
		 */
		for (i = 0; i < pc->pc_npartners; i++)
			wake_up(&pc->pc_partners[i]->pc_set->set_waitq);
	}
}
EXPORT_SYMBOL(ptlrpc_set_add_new_req);
/**
 * Based on the current state of the import, determine if the request
 * can be sent, is an error, or should be delayed.
 *
 * Returns true if this request should be delayed. If false, and
 * *status is set, then the request can not be sent and *status is the
 * error code. If false and status is 0, then request can be sent.
 *
 * The imp->imp_lock must be held.
 */
static int ptlrpc_import_delay_req(struct obd_import *imp,
				   struct ptlrpc_request *req, int *status)
{
	int delay = 0;

	LASSERT(status);
	*status = 0;

	if (req->rq_ctx_init || req->rq_ctx_fini) {
		/* always allow ctx init/fini rpc go through */
	} else if (imp->imp_state == LUSTRE_IMP_NEW) {
		DEBUG_REQ(D_ERROR, req, "Uninitialized import.");
		*status = -EIO;
	} else if (imp->imp_state == LUSTRE_IMP_CLOSED) {
		/* pings may safely race with umount */
		DEBUG_REQ(lustre_msg_get_opc(req->rq_reqmsg) == OBD_PING ?
			  D_HA : D_ERROR, req, "IMP_CLOSED ");
		*status = -EIO;
	} else if (ptlrpc_send_limit_expired(req)) {
		/* probably doesn't need to be a D_ERROR after initial testing */
		DEBUG_REQ(D_ERROR, req, "send limit expired ");
		*status = -EIO;
	} else if (req->rq_send_state == LUSTRE_IMP_CONNECTING &&
		   imp->imp_state == LUSTRE_IMP_CONNECTING) {
		/* allow CONNECT even if import is invalid */
		if (atomic_read(&imp->imp_inval_count) != 0) {
			DEBUG_REQ(D_ERROR, req, "invalidate in flight");
			*status = -EIO;
		}
	} else if (imp->imp_invalid || imp->imp_obd->obd_no_recov) {
		if (!imp->imp_deactive)
			DEBUG_REQ(D_NET, req, "IMP_INVALID");
		*status = -ESHUTDOWN; /* bz 12940 */
	} else if (req->rq_import_generation != imp->imp_generation) {
		DEBUG_REQ(D_ERROR, req, "req wrong generation:");
		*status = -EIO;
	} else if (req->rq_send_state != imp->imp_state) {
		/* invalidate in progress - any requests should be dropped */
		if (atomic_read(&imp->imp_inval_count) != 0) {
			DEBUG_REQ(D_ERROR, req, "invalidate in flight");
			*status = -EIO;
		} else if (imp->imp_dlm_fake || req->rq_no_delay) {
			*status = -EWOULDBLOCK;
		} else if (req->rq_allow_replay &&
			   (imp->imp_state == LUSTRE_IMP_REPLAY ||
			    imp->imp_state == LUSTRE_IMP_REPLAY_LOCKS ||
			    imp->imp_state == LUSTRE_IMP_REPLAY_WAIT ||
			    imp->imp_state == LUSTRE_IMP_RECOVER)) {
			DEBUG_REQ(D_HA, req, "allow during recovery.\n");
		} else {
			delay = 1;
		}
	}

	return delay;
}
/**
 * Decide if the error message regarding provided request \a req
 * should be printed to the console or not.
 * Makes its decision based on request status and other properties.
 * Returns 1 to print error on the system console or 0 if not.
 */
static int ptlrpc_console_allow(struct ptlrpc_request *req)
{
	__u32 opc;
	int err;

	LASSERT(req->rq_reqmsg);
	opc = lustre_msg_get_opc(req->rq_reqmsg);

	/*
	 * Suppress particular reconnect errors which are to be expected. No
	 * errors are suppressed for the initial connection on an import
	 */
	if ((lustre_handle_is_used(&req->rq_import->imp_remote_handle)) &&
	    (opc == OST_CONNECT || opc == MDS_CONNECT || opc == MGS_CONNECT)) {
		/* Suppress timed out reconnect requests */
		if (req->rq_timedout)
			return 0;

		/* Suppress unavailable/again reconnect requests */
		err = lustre_msg_get_status(req->rq_repmsg);
		if (err == -ENODEV || err == -EAGAIN)
			return 0;
	}

	return 1;
}
/**
 * Check request processing status.
 * Returns the status.
 */
static int ptlrpc_check_status(struct ptlrpc_request *req)
{
	int err;

	err = lustre_msg_get_status(req->rq_repmsg);
	if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR) {
		struct obd_import *imp = req->rq_import;
		__u32 opc = lustre_msg_get_opc(req->rq_reqmsg);

		if (ptlrpc_console_allow(req))
			LCONSOLE_ERROR_MSG(0x011, "%s: Communicating with %s, operation %s failed with %d.\n",
					   imp->imp_obd->obd_name,
					   libcfs_nid2str(
						   imp->imp_connection->c_peer.nid),
					   ll_opcode2str(opc), err);
		return err < 0 ? err : -EINVAL;
	}

	if (err < 0)
		DEBUG_REQ(D_INFO, req, "status is %d", err);
	else if (err > 0)
		/* XXX: translate this error from net to host */
		DEBUG_REQ(D_INFO, req, "status is %d", err);

	return err;
}
/**
 * save pre-versions of objects into request for replay.
 * Versions are obtained from server reply.
 * used
 */
static void ptlrpc_save_versions(struct ptlrpc_request *req)
{
	struct lustre_msg *repmsg = req->rq_repmsg;
	struct lustre_msg *reqmsg = req->rq_reqmsg;
	__u64 *versions = lustre_msg_get_versions(repmsg);

	if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
		return;

	LASSERT(versions);
	lustre_msg_set_versions(reqmsg, versions);
	CDEBUG(D_INFO, "Client save versions [%#llx/%#llx]\n",
	       versions[0], versions[1]);
}
/**
 * Callback function called when client receives RPC reply for \a req.
 * Returns 0 on success or error code.
 * The return value would be assigned to req->rq_status by the caller
 * as request processing status.
 * This function also decides if the request needs to be saved for later replay.
 */
static int after_reply(struct ptlrpc_request *req)
{
	struct obd_import *imp = req->rq_import;
	struct obd_device *obd = req->rq_import->imp_obd;
	int rc;
	struct timespec64 work_start;
	long timediff;

	LASSERT(obd);
	/* repbuf must be unlinked */
	LASSERT(!req->rq_receiving_reply && !req->rq_reply_unlink);

	if (req->rq_reply_truncate) {
		if (ptlrpc_no_resend(req)) {
			DEBUG_REQ(D_ERROR, req, "reply buffer overflow, expected: %d, actual size: %d",
				  req->rq_nob_received, req->rq_repbuf_len);
			return -EOVERFLOW;
		}

		sptlrpc_cli_free_repbuf(req);
		/*
		 * Pass the required reply buffer size (include space for early
		 * reply). NB: no need to round up because alloc_repbuf will
		 * round it up
		 */
		req->rq_replen = req->rq_nob_received;
		req->rq_nob_received = 0;
		spin_lock(&req->rq_lock);
		req->rq_resend = 1;
		spin_unlock(&req->rq_lock);
		return 0;
	}

	/*
	 * NB Until this point, the whole of the incoming message,
	 * including buflens, status etc is in the sender's byte order.
	 */
	rc = sptlrpc_cli_unwrap_reply(req);
	if (rc) {
		DEBUG_REQ(D_ERROR, req, "unwrap reply failed (%d):", rc);
		return rc;
	}

	/* Security layer unwrap might ask to resend this request. */
	if (req->rq_resend)
		return 0;

	rc = unpack_reply(req);
	if (rc)
		return rc;

	/* retry indefinitely on EINPROGRESS */
	if (lustre_msg_get_status(req->rq_repmsg) == -EINPROGRESS &&
	    ptlrpc_no_resend(req) == 0 && !req->rq_no_retry_einprogress) {
		time64_t now = ktime_get_real_seconds();

		DEBUG_REQ(D_RPCTRACE, req, "Resending request on EINPROGRESS");
		spin_lock(&req->rq_lock);
		req->rq_resend = 1;
		spin_unlock(&req->rq_lock);
		req->rq_nr_resend++;

		/* allocate new xid to avoid reply reconstruction */
		if (!req->rq_bulk) {
			/* new xid is already allocated for bulk in ptlrpc_check_set() */
			req->rq_xid = ptlrpc_next_xid();
			DEBUG_REQ(D_RPCTRACE, req, "Allocating new xid for resend on EINPROGRESS");
		}

		/* Readjust the timeout for current conditions */
		ptlrpc_at_set_req_timeout(req);
		/*
		 * delay resend to give a chance to the server to get ready.
		 * The delay is increased by 1s on every resend and is capped to
		 * the current request timeout (i.e. obd_timeout if AT is off,
		 * or AT service time x 125% + 5s, see at_est2timeout)
		 */
		if (req->rq_nr_resend > req->rq_timeout)
			req->rq_sent = now + req->rq_timeout;
		else
			req->rq_sent = now + req->rq_nr_resend;

		return 0;
	}

	ktime_get_real_ts64(&work_start);
	timediff = (work_start.tv_sec - req->rq_arrival_time.tv_sec) * USEC_PER_SEC +
		   (work_start.tv_nsec - req->rq_arrival_time.tv_nsec) / NSEC_PER_USEC;
	if (obd->obd_svc_stats) {
		lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR,
				    timediff);
		ptlrpc_lprocfs_rpc_sent(req, timediff);
	}

	if (lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_REPLY &&
	    lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_ERR) {
		DEBUG_REQ(D_ERROR, req, "invalid packet received (type=%u)",
			  lustre_msg_get_type(req->rq_repmsg));
		return -EPROTO;
	}

	if (lustre_msg_get_opc(req->rq_reqmsg) != OBD_PING)
		CFS_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_PAUSE_REP, cfs_fail_val);
	ptlrpc_at_adj_service(req, lustre_msg_get_timeout(req->rq_repmsg));
	ptlrpc_at_adj_net_latency(req,
				  lustre_msg_get_service_time(req->rq_repmsg));

	rc = ptlrpc_check_status(req);
	imp->imp_connect_error = rc;

	if (rc) {
		/*
		 * Either we've been evicted, or the server has failed for
		 * some reason. Try to reconnect, and if that fails, punt to
		 * the upcall.
		 */
		if (ll_rpc_recoverable_error(rc)) {
			if (req->rq_send_state != LUSTRE_IMP_FULL ||
			    imp->imp_obd->obd_no_recov || imp->imp_dlm_fake) {
				return rc;
			}
			ptlrpc_request_handle_notconn(req);
			return rc;
		}
	} else {
		/*
		 * Let's look if server sent slv. Do it only for RPC with
		 * rc == 0.
		 */
		ldlm_cli_update_pool(req);
	}

	/* Store transno in reqmsg for replay. */
	if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)) {
		req->rq_transno = lustre_msg_get_transno(req->rq_repmsg);
		lustre_msg_set_transno(req->rq_reqmsg, req->rq_transno);
	}

	if (imp->imp_replayable) {
		spin_lock(&imp->imp_lock);
		/*
		 * No point in adding already-committed requests to the replay
		 * list, we will just remove them immediately. b=9829
		 */
		if (req->rq_transno != 0 &&
		    (req->rq_transno >
		     lustre_msg_get_last_committed(req->rq_repmsg) ||
		     req->rq_replay)) {
			/* version recovery */
			ptlrpc_save_versions(req);
			ptlrpc_retain_replayable_request(req, imp);
		} else if (req->rq_commit_cb &&
			   list_empty(&req->rq_replay_list)) {
			/*
			 * NB: don't call rq_commit_cb if it's already on
			 * rq_replay_list, ptlrpc_free_committed() will call
			 * it later, see LU-3618 for details
			 */
			spin_unlock(&imp->imp_lock);
			req->rq_commit_cb(req);
			spin_lock(&imp->imp_lock);
		}

		/* Replay-enabled imports return commit-status information. */
		if (lustre_msg_get_last_committed(req->rq_repmsg)) {
			imp->imp_peer_committed_transno =
				lustre_msg_get_last_committed(req->rq_repmsg);
		}

		ptlrpc_free_committed(imp);

		if (!list_empty(&imp->imp_replay_list)) {
			struct ptlrpc_request *last;

			last = list_entry(imp->imp_replay_list.prev,
					  struct ptlrpc_request,
					  rq_replay_list);
			/*
			 * Requests with rq_replay stay on the list even if no
			 * commit is expected.
			 */
			if (last->rq_transno > imp->imp_peer_committed_transno)
				ptlrpc_pinger_commit_expected(imp);
		}

		spin_unlock(&imp->imp_lock);
	}

	return rc;
}
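
/*
 * Worked example (editorial addition): for the EINPROGRESS resend path in
 * after_reply() above, the delay grows linearly with the resend count, so
 * the 3rd resend of a request is deferred by 3 seconds
 * (req->rq_sent = now + 3) until the count exceeds rq_timeout, after which
 * every resend waits the full rq_timeout.
 */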
/**
 * Helper function to send request \a req over the network for the first time
 * Also adjusts request phase.
 * Returns 0 on success or error code.
 */
static int ptlrpc_send_new_req(struct ptlrpc_request *req)
{
	struct obd_import *imp = req->rq_import;
	int rc;

	LASSERT(req->rq_phase == RQ_PHASE_NEW);
	if (req->rq_sent && (req->rq_sent > ktime_get_real_seconds()) &&
	    (!req->rq_generation_set ||
	     req->rq_import_generation == imp->imp_generation))
		return 0;

	ptlrpc_rqphase_move(req, RQ_PHASE_RPC);

	spin_lock(&imp->imp_lock);

	if (!req->rq_generation_set)
		req->rq_import_generation = imp->imp_generation;

	if (ptlrpc_import_delay_req(imp, req, &rc)) {
		spin_lock(&req->rq_lock);
		req->rq_waiting = 1;
		spin_unlock(&req->rq_lock);

		DEBUG_REQ(D_HA, req, "req from PID %d waiting for recovery: (%s != %s)",
			  lustre_msg_get_status(req->rq_reqmsg),
			  ptlrpc_import_state_name(req->rq_send_state),
			  ptlrpc_import_state_name(imp->imp_state));
		LASSERT(list_empty(&req->rq_list));
		list_add_tail(&req->rq_list, &imp->imp_delayed_list);
		atomic_inc(&req->rq_import->imp_inflight);
		spin_unlock(&imp->imp_lock);
		return 0;
	}

	if (rc != 0) {
		spin_unlock(&imp->imp_lock);
		req->rq_status = rc;
		ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
		return rc;
	}

	LASSERT(list_empty(&req->rq_list));
	list_add_tail(&req->rq_list, &imp->imp_sending_list);
	atomic_inc(&req->rq_import->imp_inflight);
	spin_unlock(&imp->imp_lock);

	lustre_msg_set_status(req->rq_reqmsg, current_pid());

	rc = sptlrpc_req_refresh_ctx(req, -1);
	if (rc) {
		if (req->rq_err) {
			req->rq_status = rc;
			return 1;
		}
		spin_lock(&req->rq_lock);
		req->rq_wait_ctx = 1;
		spin_unlock(&req->rq_lock);
		return 0;
	}

	CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:nid:opc %s:%s:%d:%llu:%s:%d\n",
	       current_comm(),
	       imp->imp_obd->obd_uuid.uuid,
	       lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
	       libcfs_nid2str(imp->imp_connection->c_peer.nid),
	       lustre_msg_get_opc(req->rq_reqmsg));

	rc = ptl_send_rpc(req, 0);
	if (rc) {
		DEBUG_REQ(D_HA, req, "send failed (%d); expect timeout", rc);
		spin_lock(&req->rq_lock);
		req->rq_net_err = 1;
		spin_unlock(&req->rq_lock);
		return rc;
	}
	return 0;
}
*set
)
1435 LASSERT(set
->set_producer
);
1437 remaining
= atomic_read(&set
->set_remaining
);
1440 * populate the ->set_requests list with requests until we
1441 * reach the maximum number of RPCs in flight for this set
1443 while (atomic_read(&set
->set_remaining
) < set
->set_max_inflight
) {
1444 rc
= set
->set_producer(set
, set
->set_producer_arg
);
1445 if (rc
== -ENOENT
) {
1446 /* no more RPC to produce */
1447 set
->set_producer
= NULL
;
1448 set
->set_producer_arg
= NULL
;
1453 return (atomic_read(&set
->set_remaining
) - remaining
);
/**
 * this sends any unsent RPCs in \a set and returns 1 if all are sent
 * and no more replies are expected.
 * (it is possible to get fewer replies than requests sent, e.g. due to
 * timed-out requests or requests that we had trouble sending out)
 *
 * NOTE: This function contains a potential schedule point (cond_resched()).
 */
int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
{
	struct list_head *tmp, *next;
	struct list_head comp_reqs;
	int force_timer_recalc = 0;

	if (atomic_read(&set->set_remaining) == 0)
		return 1;

	INIT_LIST_HEAD(&comp_reqs);
	list_for_each_safe(tmp, next, &set->set_requests) {
		struct ptlrpc_request *req =
			list_entry(tmp, struct ptlrpc_request, rq_set_chain);
		struct obd_import *imp = req->rq_import;
		int unregistered = 0;
		int rc = 0;

		/*
		 * This schedule point is mainly for the ptlrpcd caller of this
		 * function.  Most ptlrpc sets are not long-lived and unbounded
		 * in length, but at the least the set used by the ptlrpcd is.
		 * Since the processing time is unbounded, we need to insert an
		 * explicit schedule point to make the thread well-behaved.
		 */
		cond_resched();

		if (req->rq_phase == RQ_PHASE_NEW &&
		    ptlrpc_send_new_req(req)) {
			force_timer_recalc = 1;
		}

		/* delayed send - skip */
		if (req->rq_phase == RQ_PHASE_NEW && req->rq_sent)
			continue;

		/* delayed resend - skip */
		if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend &&
		    req->rq_sent > ktime_get_real_seconds())
			continue;

		if (!(req->rq_phase == RQ_PHASE_RPC ||
		      req->rq_phase == RQ_PHASE_BULK ||
		      req->rq_phase == RQ_PHASE_INTERPRET ||
		      req->rq_phase == RQ_PHASE_UNREGISTERING ||
		      req->rq_phase == RQ_PHASE_COMPLETE)) {
			DEBUG_REQ(D_ERROR, req, "bad phase %x", req->rq_phase);
			LBUG();
		}

		if (req->rq_phase == RQ_PHASE_UNREGISTERING) {
			LASSERT(req->rq_next_phase != req->rq_phase);
			LASSERT(req->rq_next_phase != RQ_PHASE_UNDEFINED);

			/*
			 * Skip processing until reply is unlinked. We
			 * can't return to pool before that and we can't
			 * call interpret before that. We need to make
			 * sure that all rdma transfers finished and will
			 * not corrupt any data.
			 */
			if (ptlrpc_client_recv_or_unlink(req) ||
			    ptlrpc_client_bulk_active(req))
				continue;

			/*
			 * Turn fail_loc off to prevent it from looping
			 * forever.
			 */
			if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) {
				OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK,
						     OBD_FAIL_ONCE);
			}
			if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK)) {
				OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK,
						     OBD_FAIL_ONCE);
			}

			/*
			 * Move to next phase if reply was successfully
			 * unlinked.
			 */
			ptlrpc_rqphase_move(req, req->rq_next_phase);
		}

		if (req->rq_phase == RQ_PHASE_COMPLETE) {
			list_move_tail(&req->rq_set_chain, &comp_reqs);
			continue;
		}

		if (req->rq_phase == RQ_PHASE_INTERPRET)
			goto interpret;

		/* Note that this also will start async reply unlink. */
		if (req->rq_net_err && !req->rq_timedout) {
			ptlrpc_expire_one_request(req, 1);

			/* Check if we still need to wait for unlink. */
			if (ptlrpc_client_recv_or_unlink(req) ||
			    ptlrpc_client_bulk_active(req))
				continue;
			/* If there is no need to resend, fail it now. */
			if (req->rq_no_resend) {
				if (req->rq_status == 0)
					req->rq_status = -EIO;
				ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
				goto interpret;
			} else {
				continue;
			}
		}

		if (req->rq_err) {
			spin_lock(&req->rq_lock);
			req->rq_replied = 0;
			spin_unlock(&req->rq_lock);
			if (req->rq_status == 0)
				req->rq_status = -EIO;
			ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
			goto interpret;
		}

		/*
		 * ptlrpc_set_wait->l_wait_event sets lwi_allow_intr
		 * so it sets rq_intr regardless of individual rpc
		 * timeouts. The synchronous IO waiting path sets
		 * rq_intr irrespective of whether ptlrpcd
		 * has seen a timeout.  Our policy is to only interpret
		 * interrupted rpcs after they have timed out, so we
		 * need to enforce that here.
		 */
		if (req->rq_intr && (req->rq_timedout || req->rq_waiting ||
				     req->rq_wait_ctx)) {
			req->rq_status = -EINTR;
			ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
			goto interpret;
		}

		if (req->rq_phase == RQ_PHASE_RPC) {
			if (req->rq_timedout || req->rq_resend ||
			    req->rq_waiting || req->rq_wait_ctx) {
				int status;

				if (!ptlrpc_unregister_reply(req, 1))
					continue;

				spin_lock(&imp->imp_lock);
				if (ptlrpc_import_delay_req(imp, req,
							    &status)) {
					/*
					 * put on delay list - only if we wait
					 * recovery finished - before send
					 */
					list_del_init(&req->rq_list);
					list_add_tail(&req->rq_list,
						      &imp->imp_delayed_list);
					spin_unlock(&imp->imp_lock);
					continue;
				}

				if (status != 0) {
					req->rq_status = status;
					ptlrpc_rqphase_move(req,
							    RQ_PHASE_INTERPRET);
					spin_unlock(&imp->imp_lock);
					goto interpret;
				}
				if (ptlrpc_no_resend(req) &&
				    !req->rq_wait_ctx) {
					req->rq_status = -ENOTCONN;
					ptlrpc_rqphase_move(req,
							    RQ_PHASE_INTERPRET);
					spin_unlock(&imp->imp_lock);
					goto interpret;
				}

				list_del_init(&req->rq_list);
				list_add_tail(&req->rq_list,
					      &imp->imp_sending_list);

				spin_unlock(&imp->imp_lock);

				spin_lock(&req->rq_lock);
				req->rq_waiting = 0;
				spin_unlock(&req->rq_lock);

				if (req->rq_timedout || req->rq_resend) {
					/* This is re-sending anyway, let's mark req as resend. */
					spin_lock(&req->rq_lock);
					req->rq_resend = 1;
					spin_unlock(&req->rq_lock);
					if (req->rq_bulk) {
						__u64 old_xid;

						if (!ptlrpc_unregister_bulk(req, 1))
							continue;

						/* ensure previous bulk fails */
						old_xid = req->rq_xid;
						req->rq_xid = ptlrpc_next_xid();
						CDEBUG(D_HA, "resend bulk old x%llu new x%llu\n",
						       old_xid, req->rq_xid);
					}
				}
				/*
				 * rq_wait_ctx is only touched by ptlrpcd,
				 * so no lock is needed here.
				 */
				status = sptlrpc_req_refresh_ctx(req, -1);
				if (status) {
					if (req->rq_err) {
						req->rq_status = status;
						spin_lock(&req->rq_lock);
						req->rq_wait_ctx = 0;
						spin_unlock(&req->rq_lock);
						force_timer_recalc = 1;
					} else {
						spin_lock(&req->rq_lock);
						req->rq_wait_ctx = 1;
						spin_unlock(&req->rq_lock);
					}

					continue;
				} else {
					spin_lock(&req->rq_lock);
					req->rq_wait_ctx = 0;
					spin_unlock(&req->rq_lock);
				}

				rc = ptl_send_rpc(req, 0);
				if (rc) {
					DEBUG_REQ(D_HA, req,
						  "send failed: rc = %d", rc);
					force_timer_recalc = 1;
					spin_lock(&req->rq_lock);
					req->rq_net_err = 1;
					spin_unlock(&req->rq_lock);
					continue;
				}
				/* need to reset the timeout */
				force_timer_recalc = 1;
			}

			spin_lock(&req->rq_lock);

			if (ptlrpc_client_early(req)) {
				ptlrpc_at_recv_early_reply(req);
				spin_unlock(&req->rq_lock);
				continue;
			}

			/* Still waiting for a reply? */
			if (ptlrpc_client_recv(req)) {
				spin_unlock(&req->rq_lock);
				continue;
			}

			/* Did we actually receive a reply? */
			if (!ptlrpc_client_replied(req)) {
				spin_unlock(&req->rq_lock);
				continue;
			}

			spin_unlock(&req->rq_lock);

			/*
			 * unlink from net because we are going to
			 * swab in-place of reply buffer
			 */
			unregistered = ptlrpc_unregister_reply(req, 1);
			if (!unregistered)
				continue;

			req->rq_status = after_reply(req);
			if (req->rq_resend)
				continue;

			/*
			 * If there is no bulk associated with this request,
			 * then we're done and should let the interpreter
			 * process the reply. Similarly if the RPC returned
			 * an error, and therefore the bulk will never arrive.
			 */
			if (!req->rq_bulk || req->rq_status < 0) {
				ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
				goto interpret;
			}

			ptlrpc_rqphase_move(req, RQ_PHASE_BULK);
		}

		LASSERT(req->rq_phase == RQ_PHASE_BULK);
		if (ptlrpc_client_bulk_active(req))
			continue;

		if (req->rq_bulk->bd_failure) {
			/*
			 * The RPC reply arrived OK, but the bulk screwed
			 * up!  Dead weird since the server told us the RPC
			 * was good after getting the REPLY for her GET or
			 * the ACK for her PUT.
			 */
			DEBUG_REQ(D_ERROR, req, "bulk transfer failed");
			req->rq_status = -EIO;
		}

		ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);

interpret:
		LASSERT(req->rq_phase == RQ_PHASE_INTERPRET);

		/*
		 * This moves to "unregistering" phase we need to wait for
		 * reply unlink.
		 */
		if (!unregistered && !ptlrpc_unregister_reply(req, 1)) {
			/* start async bulk unlink too */
			ptlrpc_unregister_bulk(req, 1);
			continue;
		}

		if (!ptlrpc_unregister_bulk(req, 1))
			continue;

		/* When calling interpret receive should already be finished. */
		LASSERT(!req->rq_receiving_reply);

		ptlrpc_req_interpret(env, req, req->rq_status);

		if (ptlrpcd_check_work(req)) {
			atomic_dec(&set->set_remaining);
			continue;
		}
		ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);

		CDEBUG(req->rq_reqmsg ? D_RPCTRACE : 0,
		       "Completed RPC pname:cluuid:pid:xid:nid:opc %s:%s:%d:%llu:%s:%d\n",
		       current_comm(), imp->imp_obd->obd_uuid.uuid,
		       lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
		       libcfs_nid2str(imp->imp_connection->c_peer.nid),
		       lustre_msg_get_opc(req->rq_reqmsg));

		spin_lock(&imp->imp_lock);
		/*
		 * Request already may be not on sending or delaying list. This
		 * may happen in the case of marking it erroneous for the case
		 * ptlrpc_import_delay_req(req, status) find it impossible to
		 * allow sending this rpc and returns *status != 0.
		 */
		if (!list_empty(&req->rq_list)) {
			list_del_init(&req->rq_list);
			atomic_dec(&imp->imp_inflight);
		}
		spin_unlock(&imp->imp_lock);

		atomic_dec(&set->set_remaining);
		wake_up_all(&imp->imp_recovery_waitq);

		if (set->set_producer) {
			/* produce a new request if possible */
			if (ptlrpc_set_producer(set) > 0)
				force_timer_recalc = 1;

			/*
			 * free the request that has just been completed
			 * in order not to pollute set->set_requests
			 */
			list_del_init(&req->rq_set_chain);
			spin_lock(&req->rq_lock);
			req->rq_set = NULL;
			req->rq_invalid_rqset = 0;
			spin_unlock(&req->rq_lock);

			/* record rq_status to compute the final status later */
			if (req->rq_status != 0)
				set->set_rc = req->rq_status;
			ptlrpc_req_finished(req);
		} else {
			list_move_tail(&req->rq_set_chain, &comp_reqs);
		}
	}

	/*
	 * move completed request at the head of list so it's easier for
	 * caller to find them
	 */
	list_splice(&comp_reqs, &set->set_requests);

	/* If we hit an error, we want to recover promptly. */
	return atomic_read(&set->set_remaining) == 0 || force_timer_recalc;
}
EXPORT_SYMBOL(ptlrpc_check_set);
/**
 * Time out request \a req. If \a async_unlink is set, do not wait
 * until LNet actually confirms network buffer unlinking.
 * Return 1 if we should give up further retrying attempts or 0 otherwise.
 */
int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink)
{
	struct obd_import *imp = req->rq_import;
	int rc = 0;

	spin_lock(&req->rq_lock);
	req->rq_timedout = 1;
	spin_unlock(&req->rq_lock);

	DEBUG_REQ(D_WARNING, req, "Request sent has %s: [sent %lld/real %lld]",
		  req->rq_net_err ? "failed due to network error" :
		  ((req->rq_real_sent == 0 ||
		    req->rq_real_sent < req->rq_sent ||
		    req->rq_real_sent >= req->rq_deadline) ?
		   "timed out for sent delay" : "timed out for slow reply"),
		  (s64)req->rq_sent, (s64)req->rq_real_sent);

	if (imp && obd_debug_peer_on_timeout)
		LNetDebugPeer(imp->imp_connection->c_peer);

	ptlrpc_unregister_reply(req, async_unlink);
	ptlrpc_unregister_bulk(req, async_unlink);

	if (obd_dump_on_timeout)
		libcfs_debug_dumplog();

	if (!imp) {
		DEBUG_REQ(D_HA, req, "NULL import: already cleaned up?");
		return 1;
	}

	atomic_inc(&imp->imp_timeouts);

	/* The DLM server doesn't want recovery run on its imports. */
	if (imp->imp_dlm_fake)
		return 1;

	/*
	 * If this request is for recovery or other primordial tasks,
	 * then error it out here.
	 */
	if (req->rq_ctx_init || req->rq_ctx_fini ||
	    req->rq_send_state != LUSTRE_IMP_FULL ||
	    imp->imp_obd->obd_no_recov) {
		DEBUG_REQ(D_RPCTRACE, req, "err -110, sent_state=%s (now=%s)",
			  ptlrpc_import_state_name(req->rq_send_state),
			  ptlrpc_import_state_name(imp->imp_state));
		spin_lock(&req->rq_lock);
		req->rq_status = -ETIMEDOUT;
		req->rq_err = 1;
		spin_unlock(&req->rq_lock);
		return 1;
	}

	/*
	 * if a request can't be resent we can't wait for an answer after
	 * the timeout
	 */
	if (ptlrpc_no_resend(req)) {
		DEBUG_REQ(D_RPCTRACE, req, "TIMEOUT-NORESEND:");
		rc = 1;
	}

	ptlrpc_fail_import(imp, lustre_msg_get_conn_cnt(req->rq_reqmsg));

	return rc;
}
/**
 * Time out all uncompleted requests in request set pointed by \a data
 * Callback used when waiting on sets with l_wait_event.
 * Always returns 1.
 */
int ptlrpc_expired_set(void *data)
{
	struct ptlrpc_request_set *set = data;
	struct list_head *tmp;
	time64_t now = ktime_get_real_seconds();

	/* A timeout expired. See which reqs it applies to...  */
	list_for_each(tmp, &set->set_requests) {
		struct ptlrpc_request *req =
			list_entry(tmp, struct ptlrpc_request, rq_set_chain);

		/* don't expire request waiting for context */
		if (req->rq_wait_ctx)
			continue;

		/* Request in-flight? */
		if (!((req->rq_phase == RQ_PHASE_RPC &&
		       !req->rq_waiting && !req->rq_resend) ||
		      (req->rq_phase == RQ_PHASE_BULK)))
			continue;

		if (req->rq_timedout ||	    /* already dealt with */
		    req->rq_deadline > now) /* not expired */
			continue;

		/*
		 * Deal with this guy. Do it asynchronously to not block
		 * ptlrpcd thread.
		 */
		ptlrpc_expire_one_request(req, 1);
	}

	/*
	 * When waiting for a whole set, we always break out of the
	 * sleep so we can recalculate the timeout, or enable interrupts
	 * if everyone's timed out.
	 */
	return 1;
}
EXPORT_SYMBOL(ptlrpc_expired_set);
/**
 * Sets rq_intr flag in \a req under spinlock.
 */
void ptlrpc_mark_interrupted(struct ptlrpc_request *req)
{
	spin_lock(&req->rq_lock);
	req->rq_intr = 1;
	spin_unlock(&req->rq_lock);
}
EXPORT_SYMBOL(ptlrpc_mark_interrupted);
/**
 * Interrupts (sets interrupted flag) all uncompleted requests in
 * a set \a data. Callback for l_wait_event for interruptible waits.
 */
void ptlrpc_interrupted_set(void *data)
{
	struct ptlrpc_request_set *set = data;
	struct list_head *tmp;

	CDEBUG(D_RPCTRACE, "INTERRUPTED SET %p\n", set);

	list_for_each(tmp, &set->set_requests) {
		struct ptlrpc_request *req =
			list_entry(tmp, struct ptlrpc_request, rq_set_chain);

		if (req->rq_phase != RQ_PHASE_RPC &&
		    req->rq_phase != RQ_PHASE_UNREGISTERING)
			continue;

		ptlrpc_mark_interrupted(req);
	}
}
EXPORT_SYMBOL(ptlrpc_interrupted_set);
/**
 * Get the smallest timeout in the set; this does NOT set a timeout.
 */
int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set)
{
	struct list_head *tmp;
	time64_t now = ktime_get_real_seconds();
	int timeout = 0;
	struct ptlrpc_request *req;
	time64_t deadline;

	list_for_each(tmp, &set->set_requests) {
		req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);

		/* Request in-flight? */
		if (!(((req->rq_phase == RQ_PHASE_RPC) && !req->rq_waiting) ||
		      (req->rq_phase == RQ_PHASE_BULK) ||
		      (req->rq_phase == RQ_PHASE_NEW)))
			continue;

		/* Already timed out. */
		if (req->rq_timedout)
			continue;

		/* Waiting for ctx. */
		if (req->rq_wait_ctx)
			continue;

		if (req->rq_phase == RQ_PHASE_NEW)
			deadline = req->rq_sent;
		else if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend)
			deadline = req->rq_sent;
		else
			deadline = req->rq_sent + req->rq_timeout;

		if (deadline <= now)	/* actually expired already */
			timeout = 1;	/* ASAP */
		else if (timeout == 0 || timeout > deadline - now)
			timeout = deadline - now;
	}
	return timeout;
}
EXPORT_SYMBOL(ptlrpc_set_next_timeout);
/**
 * Send all unsent requests from the set and then wait until all
 * requests in the set complete (either get a reply, timeout, get an
 * error or otherwise be interrupted).
 * Returns 0 on success or error code otherwise.
 */
int ptlrpc_set_wait(struct ptlrpc_request_set *set)
{
	struct list_head *tmp;
	struct ptlrpc_request *req;
	struct l_wait_info lwi;
	int rc, timeout;

	if (set->set_producer)
		(void)ptlrpc_set_producer(set);
	else
		list_for_each(tmp, &set->set_requests) {
			req = list_entry(tmp, struct ptlrpc_request,
					 rq_set_chain);
			if (req->rq_phase == RQ_PHASE_NEW)
				(void)ptlrpc_send_new_req(req);
		}

	if (list_empty(&set->set_requests))
		return 0;

	do {
		timeout = ptlrpc_set_next_timeout(set);

		/*
		 * wait until all complete, interrupted, or an in-flight
		 * req times out
		 */
		CDEBUG(D_RPCTRACE, "set %p going to sleep for %d seconds\n",
		       set, timeout);

		if (timeout == 0 && !cfs_signal_pending())
			/*
			 * No requests are in-flight (either timed out
			 * or delayed), so we can allow interrupts.
			 * We still want to block for a limited time,
			 * so we allow interrupts during the timeout.
			 */
			lwi = LWI_TIMEOUT_INTR_ALL(cfs_time_seconds(1),
						   ptlrpc_expired_set,
						   ptlrpc_interrupted_set, set);
		else
			/*
			 * At least one request is in flight, so no
			 * interrupts are allowed. Wait until all
			 * complete, or an in-flight req times out.
			 */
			lwi = LWI_TIMEOUT(cfs_time_seconds(timeout ? timeout : 1),
					  ptlrpc_expired_set, set);

		rc = l_wait_event(set->set_waitq, ptlrpc_check_set(NULL, set), &lwi);

		/*
		 * LU-769 - if we ignored the signal because it was already
		 * pending when we started, we need to handle it now or we risk
		 * it being ignored forever
		 */
		if (rc == -ETIMEDOUT && !lwi.lwi_allow_intr &&
		    cfs_signal_pending()) {
			sigset_t blocked_sigs =
				cfs_block_sigsinv(LUSTRE_FATAL_SIGS);

			/*
			 * In fact we only interrupt for the "fatal" signals
			 * like SIGINT or SIGKILL. We still ignore less
			 * important signals since ptlrpc set is not easily
			 * reentrant from userspace again
			 */
			if (cfs_signal_pending())
				ptlrpc_interrupted_set(set);
			cfs_restore_sigs(blocked_sigs);
		}

		LASSERT(rc == 0 || rc == -EINTR || rc == -ETIMEDOUT);

		/*
		 * -EINTR => all requests have been flagged rq_intr so next
		 * check completes.
		 * -ETIMEDOUT => someone timed out.  When all reqs have
		 * timed out, signals are enabled allowing completion with
		 * EINTR.
		 * I don't really care if we go once more round the loop in
		 * the error cases -eeb.
		 */
		if (rc == 0 && atomic_read(&set->set_remaining) == 0) {
			list_for_each(tmp, &set->set_requests) {
				req = list_entry(tmp, struct ptlrpc_request,
						 rq_set_chain);
				spin_lock(&req->rq_lock);
				req->rq_invalid_rqset = 1;
				spin_unlock(&req->rq_lock);
			}
		}
	} while (rc != 0 || atomic_read(&set->set_remaining) != 0);

	LASSERT(atomic_read(&set->set_remaining) == 0);

	rc = set->set_rc; /* rq_status of already freed requests if any */
	list_for_each(tmp, &set->set_requests) {
		req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);

		LASSERT(req->rq_phase == RQ_PHASE_COMPLETE);
		if (req->rq_status != 0)
			rc = req->rq_status;
	}

	if (set->set_interpret) {
		int (*interpreter)(struct ptlrpc_request_set *set, void *, int) =
			set->set_interpret;
		rc = interpreter(set, set->set_arg, rc);
	} else {
		struct ptlrpc_set_cbdata *cbdata, *n;
		int err;

		list_for_each_entry_safe(cbdata, n,
					 &set->set_cblist, psc_item) {
			list_del_init(&cbdata->psc_item);
			err = cbdata->psc_interpret(set, cbdata->psc_data, rc);
			if (err && !rc)
				rc = err;
			kfree(cbdata);
		}
	}

	return rc;
}
EXPORT_SYMBOL(ptlrpc_set_wait);
/**
 * Helper function for request freeing.
 * Called when request count reached zero and request needs to be freed.
 * Removes request from all sorts of sending/replay lists it might be on,
 * frees network buffers if any are present.
 * If \a locked is set, that means caller is already holding import imp_lock
 * and so we no longer need to reobtain it (for certain lists manipulations)
 */
static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
{
	if (!request)
		return;

	LASSERTF(!request->rq_receiving_reply, "req %p\n", request);
	LASSERTF(!request->rq_rqbd, "req %p\n", request); /* client-side */
	LASSERTF(list_empty(&request->rq_list), "req %p\n", request);
	LASSERTF(list_empty(&request->rq_set_chain), "req %p\n", request);
	LASSERTF(list_empty(&request->rq_exp_list), "req %p\n", request);
	LASSERTF(!request->rq_replay, "req %p\n", request);

	req_capsule_fini(&request->rq_pill);

	/*
	 * We must take it off the imp_replay_list first. Otherwise, we'll set
	 * request->rq_reqmsg to NULL while osc_close is dereferencing it.
	 */
	if (request->rq_import) {
		if (!locked)
			spin_lock(&request->rq_import->imp_lock);
		list_del_init(&request->rq_replay_list);
		if (!locked)
			spin_unlock(&request->rq_import->imp_lock);
	}
	LASSERTF(list_empty(&request->rq_replay_list), "req %p\n", request);

	if (atomic_read(&request->rq_refcount) != 0) {
		DEBUG_REQ(D_ERROR, request,
			  "freeing request with nonzero refcount");
		LBUG();
	}

	if (request->rq_repbuf)
		sptlrpc_cli_free_repbuf(request);
	if (request->rq_export) {
		class_export_put(request->rq_export);
		request->rq_export = NULL;
	}
	if (request->rq_import) {
		class_import_put(request->rq_import);
		request->rq_import = NULL;
	}
	if (request->rq_bulk)
		ptlrpc_free_bulk_pin(request->rq_bulk);

	if (request->rq_reqbuf || request->rq_clrbuf)
		sptlrpc_cli_free_reqbuf(request);

	if (request->rq_cli_ctx)
		sptlrpc_req_put_ctx(request, !locked);

	if (request->rq_pool)
		__ptlrpc_free_req_to_pool(request);
	else
		ptlrpc_request_cache_free(request);
}
/**
 * Drops one reference count for request \a request.
 * \a locked set indicates that caller holds import imp_lock.
 * Frees the request when reference count reaches zero.
 */
static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked)
{
	if (!request)
		return 1;

	if (request == LP_POISON ||
	    request->rq_reqmsg == LP_POISON) {
		CERROR("dereferencing freed request (bug 575)\n");
		LBUG();
		return 1;
	}

	DEBUG_REQ(D_INFO, request, "refcount now %u",
		  atomic_read(&request->rq_refcount) - 1);

	if (atomic_dec_and_test(&request->rq_refcount)) {
		__ptlrpc_free_req(request, locked);
		return 1;
	}

	return 0;
}
/**
 * Drops one reference count for a request.
 */
void ptlrpc_req_finished(struct ptlrpc_request *request)
{
	__ptlrpc_req_finished(request, 0);
}
EXPORT_SYMBOL(ptlrpc_req_finished);
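
/*
 * Illustrative sketch (editorial addition, not in the original source):
 * the usual reference pairing around async use of a request. Every
 * ptlrpc_request_addref() (defined later in this file) is balanced by a
 * ptlrpc_req_finished(); the final put triggers __ptlrpc_free_req().
 */
#if 0
	ptlrpc_request_addref(req);	/* keep req alive for another user */
	/* ... hand req to another context ... */
	ptlrpc_req_finished(req);	/* drop that reference again */
#endif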
/**
 * Returns xid of a \a request
 */
__u64 ptlrpc_req_xid(struct ptlrpc_request *request)
{
	return request->rq_xid;
}
EXPORT_SYMBOL(ptlrpc_req_xid);
/**
 * Disengage the client's reply buffer from the network
 * NB does _NOT_ unregister any client-side bulk.
 * IDEMPOTENT, but _not_ safe against concurrent callers.
 * The request owner (i.e. the thread doing the I/O) must call...
 * Returns 0 on success or 1 if unregistering cannot be made.
 */
int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
{
	int rc;
	wait_queue_head_t *wq;
	struct l_wait_info lwi;

	/* Might sleep. */
	LASSERT(!in_interrupt());

	/* Let's setup deadline for reply unlink. */
	if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
	    async && request->rq_reply_deadline == 0)
		request->rq_reply_deadline =
			ktime_get_real_seconds() + LONG_UNLINK;

	/* Nothing left to do. */
	if (!ptlrpc_client_recv_or_unlink(request))
		return 1;

	LNetMDUnlink(request->rq_reply_md_h);

	/* Let's check it once again. */
	if (!ptlrpc_client_recv_or_unlink(request))
		return 1;

	/* Move to "Unregistering" phase as reply was not unlinked yet. */
	ptlrpc_rqphase_move(request, RQ_PHASE_UNREGISTERING);

	/* Do not wait for unlink to finish. */
	if (async)
		return 0;

	/*
	 * We have to l_wait_event() whatever the result, to give liblustre
	 * a chance to run reply_in_callback(), and to make sure we've
	 * unlinked before returning a req to the pool.
	 */
	if (request->rq_set)
		wq = &request->rq_set->set_waitq;
	else
		wq = &request->rq_reply_waitq;

	for (;;) {
		/*
		 * Network access will complete in finite time but the HUGE
		 * timeout lets us CWARN for visibility of sluggish NALs
		 */
		lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
					   cfs_time_seconds(1), NULL, NULL);
		rc = l_wait_event(*wq, !ptlrpc_client_recv_or_unlink(request),
				  &lwi);
		if (rc == 0) {
			ptlrpc_rqphase_move(request, request->rq_next_phase);
			return 1;
		}

		LASSERT(rc == -ETIMEDOUT);
		DEBUG_REQ(D_WARNING, request,
			  "Unexpectedly long timeout rvcng=%d unlnk=%d/%d",
			  request->rq_receiving_reply,
			  request->rq_req_unlink, request->rq_reply_unlink);
	}
	return 0;
}
EXPORT_SYMBOL(ptlrpc_unregister_reply);
static void ptlrpc_free_request(struct ptlrpc_request *req)
{
	spin_lock(&req->rq_lock);
	req->rq_replay = 0;
	spin_unlock(&req->rq_lock);

	if (req->rq_commit_cb)
		req->rq_commit_cb(req);
	list_del_init(&req->rq_replay_list);

	__ptlrpc_req_finished(req, 1);
}
/**
 * the request is committed and dropped from the replay list of its import
 */
void ptlrpc_request_committed(struct ptlrpc_request *req, int force)
{
	struct obd_import *imp = req->rq_import;

	spin_lock(&imp->imp_lock);
	if (list_empty(&req->rq_replay_list)) {
		spin_unlock(&imp->imp_lock);
		return;
	}

	if (force || req->rq_transno <= imp->imp_peer_committed_transno)
		ptlrpc_free_request(req);

	spin_unlock(&imp->imp_lock);
}
EXPORT_SYMBOL(ptlrpc_request_committed);
/**
 * Iterates through replay_list on import and prunes
 * all requests that have transno smaller than last_committed for the
 * import and don't have rq_replay set.
 * Since requests are sorted in transno order, stops when meeting first
 * transno bigger than last_committed.
 * caller must hold imp->imp_lock
 */
void ptlrpc_free_committed(struct obd_import *imp)
{
	struct ptlrpc_request *req, *saved;
	struct ptlrpc_request *last_req = NULL; /* temporary fire escape */
	bool skip_committed_list = true;

	assert_spin_locked(&imp->imp_lock);

	if (imp->imp_peer_committed_transno == imp->imp_last_transno_checked &&
	    imp->imp_generation == imp->imp_last_generation_checked) {
		CDEBUG(D_INFO, "%s: skip recheck: last_committed %llu\n",
		       imp->imp_obd->obd_name, imp->imp_peer_committed_transno);
		return;
	}
	CDEBUG(D_RPCTRACE, "%s: committing for last_committed %llu gen %d\n",
	       imp->imp_obd->obd_name, imp->imp_peer_committed_transno,
	       imp->imp_generation);

	if (imp->imp_generation != imp->imp_last_generation_checked)
		skip_committed_list = false;

	imp->imp_last_transno_checked = imp->imp_peer_committed_transno;
	imp->imp_last_generation_checked = imp->imp_generation;

	list_for_each_entry_safe(req, saved, &imp->imp_replay_list,
				 rq_replay_list) {
		/* XXX ok to remove when 1357 resolved - rread 05/29/03 */
		LASSERT(req != last_req);
		last_req = req;

		if (req->rq_transno == 0) {
			DEBUG_REQ(D_EMERG, req, "zero transno during replay");
			LBUG();
		}
		if (req->rq_import_generation < imp->imp_generation) {
			DEBUG_REQ(D_RPCTRACE, req, "free request with old gen");
			goto free_req;
		}

		/* not yet committed */
		if (req->rq_transno > imp->imp_peer_committed_transno) {
			DEBUG_REQ(D_RPCTRACE, req, "stopping search");
			break;
		}

		if (req->rq_replay) {
			DEBUG_REQ(D_RPCTRACE, req, "keeping (FL_REPLAY)");
			list_move_tail(&req->rq_replay_list,
				       &imp->imp_committed_list);
			continue;
		}

		DEBUG_REQ(D_INFO, req, "commit (last_committed %llu)",
			  imp->imp_peer_committed_transno);
free_req:
		ptlrpc_free_request(req);
	}
	if (skip_committed_list)
		return;

	list_for_each_entry_safe(req, saved, &imp->imp_committed_list,
				 rq_replay_list) {
		LASSERT(req->rq_transno != 0);
		if (req->rq_import_generation < imp->imp_generation) {
			DEBUG_REQ(D_RPCTRACE, req, "free stale open request");
			ptlrpc_free_request(req);
		}
	}
}
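
/*
 * Worked example (editorial addition, not in the original source): with a
 * replay list holding transnos [3, 5, 8], imp_peer_committed_transno = 6,
 * a current generation, and no rq_replay flags set, requests 3 and 5 are
 * freed and the scan stops at 8 ("stopping search"). Had request 5
 * carried rq_replay, it would instead move to imp_committed_list.
 */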
/**
 * Schedule previously sent request for resend.
 * For bulk requests we assign new xid (to avoid problems with
 * lost replies and therefore several transfers landing into the same buffer
 * from different sending attempts).
 */
void ptlrpc_resend_req(struct ptlrpc_request *req)
{
	DEBUG_REQ(D_HA, req, "going to resend");
	spin_lock(&req->rq_lock);

	/*
	 * Request got reply but is still linked to the import list.
	 * Let ptlrpc_check_set() process it.
	 */
	if (ptlrpc_client_replied(req)) {
		spin_unlock(&req->rq_lock);
		DEBUG_REQ(D_HA, req, "it has reply, so skip it");
		return;
	}

	lustre_msg_set_handle(req->rq_reqmsg, &(struct lustre_handle){ 0 });
	req->rq_status = -EAGAIN;

	req->rq_resend = 1;
	req->rq_net_err = 0;
	req->rq_timedout = 0;
	if (req->rq_bulk) {
		__u64 old_xid = req->rq_xid;

		/* ensure previous bulk fails */
		req->rq_xid = ptlrpc_next_xid();
		CDEBUG(D_HA, "resend bulk old x%llu new x%llu\n",
		       old_xid, req->rq_xid);
	}
	ptlrpc_client_wake_req(req);
	spin_unlock(&req->rq_lock);
}
EXPORT_SYMBOL(ptlrpc_resend_req);
/**
 * Grab additional reference on a request \a req
 */
struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req)
{
	atomic_inc(&req->rq_refcount);
	return req;
}
EXPORT_SYMBOL(ptlrpc_request_addref);
/**
 * Add a request to import replay_list.
 * Must be called under imp_lock
 */
void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
				      struct obd_import *imp)
{
	struct list_head *tmp;

	assert_spin_locked(&imp->imp_lock);

	if (req->rq_transno == 0) {
		DEBUG_REQ(D_EMERG, req, "saving request with zero transno");
		LBUG();
	}

	/*
	 * clear this for new requests that were resent as well
	 * as resent replayed requests.
	 */
	lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);

	/* don't re-add requests that have been replayed */
	if (!list_empty(&req->rq_replay_list))
		return;

	lustre_msg_add_flags(req->rq_reqmsg, MSG_REPLAY);

	LASSERT(imp->imp_replayable);
	/* Balanced in ptlrpc_free_committed, usually. */
	ptlrpc_request_addref(req);
	list_for_each_prev(tmp, &imp->imp_replay_list) {
		struct ptlrpc_request *iter =
			list_entry(tmp, struct ptlrpc_request, rq_replay_list);

		/*
		 * We may have duplicate transnos if we create and then
		 * open a file, or for closes retained if to match creating
		 * opens, so use req->rq_xid as a secondary key.
		 * (See bugs 684, 685, and 428.)
		 * XXX no longer needed, but all opens need transnos!
		 */
		if (iter->rq_transno > req->rq_transno)
			continue;

		if (iter->rq_transno == req->rq_transno) {
			LASSERT(iter->rq_xid != req->rq_xid);
			if (iter->rq_xid > req->rq_xid)
				continue;
		}

		list_add(&req->rq_replay_list, &iter->rq_replay_list);
		return;
	}

	list_add(&req->rq_replay_list, &imp->imp_replay_list);
}
EXPORT_SYMBOL(ptlrpc_retain_replayable_request);
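
/*
 * Worked example (editorial addition, not in the original source): with a
 * replay list ordered [transno 5, transno 7 (xid 21), transno 9], adding a
 * request with transno 7 and xid 24 scans backwards past transno 9, stops
 * at the transno-7 entry whose xid is smaller, and inserts after it,
 * yielding [5, 7 (xid 21), 7 (xid 24), 9].
 */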
/**
 * Send request and wait until it completes.
 * Returns request processing status.
 */
int ptlrpc_queue_wait(struct ptlrpc_request *req)
{
	struct ptlrpc_request_set *set;
	int rc;

	LASSERT(!req->rq_set);
	LASSERT(!req->rq_receiving_reply);

	set = ptlrpc_prep_set();
	if (!set) {
		CERROR("cannot allocate ptlrpc set: rc = %d\n", -ENOMEM);
		return -ENOMEM;
	}

	/* for distributed debugging */
	lustre_msg_set_status(req->rq_reqmsg, current_pid());

	/* add a ref for the set (see comment in ptlrpc_set_add_req) */
	ptlrpc_request_addref(req);
	ptlrpc_set_add_req(set, req);
	rc = ptlrpc_set_wait(set);
	ptlrpc_set_destroy(set);

	return rc;
}
EXPORT_SYMBOL(ptlrpc_queue_wait);
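
/*
 * Illustrative sketch (editorial addition, not in the original source): a
 * complete synchronous round trip through ptlrpc_queue_wait(), using the
 * OBD_PING request format as a concrete case. Error handling is minimal.
 */
#if 0
static int example_sync_ping(struct obd_import *imp)
{
	struct ptlrpc_request *req;
	int rc;

	req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING,
					LUSTRE_OBD_VERSION, OBD_PING);
	if (!req)
		return -ENOMEM;

	ptlrpc_request_set_replen(req);

	rc = ptlrpc_queue_wait(req);	/* send and block for completion */
	ptlrpc_req_finished(req);	/* drop the caller's reference */
	return rc;
}
#endif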
struct ptlrpc_replay_async_args {
	int praa_old_state;
	int praa_old_status;
};
/**
 * Callback used for replayed requests reply processing.
 * In case of successful reply calls registered request replay callback.
 * In case of error restart replay process.
 */
static int ptlrpc_replay_interpret(const struct lu_env *env,
				   struct ptlrpc_request *req,
				   void *data, int rc)
{
	struct ptlrpc_replay_async_args *aa = data;
	struct obd_import *imp = req->rq_import;

	atomic_dec(&imp->imp_replay_inflight);

	if (!ptlrpc_client_replied(req)) {
		CERROR("request replay timed out, restarting recovery\n");
		rc = -ETIMEDOUT;
		goto out;
	}

	if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR &&
	    (lustre_msg_get_status(req->rq_repmsg) == -ENOTCONN ||
	     lustre_msg_get_status(req->rq_repmsg) == -ENODEV)) {
		rc = lustre_msg_get_status(req->rq_repmsg);
		goto out;
	}

	/** VBR: check version failure */
	if (lustre_msg_get_status(req->rq_repmsg) == -EOVERFLOW) {
		/** replay failed due to a version mismatch */
		DEBUG_REQ(D_WARNING, req, "Version mismatch during replay\n");
		spin_lock(&imp->imp_lock);
		imp->imp_vbr_failed = 1;
		imp->imp_no_lock_replay = 1;
		spin_unlock(&imp->imp_lock);
		lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status);
	} else {
		/** The transno had better not change over replay. */
		LASSERTF(lustre_msg_get_transno(req->rq_reqmsg) ==
			 lustre_msg_get_transno(req->rq_repmsg) ||
			 lustre_msg_get_transno(req->rq_repmsg) == 0,
			 "%#llx/%#llx\n",
			 lustre_msg_get_transno(req->rq_reqmsg),
			 lustre_msg_get_transno(req->rq_repmsg));
	}

	spin_lock(&imp->imp_lock);
	/** if replaying by version, a gap occurred on the server; don't trust locks */
	if (lustre_msg_get_flags(req->rq_repmsg) & MSG_VERSION_REPLAY)
		imp->imp_no_lock_replay = 1;
	imp->imp_last_replay_transno = lustre_msg_get_transno(req->rq_reqmsg);
	spin_unlock(&imp->imp_lock);
	LASSERT(imp->imp_last_replay_transno);

	/* transaction number shouldn't be bigger than the latest replayed */
	if (req->rq_transno > lustre_msg_get_transno(req->rq_reqmsg)) {
		DEBUG_REQ(D_ERROR, req,
			  "Reported transno %llu is bigger than the replayed one: %llu",
			  req->rq_transno,
			  lustre_msg_get_transno(req->rq_reqmsg));
		rc = -EINVAL;
		goto out;
	}

	DEBUG_REQ(D_HA, req, "got rep");

	/* let the callback do fixups, possibly including in the request */
	if (req->rq_replay_cb)
		req->rq_replay_cb(req);

	if (ptlrpc_client_replied(req) &&
	    lustre_msg_get_status(req->rq_repmsg) != aa->praa_old_status) {
		DEBUG_REQ(D_ERROR, req, "status %d, old was %d",
			  lustre_msg_get_status(req->rq_repmsg),
			  aa->praa_old_status);
	} else {
		/* Put it back for re-replay. */
		lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status);
	}

	/*
	 * Errors while replay can set transno to 0, but
	 * imp_last_replay_transno shouldn't be set to 0 anyway
	 */
	if (req->rq_transno == 0)
		CERROR("Transno is 0 during replay!\n");

	/* continue with recovery */
	rc = ptlrpc_import_recovery_state_machine(imp);
out:
	req->rq_send_state = aa->praa_old_state;

	if (rc != 0)
		/* this replay failed, so restart recovery */
		ptlrpc_connect_import(imp);

	return rc;
}
/**
 * Prepares and queues request for replay.
 * Adds it to ptlrpcd queue for actual sending.
 * Returns 0 on success.
 */
int ptlrpc_replay_req(struct ptlrpc_request *req)
{
	struct ptlrpc_replay_async_args *aa;

	LASSERT(req->rq_import->imp_state == LUSTRE_IMP_REPLAY);

	LASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
	aa = ptlrpc_req_async_args(req);
	memset(aa, 0, sizeof(*aa));

	/* Prepare request to be resent with ptlrpcd */
	aa->praa_old_state = req->rq_send_state;
	req->rq_send_state = LUSTRE_IMP_REPLAY;
	req->rq_phase = RQ_PHASE_NEW;
	req->rq_next_phase = RQ_PHASE_UNDEFINED;
	if (req->rq_repmsg)
		aa->praa_old_status = lustre_msg_get_status(req->rq_repmsg);
	req->rq_status = 0;
	req->rq_interpret_reply = ptlrpc_replay_interpret;
	/* Readjust the timeout for current conditions */
	ptlrpc_at_set_req_timeout(req);

	/*
	 * Tell server the net_latency, so the server can calculate how long
	 * it should wait for next replay
	 */
	lustre_msg_set_service_time(req->rq_reqmsg,
				    ptlrpc_at_get_net_latency(req));
	DEBUG_REQ(D_HA, req, "REPLAY");

	atomic_inc(&req->rq_import->imp_replay_inflight);
	ptlrpc_request_addref(req); /* ptlrpcd needs a ref */

	ptlrpcd_add_req(req);
	return 0;
}
EXPORT_SYMBOL(ptlrpc_replay_req);
/**
 * Aborts all in-flight requests on import \a imp sending and delayed lists
 */
void ptlrpc_abort_inflight(struct obd_import *imp)
{
	struct list_head *tmp, *n;

	/*
	 * Make sure that no new requests get processed for this import.
	 * ptlrpc_{queue,set}_wait must (and does) hold imp_lock while testing
	 * this flag and then putting requests on sending_list or delayed_list.
	 */
	spin_lock(&imp->imp_lock);

	/*
	 * XXX locking? Maybe we should remove each request with the list
	 * locked? Also, how do we know if the requests on the list are
	 * being freed at this time?
	 */
	list_for_each_safe(tmp, n, &imp->imp_sending_list) {
		struct ptlrpc_request *req =
			list_entry(tmp, struct ptlrpc_request, rq_list);

		DEBUG_REQ(D_RPCTRACE, req, "inflight");

		spin_lock(&req->rq_lock);
		if (req->rq_import_generation < imp->imp_generation) {
			req->rq_err = 1;
			req->rq_status = -EIO;
			ptlrpc_client_wake_req(req);
		}
		spin_unlock(&req->rq_lock);
	}

	list_for_each_safe(tmp, n, &imp->imp_delayed_list) {
		struct ptlrpc_request *req =
			list_entry(tmp, struct ptlrpc_request, rq_list);

		DEBUG_REQ(D_RPCTRACE, req, "aborting waiting req");

		spin_lock(&req->rq_lock);
		if (req->rq_import_generation < imp->imp_generation) {
			req->rq_err = 1;
			req->rq_status = -EIO;
			ptlrpc_client_wake_req(req);
		}
		spin_unlock(&req->rq_lock);
	}

	/*
	 * Last chance to free reqs left on the replay list, but we
	 * will still leak reqs that haven't committed.
	 */
	if (imp->imp_replayable)
		ptlrpc_free_committed(imp);

	spin_unlock(&imp->imp_lock);
}
EXPORT_SYMBOL(ptlrpc_abort_inflight);
/**
 * Abort all uncompleted requests in request set \a set
 */
void ptlrpc_abort_set(struct ptlrpc_request_set *set)
{
	struct list_head *tmp, *pos;

	list_for_each_safe(pos, tmp, &set->set_requests) {
		struct ptlrpc_request *req =
			list_entry(pos, struct ptlrpc_request, rq_set_chain);

		spin_lock(&req->rq_lock);
		if (req->rq_phase != RQ_PHASE_RPC) {
			spin_unlock(&req->rq_lock);
			continue;
		}

		req->rq_err = 1;
		req->rq_status = -EINTR;
		ptlrpc_client_wake_req(req);
		spin_unlock(&req->rq_lock);
	}
}
static __u64 ptlrpc_last_xid;
static spinlock_t ptlrpc_last_xid_lock;

/**
 * Initialize the XID for the node. This is common among all requests on
 * this node, and only requires the property that it is monotonically
 * increasing. It does not need to be sequential. Since this is also used
 * as the RDMA match bits, it is important that a single client NOT have
 * the same match bits for two different in-flight requests, hence we do
 * NOT want to have an XID per target or similar.
 *
 * To avoid an unlikely collision between match bits after a client reboot
 * (which would deliver old data into the wrong RDMA buffer) initialize
 * the XID based on the current time, assuming a maximum RPC rate of 1M RPC/s.
 * If the time is clearly incorrect, we instead use a 62-bit random number.
 * In the worst case the random number will overflow 1M RPCs per second in
 * 9133 years, or permutations thereof.
 */
#define YEAR_2004 (1ULL << 30)
void ptlrpc_init_xid(void)
{
	time64_t now = ktime_get_real_seconds();

	spin_lock_init(&ptlrpc_last_xid_lock);
	if (now < YEAR_2004) {
		cfs_get_random_bytes(&ptlrpc_last_xid, sizeof(ptlrpc_last_xid));
		ptlrpc_last_xid >>= 2;
		ptlrpc_last_xid |= (1ULL << 61);
	} else {
		ptlrpc_last_xid = (__u64)now << 20;
	}

	/* Always need to be aligned to a power-of-two for multi-bulk BRW */
	CLASSERT(((PTLRPC_BULK_OPS_COUNT - 1) & PTLRPC_BULK_OPS_COUNT) == 0);
	ptlrpc_last_xid &= PTLRPC_BULK_OPS_MASK;
}
/**
 * Increase xid and returns resulting new value to the caller.
 *
 * Multi-bulk BRW RPCs consume multiple XIDs for each bulk transfer, starting
 * at the returned xid, up to xid + PTLRPC_BULK_OPS_COUNT - 1. The BRW RPC
 * itself uses the last bulk xid needed, so the server can determine the
 * number of bulk transfers from the RPC XID and a bitmask. The starting
 * xid must align to a power-of-two value.
 *
 * This is assumed to be true due to the initial ptlrpc_last_xid
 * value also being initialized to a power-of-two value. LU-1431
 */
__u64 ptlrpc_next_xid(void)
{
	__u64 next;

	spin_lock(&ptlrpc_last_xid_lock);
	next = ptlrpc_last_xid + PTLRPC_BULK_OPS_COUNT;
	ptlrpc_last_xid = next;
	spin_unlock(&ptlrpc_last_xid_lock);

	return next;
}
EXPORT_SYMBOL(ptlrpc_next_xid);
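
/*
 * Example (editorial addition, not in the original source): assuming
 * PTLRPC_BULK_OPS_COUNT is 16 and ptlrpc_last_xid is 0x1000, the next
 * caller gets 0x1010 and its multi-bulk BRW may use match bits
 * 0x1010..0x101f; the following caller starts at 0x1020, so no two
 * in-flight requests ever share match bits.
 */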
/**
 * Get a glimpse at what next xid value might have been.
 * Returns possible next xid.
 */
__u64 ptlrpc_sample_next_xid(void)
{
#if BITS_PER_LONG == 32
	/* need to avoid possible word tearing on 32-bit systems */
	__u64 next;

	spin_lock(&ptlrpc_last_xid_lock);
	next = ptlrpc_last_xid + PTLRPC_BULK_OPS_COUNT;
	spin_unlock(&ptlrpc_last_xid_lock);

	return next;
#else
	/* No need to lock, since returned value is racy anyway */
	return ptlrpc_last_xid + PTLRPC_BULK_OPS_COUNT;
#endif
}
EXPORT_SYMBOL(ptlrpc_sample_next_xid);
/**
 * Functions for operating ptlrpc workers.
 *
 * A ptlrpc work is a function which will run inside the ptlrpc context.
 * The callback shouldn't sleep; otherwise it will block that ptlrpcd thread.
 *
 * 1. After a work is created, it can be used many times, that is:
 *         handler = ptlrpcd_alloc_work();
 *         ptlrpcd_queue_work();
 *
 *    queue it again when necessary:
 *         ptlrpcd_queue_work();
 *         ptlrpcd_destroy_work();
 * 2. ptlrpcd_queue_work() can be called concurrently by multiple processes,
 *    but the work will only be queued once at any given time. Also, as its
 *    name implies, there may be a delay before the ptlrpcd thread actually
 *    runs it.
 */
struct ptlrpc_work_async_args {
	int (*cb)(const struct lu_env *, void *);
	void *cbdata;
};
static void ptlrpcd_add_work_req(struct ptlrpc_request *req)
{
	/* re-initialize the req */
	req->rq_timeout = obd_timeout;
	req->rq_sent = ktime_get_real_seconds();
	req->rq_deadline = req->rq_sent + req->rq_timeout;
	req->rq_reply_deadline = req->rq_deadline;
	req->rq_phase = RQ_PHASE_INTERPRET;
	req->rq_next_phase = RQ_PHASE_COMPLETE;
	req->rq_xid = ptlrpc_next_xid();
	req->rq_import_generation = req->rq_import->imp_generation;

	ptlrpcd_add_req(req);
}
static int work_interpreter(const struct lu_env *env,
			    struct ptlrpc_request *req, void *data, int rc)
{
	struct ptlrpc_work_async_args *arg = data;

	LASSERT(ptlrpcd_check_work(req));

	rc = arg->cb(env, arg->cbdata);

	list_del_init(&req->rq_set_chain);
	req->rq_set = NULL;

	if (atomic_dec_return(&req->rq_refcount) > 1) {
		atomic_set(&req->rq_refcount, 2);
		ptlrpcd_add_work_req(req);
	}
	return rc;
}
static int worker_format;

static int ptlrpcd_check_work(struct ptlrpc_request *req)
{
	return req->rq_pill.rc_fmt == (void *)&worker_format;
}
/**
 * Create a work for ptlrpc.
 */
void *ptlrpcd_alloc_work(struct obd_import *imp,
			 int (*cb)(const struct lu_env *, void *), void *cbdata)
{
	struct ptlrpc_request *req = NULL;
	struct ptlrpc_work_async_args *args;

	might_sleep();

	if (!cb)
		return ERR_PTR(-EINVAL);

	/* copy some code from deprecated fakereq. */
	req = ptlrpc_request_cache_alloc(GFP_NOFS);
	if (!req) {
		CERROR("ptlrpc: run out of memory!\n");
		return ERR_PTR(-ENOMEM);
	}

	req->rq_send_state = LUSTRE_IMP_FULL;
	req->rq_type = PTL_RPC_MSG_REQUEST;
	req->rq_import = class_import_get(imp);
	req->rq_export = NULL;
	req->rq_interpret_reply = work_interpreter;
	/* don't want reply */
	req->rq_receiving_reply = 0;
	req->rq_req_unlink = req->rq_reply_unlink = 0;
	req->rq_no_delay = req->rq_no_resend = 1;
	req->rq_pill.rc_fmt = (void *)&worker_format;

	spin_lock_init(&req->rq_lock);
	INIT_LIST_HEAD(&req->rq_list);
	INIT_LIST_HEAD(&req->rq_replay_list);
	INIT_LIST_HEAD(&req->rq_set_chain);
	INIT_LIST_HEAD(&req->rq_history_list);
	INIT_LIST_HEAD(&req->rq_exp_list);
	init_waitqueue_head(&req->rq_reply_waitq);
	init_waitqueue_head(&req->rq_set_waitq);
	atomic_set(&req->rq_refcount, 1);

	CLASSERT(sizeof(*args) <= sizeof(req->rq_async_args));
	args = ptlrpc_req_async_args(req);
	args->cb = cb;
	args->cbdata = cbdata;

	return req;
}
EXPORT_SYMBOL(ptlrpcd_alloc_work);
void ptlrpcd_destroy_work(void *handler)
{
	struct ptlrpc_request *req = handler;

	if (req)
		ptlrpc_req_finished(req);
}
EXPORT_SYMBOL(ptlrpcd_destroy_work);
int ptlrpcd_queue_work(void *handler)
{
	struct ptlrpc_request *req = handler;

	/*
	 * Check if the req is already being queued.
	 *
	 * Here comes a trick: ptlrpc lacks a reliable way of checking
	 * whether a req is being processed, so the refcount of the req is
	 * used for this purpose instead. This is okay because the caller
	 * should use this req as opaque data. - Jinshan
	 */
	LASSERT(atomic_read(&req->rq_refcount) > 0);
	if (atomic_inc_return(&req->rq_refcount) == 2)
		ptlrpcd_add_work_req(req);
	return 0;
}
EXPORT_SYMBOL(ptlrpcd_queue_work);
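
/*
 * Illustrative sketch (editorial addition, not in the original source):
 * end-to-end use of the work API above. The callback body is hypothetical;
 * as noted earlier, it must not sleep since it runs in ptlrpcd context.
 */
#if 0
static int example_work_cb(const struct lu_env *env, void *data)
{
	/* short, non-blocking work */
	return 0;
}

static int example_use_work(struct obd_import *imp)
{
	void *handler = ptlrpcd_alloc_work(imp, example_work_cb, NULL);

	if (IS_ERR(handler))
		return PTR_ERR(handler);

	ptlrpcd_queue_work(handler);	/* queue once; re-queue as needed */
	/* ... */
	ptlrpcd_destroy_work(handler);	/* drop the allocation reference */
	return 0;
}
#endif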