/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/recover.c
 *
 * Author: Mike Shaver <shaver@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_RPC
#include "../../include/linux/libcfs/libcfs.h"

#include "../include/obd_support.h"
#include "../include/lustre_ha.h"
#include "../include/lustre_net.h"
#include "../include/lustre_import.h"
#include "../include/lustre_export.h"
#include "../include/obd.h"
#include "../include/obd_class.h"
#include <linux/list.h>

#include "ptlrpc_internal.h"
/**
 * Start recovery on a disconnected import.
 * This is done by simply attempting a connect.
 */
void ptlrpc_initiate_recovery(struct obd_import *imp)
{
	CDEBUG(D_HA, "%s: starting recovery\n", obd2cli_tgt(imp->imp_obd));
	ptlrpc_connect_import(imp);
}
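
/*
 * Illustrative usage (the caller shown is an assumption, not verified in
 * this file): a watchdog such as the pinger would notice an import sitting
 * in LUSTRE_IMP_DISCON and restart recovery for it:
 *
 *	if (imp->imp_state == LUSTRE_IMP_DISCON && !imp->imp_deactive)
 *		ptlrpc_initiate_recovery(imp);
 */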

/**
 * Identify what request from the replay list needs to be replayed next
 * (based on what we have already replayed) and send it to the server.
 */
int ptlrpc_replay_next(struct obd_import *imp, int *inflight)
{
	int rc = 0;
	struct list_head *tmp, *pos;
	struct ptlrpc_request *req = NULL;
	__u64 last_transno;

	*inflight = 0;

	/* The server might have committed some requests after we last spoke
	 * to it, so make sure we get rid of them now.
	 */
	spin_lock(&imp->imp_lock);
	imp->imp_last_transno_checked = 0;
	ptlrpc_free_committed(imp);
	last_transno = imp->imp_last_replay_transno;
	spin_unlock(&imp->imp_lock);

	CDEBUG(D_HA, "import %p from %s committed %llu last %llu\n",
	       imp, obd2cli_tgt(imp->imp_obd),
	       imp->imp_peer_committed_transno, last_transno);

	/* Do I need to hold a lock across this iteration? We shouldn't be
	 * racing with any additions to the list, because we're in recovery
	 * and are therefore not processing additional requests to add. Calls
	 * to ptlrpc_free_committed might commit requests, but nothing "newer"
	 * than the one we're replaying (it can't be committed until it's
	 * replayed, and we're doing that here). list_for_each_safe protects
	 * against problems with the current request being committed, in the
	 * unlikely event of that race. So, in conclusion, I think that it's
	 * safe to perform this list-walk without the imp_lock held.
	 *
	 * But, the {mdc,osc}_replay_open callbacks both iterate
	 * request lists, and have comments saying they assume the
	 * imp_lock is being held by ptlrpc_replay, but it's not. It's
	 * just a little race...
	 */

	/* Replay all the committed open requests on committed_list first */
	if (!list_empty(&imp->imp_committed_list)) {
		tmp = imp->imp_committed_list.prev;
		req = list_entry(tmp, struct ptlrpc_request,
				 rq_replay_list);

		/* The last request on committed_list hasn't been replayed */
		if (req->rq_transno > last_transno) {
			/* Since the imp_committed_list is immutable before
			 * all of its requests have been replayed, it's safe
			 * to use a cursor to accelerate the search
			 */
			imp->imp_replay_cursor = imp->imp_replay_cursor->next;

			while (imp->imp_replay_cursor !=
			       &imp->imp_committed_list) {
				req = list_entry(imp->imp_replay_cursor,
						 struct ptlrpc_request,
						 rq_replay_list);
				if (req->rq_transno > last_transno)
					break;

				req = NULL;
				imp->imp_replay_cursor =
					imp->imp_replay_cursor->next;
			}
		} else {
			/* All requests on committed_list have been replayed */
			imp->imp_replay_cursor = &imp->imp_committed_list;
			req = NULL;
		}
	}

	/* All the requests in the committed list have been replayed, let's
	 * replay the imp_replay_list
	 */
	if (!req) {
		list_for_each_safe(tmp, pos, &imp->imp_replay_list) {
			req = list_entry(tmp, struct ptlrpc_request,
					 rq_replay_list);

			if (req->rq_transno > last_transno)
				break;
			req = NULL;
		}
	}

	/* If we need to resend the last sent transno (because a reconnect
	 * has occurred), then stop on the matching req and send it again.
	 * If, however, the last sent transno has been committed then we
	 * continue replay from the next request.
	 */
	if (req && imp->imp_resend_replay)
		lustre_msg_add_flags(req->rq_reqmsg, MSG_RESENT);

	spin_lock(&imp->imp_lock);
	imp->imp_resend_replay = 0;
	spin_unlock(&imp->imp_lock);

	if (req) {
		rc = ptlrpc_replay_req(req);
		if (rc) {
			CERROR("recovery replay error %d for req %llu\n",
			       rc, req->rq_xid);
			return rc;
		}
		*inflight = 1;
	}
	return rc;
}
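
/*
 * A minimal sketch of the intended calling pattern (illustrative only; the
 * real driver is the import recovery state machine, which lives outside
 * this file): replay one request at a time, waiting for each reply before
 * asking for the next, until nothing is left in flight:
 *
 *	int inflight, rc;
 *
 *	do {
 *		rc = ptlrpc_replay_next(imp, &inflight);
 *		... wait for the in-flight reply to be processed ...
 *	} while (rc == 0 && inflight);
 */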

/**
 * Schedule resending of requests on the sending_list. This is done after
 * we have completed replaying requests and locks.
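 *
 * Resending only happens once the import has reached LUSTRE_IMP_RECOVER
 * (the state checked below); anything still on imp_sending_list at that
 * point was in flight when the connection was lost and must go out again.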
 */
int ptlrpc_resend(struct obd_import *imp)
{
	struct ptlrpc_request *req, *next;

	/* As long as we're in recovery, nothing should be added to the
	 * sending list, so we wouldn't need to hold the lock during this
	 * iteration and resend process. However, "lctl recover" may be
	 * invoked twice at the same time, so take imp_lock to serialize
	 * against that and re-check the import state under it.
	 */
	spin_lock(&imp->imp_lock);
	if (imp->imp_state != LUSTRE_IMP_RECOVER) {
		spin_unlock(&imp->imp_lock);
		return -1;
	}

	list_for_each_entry_safe(req, next, &imp->imp_sending_list,
				 rq_list) {
		LASSERTF((long)req > PAGE_CACHE_SIZE && req != LP_POISON,
			 "req %p bad\n", req);
		LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req);
		if (!ptlrpc_no_resend(req))
			ptlrpc_resend_req(req);
	}
	spin_unlock(&imp->imp_lock);

	return 0;
}
EXPORT_SYMBOL(ptlrpc_resend);

/**
 * Go through all requests in the delayed list and wake their threads
 * for resending.
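 *
 * Requests sit on imp_delayed_list while the import is in a state where
 * they cannot be sent (see ptlrpc_import_delay_req()); waking them lets
 * each owning thread re-evaluate whether its request can now go out.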
 */
void ptlrpc_wake_delayed(struct obd_import *imp)
{
	struct list_head *tmp, *pos;
	struct ptlrpc_request *req;

	spin_lock(&imp->imp_lock);
	list_for_each_safe(tmp, pos, &imp->imp_delayed_list) {
		req = list_entry(tmp, struct ptlrpc_request, rq_list);

		DEBUG_REQ(D_HA, req, "waking (set %p):", req->rq_set);
		ptlrpc_client_wake_req(req);
	}
	spin_unlock(&imp->imp_lock);
}
EXPORT_SYMBOL(ptlrpc_wake_delayed);
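
/**
 * Handle a request that failed with "not connected": note the disconnect
 * on the import, kick off reconnection (unless the import is not
 * replayable, in which case it is auto-deactivated), and mark the failed
 * request for resend once recovery completes.
 */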
void ptlrpc_request_handle_notconn(struct ptlrpc_request *failed_req)
{
	struct obd_import *imp = failed_req->rq_import;

	CDEBUG(D_HA, "import %s of %s@%s abruptly disconnected: reconnecting\n",
	       imp->imp_obd->obd_name, obd2cli_tgt(imp->imp_obd),
	       imp->imp_connection->c_remote_uuid.uuid);

	if (ptlrpc_set_import_discon(imp,
				     lustre_msg_get_conn_cnt(failed_req->rq_reqmsg))) {
		if (!imp->imp_replayable) {
			CDEBUG(D_HA, "import %s@%s for %s not replayable, auto-deactivating\n",
			       obd2cli_tgt(imp->imp_obd),
			       imp->imp_connection->c_remote_uuid.uuid,
			       imp->imp_obd->obd_name);
			ptlrpc_deactivate_import(imp);
		}
		/* to control recovery via lctl {disable|enable}_recovery */
		if (imp->imp_deactive == 0)
			ptlrpc_connect_import(imp);
	}

	/* Wait for recovery to complete and resend. If evicted, then
	 * this request will be errored out later.
	 */
	spin_lock(&failed_req->rq_lock);
	if (!failed_req->rq_no_resend)
		failed_req->rq_resend = 1;
	spin_unlock(&failed_req->rq_lock);
}

/**
 * Administratively activate/deactivate a client.
 * This should only be called by the ioctl interface, currently:
 * - the lctl deactivate and activate commands
 * - echo 0/1 >> /sys/fs/lustre/osc/XXX/active
 * - client umount -f (ll_umount_begin)
 */
int ptlrpc_set_import_active(struct obd_import *imp, int active)
{
	struct obd_device *obd = imp->imp_obd;
	int rc = 0;

	LASSERT(obd);

	/* When deactivating, mark import invalid, and abort in-flight
	 * requests.
	 */
	if (!active) {
		LCONSOLE_WARN("setting import %s INACTIVE by administrator request\n",
			      obd2cli_tgt(imp->imp_obd));

		/* set before invalidate to avoid messages about imp_inval
		 * set without imp_deactive in ptlrpc_import_delay_req
		 */
		spin_lock(&imp->imp_lock);
		imp->imp_deactive = 1;
		spin_unlock(&imp->imp_lock);

		obd_import_event(imp->imp_obd, imp, IMP_EVENT_DEACTIVATE);

		ptlrpc_invalidate_import(imp);
	}

	/* When activating, mark import valid, and attempt recovery */
	if (active) {
		CDEBUG(D_HA, "setting import %s VALID\n",
		       obd2cli_tgt(imp->imp_obd));

		spin_lock(&imp->imp_lock);
		imp->imp_deactive = 0;
		spin_unlock(&imp->imp_lock);
		obd_import_event(imp->imp_obd, imp, IMP_EVENT_ACTIVATE);

		rc = ptlrpc_recover_import(imp, NULL, 0);
	}

	return rc;
}
EXPORT_SYMBOL(ptlrpc_set_import_active);
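
/*
 * For illustration (the device name is hypothetical), the administrative
 * interfaces listed above all end up in ptlrpc_set_import_active():
 *
 *	lctl --device lustre-OST0000-osc deactivate
 *	echo 0 > /sys/fs/lustre/osc/XXX/active
 */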

/* Attempt to reconnect an import */
int ptlrpc_recover_import(struct obd_import *imp, char *new_uuid, int async)
{
	int rc = 0;

	spin_lock(&imp->imp_lock);
	if (imp->imp_state == LUSTRE_IMP_NEW || imp->imp_deactive ||
	    atomic_read(&imp->imp_inval_count))
		rc = -EINVAL;
	spin_unlock(&imp->imp_lock);
	if (rc)
		goto out;

	/* force import to be disconnected. */
	ptlrpc_set_import_discon(imp, 0);

	if (new_uuid) {
		struct obd_uuid uuid;

		/* instruct import to use new uuid */
		obd_str2uuid(&uuid, new_uuid);
		rc = import_set_conn_priority(imp, &uuid);
		if (rc)
			goto out;
	}

	/* Check if reconnect is already in progress */
	spin_lock(&imp->imp_lock);
	if (imp->imp_state != LUSTRE_IMP_DISCON) {
		imp->imp_force_verify = 1;
		rc = -EALREADY;
	}
	spin_unlock(&imp->imp_lock);
	if (rc)
		goto out;

	rc = ptlrpc_connect_import(imp);
	if (rc)
		goto out;

	if (!async) {
		struct l_wait_info lwi;
		int secs = cfs_time_seconds(obd_timeout);

		CDEBUG(D_HA, "%s: recovery started, waiting %u seconds\n",
		       obd2cli_tgt(imp->imp_obd), secs);

		lwi = LWI_TIMEOUT(secs, NULL, NULL);
		rc = l_wait_event(imp->imp_recovery_waitq,
				  !ptlrpc_import_in_recovery(imp), &lwi);
		CDEBUG(D_HA, "%s: recovery finished\n",
		       obd2cli_tgt(imp->imp_obd));
	}

out:
	return rc;
}
EXPORT_SYMBOL(ptlrpc_recover_import);
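
/**
 * Report whether import recovery is still in progress. This is the wait
 * condition used by ptlrpc_recover_import() above: recovery is considered
 * finished once the import is fully connected (LUSTRE_IMP_FULL), closed,
 * back to plain DISCON, or recovery has been disabled via obd_no_recov.
 */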
int ptlrpc_import_in_recovery(struct obd_import *imp)
{
	int in_recovery = 1;

	spin_lock(&imp->imp_lock);
	if (imp->imp_state == LUSTRE_IMP_FULL ||
	    imp->imp_state == LUSTRE_IMP_CLOSED ||
	    imp->imp_state == LUSTRE_IMP_DISCON ||
	    imp->imp_obd->obd_no_recov)
		in_recovery = 0;
	spin_unlock(&imp->imp_lock);

	return in_recovery;
}