/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "lockspace.h"
#include "member.h"
#include "dir.h"
#include "ast.h"
#include "recover.h"
#include "lowcomms.h"
#include "lock.h"
#include "requestqueue.h"
#include "recoverd.h"


/* If the start for which we're re-enabling locking (seq) has been superseded
   by a newer stop (ls_recover_seq), we need to leave locking disabled.

   We suspend dlm_recv threads here to avoid the race where dlm_recv a) sees
   locking stopped and b) adds a message to the requestqueue, but dlm_recoverd
   enables locking and clears the requestqueue between a and b. */

static int enable_locking(struct dlm_ls *ls, uint64_t seq)
{
	int error = -EINTR;

	down_write(&ls->ls_recv_active);

	spin_lock(&ls->ls_recover_lock);
	if (ls->ls_recover_seq == seq) {
		set_bit(LSFL_RUNNING, &ls->ls_flags);
		/* unblocks processes waiting to enter the dlm */
		up_write(&ls->ls_in_recovery);
		error = 0;
	}
	spin_unlock(&ls->ls_recover_lock);

	up_write(&ls->ls_recv_active);
	return error;
}

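/* ls_recover() runs the recovery sequence for a single start (rv->seq):
   rebuild the member list and resource directory, remaster rsb's whose
   master nodes have departed, then re-enable locking and replay requests
   queued while locking was stopped.  Each dlm_recover_*_wait() below is a
   barrier that all lockspace members must reach before any continues. */
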
static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
{
	unsigned long start;
	int error, neg = 0;

	log_debug(ls, "recover %llx", (unsigned long long)rv->seq);

	mutex_lock(&ls->ls_recoverd_active);

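	/*
	 * Hold off lock callback (ast) delivery while recovery rewrites
	 * lock state; dlm_callback_resume() below allows delivery again
	 * before locking is re-enabled.
	 */
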
	dlm_callback_suspend(ls);

	/*
	 * Free non-master tossed rsb's.  Master rsb's are kept on toss
	 * list and put on root list to be included in resdir recovery.
	 */

	dlm_clear_toss_list(ls);

	/*
	 * This list of root rsb's will be the basis of most of the recovery
	 * routines.
	 */

	dlm_create_root_list(ls);

	/*
	 * Add or remove nodes from the lockspace's ls_nodes list.
	 * Also waits for all nodes to complete dlm_recover_members.
	 */

	error = dlm_recover_members(ls, rv, &neg);
	if (error) {
		log_debug(ls, "recover_members failed %d", error);
		goto fail;
	}
	start = jiffies;

	/*
	 * Rebuild our own share of the directory by collecting from all other
	 * nodes their master rsb names that hash to us.
	 */

	error = dlm_recover_directory(ls);
	if (error) {
		log_debug(ls, "recover_directory failed %d", error);
		goto fail;
	}

	/*
	 * Wait for all nodes to complete directory rebuild.
	 */

	error = dlm_recover_directory_wait(ls);
	if (error) {
		log_debug(ls, "recover_directory_wait failed %d", error);
		goto fail;
	}

	/*
	 * We may have outstanding operations that are waiting for a reply from
	 * a failed node.  Mark these to be resent after recovery.  Unlock and
	 * cancel ops can just be completed.
	 */

	dlm_recover_waiters_pre(ls);

	error = dlm_recovery_stopped(ls);
	if (error)
		goto fail;

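	/*
	 * neg is nonzero if dlm_recover_members() removed departed nodes
	 * from ls_nodes.  In that case, or if the lockspace has no resource
	 * directory, rsb's mastered on departed nodes need new masters and
	 * our locks must be sent to them.  Otherwise we only take part in
	 * the RS_LOCKS barrier used by the nodes doing that remastering.
	 */
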
	if (neg || dlm_no_directory(ls)) {
		/*
		 * Clear lkb's for departed nodes.
		 */

		dlm_purge_locks(ls);

		/*
		 * Get new master nodeid's for rsb's that were mastered on
		 * departed nodes.
		 */

		error = dlm_recover_masters(ls);
		if (error) {
			log_debug(ls, "recover_masters failed %d", error);
			goto fail;
		}

		/*
		 * Send our locks on remastered rsb's to the new masters.
		 */

		error = dlm_recover_locks(ls);
		if (error) {
			log_debug(ls, "recover_locks failed %d", error);
			goto fail;
		}

		error = dlm_recover_locks_wait(ls);
		if (error) {
			log_debug(ls, "recover_locks_wait failed %d", error);
			goto fail;
		}

		/*
		 * Finalize state in master rsb's now that all locks can be
		 * checked.  This includes conversion resolution and lvb
		 * settings.
		 */

		dlm_recover_rsbs(ls);
	} else {
		/*
		 * Other lockspace members may be going through the "neg" steps
		 * while also adding us to the lockspace, in which case they'll
		 * be doing the recover_locks (RS_LOCKS) barrier.
		 */
		dlm_set_recover_status(ls, DLM_RS_LOCKS);

		error = dlm_recover_locks_wait(ls);
		if (error) {
			log_debug(ls, "recover_locks_wait failed %d", error);
			goto fail;
		}
	}

	dlm_release_root_list(ls);

	/*
	 * Purge directory-related requests that are saved in requestqueue.
	 * All dir requests from before recovery are invalid now due to the dir
	 * rebuild and will be resent by the requesting nodes.
	 */

	dlm_purge_requestqueue(ls);

	dlm_set_recover_status(ls, DLM_RS_DONE);
	error = dlm_recover_done_wait(ls);
	if (error) {
		log_debug(ls, "recover_done_wait failed %d", error);
		goto fail;
	}

	dlm_clear_members_gone(ls);

	dlm_adjust_timeouts(ls);

	dlm_callback_resume(ls);

	error = enable_locking(ls, rv->seq);
	if (error) {
		log_debug(ls, "enable_locking failed %d", error);
		goto fail;
	}

	error = dlm_process_requestqueue(ls);
	if (error) {
		log_debug(ls, "process_requestqueue failed %d", error);
		goto fail;
	}

	error = dlm_recover_waiters_post(ls);
	if (error) {
		log_debug(ls, "recover_waiters_post failed %d", error);
		goto fail;
	}

	dlm_grant_after_purge(ls);

	log_debug(ls, "recover %llx done: %u ms",
		  (unsigned long long)rv->seq,
		  jiffies_to_msecs(jiffies - start));
	mutex_unlock(&ls->ls_recoverd_active);

	return 0;

 fail:
	dlm_release_root_list(ls);
	log_debug(ls, "recover %llx error %d",
		  (unsigned long long)rv->seq, error);
	mutex_unlock(&ls->ls_recoverd_active);
	return error;
}

/* The dlm_ls_start() that created the rv we take here may already have been
   stopped via dlm_ls_stop(); in that case we need to leave the RECOVERY_STOP
   flag set. */

static void do_ls_recovery(struct dlm_ls *ls)
{
	struct dlm_recover *rv = NULL;

	spin_lock(&ls->ls_recover_lock);
	rv = ls->ls_recover_args;
	ls->ls_recover_args = NULL;
	if (rv && ls->ls_recover_seq == rv->seq)
		clear_bit(LSFL_RECOVERY_STOP, &ls->ls_flags);
	spin_unlock(&ls->ls_recover_lock);

	if (rv) {
		ls_recover(ls, rv);
		kfree(rv->nodeids);
		kfree(rv->new);
		kfree(rv);
	}
}

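/* dlm_recoverd is the per-lockspace recovery thread.  It sleeps until
   dlm_recoverd_kick() sets LSFL_WORK and wakes it, then runs
   do_ls_recovery() with whatever recover args have been saved in
   ls_recover_args. */
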
static int dlm_recoverd(void *arg)
{
	struct dlm_ls *ls;

	ls = dlm_find_lockspace_local(arg);
	if (!ls) {
		log_print("dlm_recoverd: no lockspace %p", arg);
		return -1;
	}

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!test_bit(LSFL_WORK, &ls->ls_flags))
			schedule();
		set_current_state(TASK_RUNNING);

		if (test_and_clear_bit(LSFL_WORK, &ls->ls_flags))
			do_ls_recovery(ls);
	}

	dlm_put_lockspace(ls);
	return 0;
}

void dlm_recoverd_kick(struct dlm_ls *ls)
{
	set_bit(LSFL_WORK, &ls->ls_flags);
	wake_up_process(ls->ls_recoverd_task);
}

int dlm_recoverd_start(struct dlm_ls *ls)
{
	struct task_struct *p;
	int error = 0;

	p = kthread_run(dlm_recoverd, ls, "dlm_recoverd");
	if (IS_ERR(p))
		error = PTR_ERR(p);
	else
		ls->ls_recoverd_task = p;
	return error;
}

void dlm_recoverd_stop(struct dlm_ls *ls)
{
	kthread_stop(ls->ls_recoverd_task);
}

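/* dlm_recoverd_suspend/resume let the caller exclude dlm_recoverd: holding
   ls_recoverd_active guarantees no ls_recover() is in progress.  Waking
   ls_wait_general lets a recovery that is sleeping on that waitqueue
   recheck its condition so it can finish or bail out and release the
   mutex. */
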
void dlm_recoverd_suspend(struct dlm_ls *ls)
{
	wake_up(&ls->ls_wait_general);
	mutex_lock(&ls->ls_recoverd_active);
}

void dlm_recoverd_resume(struct dlm_ls *ls)
{
	mutex_unlock(&ls->ls_recoverd_active);
}