1 /* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
3 *
4 * dlmrecovery.c
5 *
6 * recovery stuff
7 *
8 * Copyright (C) 2004 Oracle. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public
12 * License as published by the Free Software Foundation; either
13 * version 2 of the License, or (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public
21 * License along with this program; if not, write to the
22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
23  * Boston, MA 02111-1307, USA.
24 *
25 */
26
27
28 #include <linux/module.h>
29 #include <linux/fs.h>
30 #include <linux/types.h>
31 #include <linux/slab.h>
32 #include <linux/highmem.h>
33 #include <linux/init.h>
34 #include <linux/sysctl.h>
35 #include <linux/random.h>
36 #include <linux/blkdev.h>
37 #include <linux/socket.h>
38 #include <linux/inet.h>
39 #include <linux/timer.h>
40 #include <linux/kthread.h>
41 #include <linux/delay.h>
42
43
44 #include "cluster/heartbeat.h"
45 #include "cluster/nodemanager.h"
46 #include "cluster/tcp.h"
47
48 #include "dlmapi.h"
49 #include "dlmcommon.h"
50 #include "dlmdomain.h"
51
52 #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY)
53 #include "cluster/masklog.h"
54
55 static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node);
56
57 static int dlm_recovery_thread(void *data);
58 void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
59 int dlm_launch_recovery_thread(struct dlm_ctxt *dlm);
60 void dlm_kick_recovery_thread(struct dlm_ctxt *dlm);
61 static int dlm_do_recovery(struct dlm_ctxt *dlm);
62
63 static int dlm_pick_recovery_master(struct dlm_ctxt *dlm);
64 static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node);
65 static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
66 static int dlm_request_all_locks(struct dlm_ctxt *dlm,
67 u8 request_from, u8 dead_node);
68 static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
69
70 static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res);
71 static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
72 const char *lockname, int namelen,
73 int total_locks, u64 cookie,
74 u8 flags, u8 master);
75 static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
76 struct dlm_migratable_lockres *mres,
77 u8 send_to,
78 struct dlm_lock_resource *res,
79 int total_locks);
80 static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
81 struct dlm_lock_resource *res,
82 struct dlm_migratable_lockres *mres);
83 static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm);
84 static int dlm_send_all_done_msg(struct dlm_ctxt *dlm,
85 u8 dead_node, u8 send_to);
86 static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node);
87 static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
88 struct list_head *list, u8 dead_node);
89 static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
90 u8 dead_node, u8 new_master);
91 static void dlm_reco_ast(void *astdata);
92 static void dlm_reco_bast(void *astdata, int blocked_type);
93 static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st);
94 static void dlm_request_all_locks_worker(struct dlm_work_item *item,
95 void *data);
96 static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data);
97 static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
98 struct dlm_lock_resource *res,
99 u8 *real_master);
100
101 static u64 dlm_get_next_mig_cookie(void);
102
103 static DEFINE_SPINLOCK(dlm_reco_state_lock);
104 static DEFINE_SPINLOCK(dlm_mig_cookie_lock);
105 static u64 dlm_mig_cookie = 1;
106
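/* Hand out a cookie that ties together the multiple network messages of
 * one large lockres transfer. The counter starts at 1 and wraps back to
 * 1, so a real cookie is never 0; dlm_send_one_lockres() leaves the
 * cookie at 0 for transfers that fit in a single message. */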
107 static u64 dlm_get_next_mig_cookie(void)
108 {
109 u64 c;
110 spin_lock(&dlm_mig_cookie_lock);
111 c = dlm_mig_cookie;
112 if (dlm_mig_cookie == (~0ULL))
113 dlm_mig_cookie = 1;
114 else
115 dlm_mig_cookie++;
116 spin_unlock(&dlm_mig_cookie_lock);
117 return c;
118 }
119
120 static inline void dlm_set_reco_dead_node(struct dlm_ctxt *dlm,
121 u8 dead_node)
122 {
123 assert_spin_locked(&dlm->spinlock);
124 if (dlm->reco.dead_node != dead_node)
125 mlog(0, "%s: changing dead_node from %u to %u\n",
126 dlm->name, dlm->reco.dead_node, dead_node);
127 dlm->reco.dead_node = dead_node;
128 }
129
130 static inline void dlm_set_reco_master(struct dlm_ctxt *dlm,
131 u8 master)
132 {
133 assert_spin_locked(&dlm->spinlock);
134 mlog(0, "%s: changing new_master from %u to %u\n",
135 dlm->name, dlm->reco.new_master, master);
136 dlm->reco.new_master = master;
137 }
138
139 static inline void __dlm_reset_recovery(struct dlm_ctxt *dlm)
140 {
141 assert_spin_locked(&dlm->spinlock);
142 clear_bit(dlm->reco.dead_node, dlm->recovery_map);
143 dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
144 dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
145 }
146
147 static inline void dlm_reset_recovery(struct dlm_ctxt *dlm)
148 {
149 spin_lock(&dlm->spinlock);
150 __dlm_reset_recovery(dlm);
151 spin_unlock(&dlm->spinlock);
152 }
153
154 /* Worker function used during recovery. */
155 void dlm_dispatch_work(struct work_struct *work)
156 {
157 struct dlm_ctxt *dlm =
158 container_of(work, struct dlm_ctxt, dispatched_work);
159 LIST_HEAD(tmp_list);
160 struct dlm_work_item *item, *next;
161 dlm_workfunc_t *workfunc;
162 int tot=0;
163
164 spin_lock(&dlm->work_lock);
165 list_splice_init(&dlm->work_list, &tmp_list);
166 spin_unlock(&dlm->work_lock);
167
168 list_for_each_entry(item, &tmp_list, list) {
169 tot++;
170 }
171 mlog(0, "%s: work thread has %d work items\n", dlm->name, tot);
172
173 list_for_each_entry_safe(item, next, &tmp_list, list) {
174 workfunc = item->func;
175 list_del_init(&item->list);
176
177 /* already have ref on dlm to avoid having
178 * it disappear. just double-check. */
179 BUG_ON(item->dlm != dlm);
180
181 /* this is allowed to sleep and
182 * call network stuff */
183 workfunc(item, item->data);
184
185 dlm_put(dlm);
186 kfree(item);
187 }
188 }
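/* Handlers queue work for the function above with dlm_init_work_item()
 * followed by list_add_tail(&item->list, &dlm->work_list) and
 * queue_work(dlm->dlm_worker, &dlm->dispatched_work); see
 * dlm_request_all_locks_handler() and dlm_mig_lockres_handler() below. */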
189
190 /*
191 * RECOVERY THREAD
192 */
193
194 void dlm_kick_recovery_thread(struct dlm_ctxt *dlm)
195 {
196 /* wake the recovery thread
197 * this will wake the reco thread in one of three places
198 * 1) sleeping with no recovery happening
199 * 2) sleeping with recovery mastered elsewhere
200 * 3) recovery mastered here, waiting on reco data */
201
202 wake_up(&dlm->dlm_reco_thread_wq);
203 }
204
205 /* Launch the recovery thread */
206 int dlm_launch_recovery_thread(struct dlm_ctxt *dlm)
207 {
208 mlog(0, "starting dlm recovery thread...\n");
209
210 dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm,
211 "dlm_reco_thread");
212 if (IS_ERR(dlm->dlm_reco_thread_task)) {
213 mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task));
214 dlm->dlm_reco_thread_task = NULL;
215 return -EINVAL;
216 }
217
218 return 0;
219 }
220
221 void dlm_complete_recovery_thread(struct dlm_ctxt *dlm)
222 {
223 if (dlm->dlm_reco_thread_task) {
224 mlog(0, "waiting for dlm recovery thread to exit\n");
225 kthread_stop(dlm->dlm_reco_thread_task);
226 dlm->dlm_reco_thread_task = NULL;
227 }
228 }
229
230
231
232 /*
233 * this is lame, but here's how recovery works...
234 * 1) all recovery threads cluster wide will work on recovering
235 * ONE node at a time
236 * 2) negotiate who will take over all the locks for the dead node.
237  * that's right... ALL the locks.
238 * 3) once a new master is chosen, everyone scans all locks
239 * and moves aside those mastered by the dead guy
240 * 4) each of these locks should be locked until recovery is done
241  * 5) the new master collects all of the secondary lock queue info
242 * one lock at a time, forcing each node to communicate back
243 * before continuing
244 * 6) each secondary lock queue responds with the full known lock info
245  * 7) once the new master has run all its locks, it sends an ALLDONE!
246 * message to everyone
247 * 8) upon receiving this message, the secondary queue node unlocks
248 * and responds to the ALLDONE
249 * 9) once the new master gets responses from everyone, he unlocks
250 * everything and recovery for this dead node is done
251 *10) go back to 2) while there are still dead nodes
252 *
253 */
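/* Roughly how the steps above map onto this file: step 2 is
 * dlm_pick_recovery_master(), steps 5-6 are dlm_remaster_locks() pulling
 * state via dlm_request_all_locks() and DLM_MIG_LOCKRES messages, the
 * ALLDONE broadcast of steps 7-9 is dlm_send_finalize_reco_message(),
 * and step 10 is dlm_do_recovery() returning -EAGAIN to its caller. */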
254
255 static void dlm_print_reco_node_status(struct dlm_ctxt *dlm)
256 {
257 struct dlm_reco_node_data *ndata;
258 struct dlm_lock_resource *res;
259
260 mlog(ML_NOTICE, "%s(%d): recovery info, state=%s, dead=%u, master=%u\n",
261 dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
262 dlm->reco.state & DLM_RECO_STATE_ACTIVE ? "ACTIVE" : "inactive",
263 dlm->reco.dead_node, dlm->reco.new_master);
264
265 list_for_each_entry(ndata, &dlm->reco.node_data, list) {
266 char *st = "unknown";
267 switch (ndata->state) {
268 case DLM_RECO_NODE_DATA_INIT:
269 st = "init";
270 break;
271 case DLM_RECO_NODE_DATA_REQUESTING:
272 st = "requesting";
273 break;
274 case DLM_RECO_NODE_DATA_DEAD:
275 st = "dead";
276 break;
277 case DLM_RECO_NODE_DATA_RECEIVING:
278 st = "receiving";
279 break;
280 case DLM_RECO_NODE_DATA_REQUESTED:
281 st = "requested";
282 break;
283 case DLM_RECO_NODE_DATA_DONE:
284 st = "done";
285 break;
286 case DLM_RECO_NODE_DATA_FINALIZE_SENT:
287 st = "finalize-sent";
288 break;
289 default:
290 st = "bad";
291 break;
292 }
293 mlog(ML_NOTICE, "%s: reco state, node %u, state=%s\n",
294 dlm->name, ndata->node_num, st);
295 }
296 list_for_each_entry(res, &dlm->reco.resources, recovering) {
297 mlog(ML_NOTICE, "%s: lockres %.*s on recovering list\n",
298 dlm->name, res->lockname.len, res->lockname.name);
299 }
300 }
301
302 #define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000)
303
304 static int dlm_recovery_thread(void *data)
305 {
306 int status;
307 struct dlm_ctxt *dlm = data;
308 unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS);
309
310 mlog(0, "dlm thread running for %s...\n", dlm->name);
311
312 while (!kthread_should_stop()) {
313 if (dlm_domain_fully_joined(dlm)) {
314 status = dlm_do_recovery(dlm);
315 if (status == -EAGAIN) {
316 /* do not sleep, recheck immediately. */
317 continue;
318 }
319 if (status < 0)
320 mlog_errno(status);
321 }
322
323 wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
324 kthread_should_stop(),
325 timeout);
326 }
327
328 mlog(0, "quitting DLM recovery thread\n");
329 return 0;
330 }
331
332 /* returns true when the recovery master has contacted us */
333 static int dlm_reco_master_ready(struct dlm_ctxt *dlm)
334 {
335 int ready;
336 spin_lock(&dlm->spinlock);
337 ready = (dlm->reco.new_master != O2NM_INVALID_NODE_NUM);
338 spin_unlock(&dlm->spinlock);
339 return ready;
340 }
341
342 /* returns true if node is no longer in the domain
343 * could be dead or just not joined */
344 int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node)
345 {
346 int dead;
347 spin_lock(&dlm->spinlock);
348 dead = !test_bit(node, dlm->domain_map);
349 spin_unlock(&dlm->spinlock);
350 return dead;
351 }
352
353 /* returns true if node has been recovered,
354  * i.e. is no longer set in the recovery map */
355 static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node)
356 {
357 int recovered;
358 spin_lock(&dlm->spinlock);
359 recovered = !test_bit(node, dlm->recovery_map);
360 spin_unlock(&dlm->spinlock);
361 return recovered;
362 }
363
364
365 void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
366 {
367 if (dlm_is_node_dead(dlm, node))
368 return;
369
370 printk(KERN_NOTICE "o2dlm: Waiting on the death of node %u in "
371 "domain %s\n", node, dlm->name);
372
373 if (timeout)
374 wait_event_timeout(dlm->dlm_reco_thread_wq,
375 dlm_is_node_dead(dlm, node),
376 msecs_to_jiffies(timeout));
377 else
378 wait_event(dlm->dlm_reco_thread_wq,
379 dlm_is_node_dead(dlm, node));
380 }
381
382 void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout)
383 {
384 if (dlm_is_node_recovered(dlm, node))
385 return;
386
387 printk(KERN_NOTICE "o2dlm: Waiting on the recovery of node %u in "
388 "domain %s\n", node, dlm->name);
389
390 if (timeout)
391 wait_event_timeout(dlm->dlm_reco_thread_wq,
392 dlm_is_node_recovered(dlm, node),
393 msecs_to_jiffies(timeout));
394 else
395 wait_event(dlm->dlm_reco_thread_wq,
396 dlm_is_node_recovered(dlm, node));
397 }
398
399 /* callers of the top-level api calls (dlmlock/dlmunlock) should
400 * block on the dlm->reco.event when recovery is in progress.
401 * the dlm recovery thread will set this state when it begins
402 * recovering a dead node (as the new master or not) and clear
403 * the state and wake as soon as all affected lock resources have
404 * been marked with the RECOVERY flag */
405 static int dlm_in_recovery(struct dlm_ctxt *dlm)
406 {
407 int in_recovery;
408 spin_lock(&dlm->spinlock);
409 in_recovery = !!(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
410 spin_unlock(&dlm->spinlock);
411 return in_recovery;
412 }
413
414
415 void dlm_wait_for_recovery(struct dlm_ctxt *dlm)
416 {
417 if (dlm_in_recovery(dlm)) {
418 mlog(0, "%s: reco thread %d in recovery: "
419 "state=%d, master=%u, dead=%u\n",
420 dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
421 dlm->reco.state, dlm->reco.new_master,
422 dlm->reco.dead_node);
423 }
424 wait_event(dlm->reco.event, !dlm_in_recovery(dlm));
425 }
426
427 static void dlm_begin_recovery(struct dlm_ctxt *dlm)
428 {
429 spin_lock(&dlm->spinlock);
430 BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
431 printk(KERN_NOTICE "o2dlm: Begin recovery on domain %s for node %u\n",
432 dlm->name, dlm->reco.dead_node);
433 dlm->reco.state |= DLM_RECO_STATE_ACTIVE;
434 spin_unlock(&dlm->spinlock);
435 }
436
437 static void dlm_end_recovery(struct dlm_ctxt *dlm)
438 {
439 spin_lock(&dlm->spinlock);
440 BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE));
441 dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE;
442 spin_unlock(&dlm->spinlock);
443 printk(KERN_NOTICE "o2dlm: End recovery on domain %s\n", dlm->name);
444 wake_up(&dlm->reco.event);
445 }
446
447 static void dlm_print_recovery_master(struct dlm_ctxt *dlm)
448 {
449 printk(KERN_NOTICE "o2dlm: Node %u (%s) is the Recovery Master for the "
450 "dead node %u in domain %s\n", dlm->reco.new_master,
451 (dlm->node_num == dlm->reco.new_master ? "me" : "he"),
452 dlm->reco.dead_node, dlm->name);
453 }
454
455 static int dlm_do_recovery(struct dlm_ctxt *dlm)
456 {
457 int status = 0;
458 int ret;
459
460 spin_lock(&dlm->spinlock);
461
462 /* check to see if the new master has died */
463 if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM &&
464 test_bit(dlm->reco.new_master, dlm->recovery_map)) {
465 mlog(0, "new master %u died while recovering %u!\n",
466 dlm->reco.new_master, dlm->reco.dead_node);
467 /* unset the new_master, leave dead_node */
468 dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
469 }
470
471 /* select a target to recover */
472 if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
473 int bit;
474
475 bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES, 0);
476 if (bit >= O2NM_MAX_NODES || bit < 0)
477 dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
478 else
479 dlm_set_reco_dead_node(dlm, bit);
480 } else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) {
481 /* BUG? */
482 mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n",
483 dlm->reco.dead_node);
484 dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
485 }
486
487 if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
488 // mlog(0, "nothing to recover! sleeping now!\n");
489 spin_unlock(&dlm->spinlock);
490 /* return to main thread loop and sleep. */
491 return 0;
492 }
493 mlog(0, "%s(%d):recovery thread found node %u in the recovery map!\n",
494 dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
495 dlm->reco.dead_node);
496 spin_unlock(&dlm->spinlock);
497
498 /* take write barrier */
499 /* (stops the list reshuffling thread, proxy ast handling) */
500 dlm_begin_recovery(dlm);
501
502 if (dlm->reco.new_master == dlm->node_num)
503 goto master_here;
504
505 if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
506 /* choose a new master, returns 0 if this node
507 * is the master, -EEXIST if it's another node.
508 * this does not return until a new master is chosen
509 * or recovery completes entirely. */
510 ret = dlm_pick_recovery_master(dlm);
511 if (!ret) {
512 /* already notified everyone. go. */
513 goto master_here;
514 }
515 mlog(0, "another node will master this recovery session.\n");
516 }
517
518 dlm_print_recovery_master(dlm);
519
520 /* it is safe to start everything back up here
521 * because all of the dead node's lock resources
522 * have been marked as in-recovery */
523 dlm_end_recovery(dlm);
524
525 /* sleep out in main dlm_recovery_thread loop. */
526 return 0;
527
528 master_here:
529 dlm_print_recovery_master(dlm);
530
531 status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
532 if (status < 0) {
533 /* we should never hit this anymore */
534 mlog(ML_ERROR, "%s: Error %d remastering locks for node %u, "
535 "retrying.\n", dlm->name, status, dlm->reco.dead_node);
536 /* yield a bit to allow any final network messages
537 * to get handled on remaining nodes */
538 msleep(100);
539 } else {
540 /* success! see if any other nodes need recovery */
541 mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n",
542 dlm->name, dlm->reco.dead_node, dlm->node_num);
543 dlm_reset_recovery(dlm);
544 }
545 dlm_end_recovery(dlm);
546
547 /* continue and look for another dead node */
548 return -EAGAIN;
549 }
550
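/* Runs only on the node that won dlm_pick_recovery_master(): request the
 * dead node's lock state from every live node, wait until each one
 * reports DATA DONE (or dies), then broadcast the finalize message. */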
551 static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
552 {
553 int status = 0;
554 struct dlm_reco_node_data *ndata;
555 int all_nodes_done;
556 int destroy = 0;
557 int pass = 0;
558
559 do {
560 /* we have become recovery master. there is no escaping
561 * this, so just keep trying until we get it. */
562 status = dlm_init_recovery_area(dlm, dead_node);
563 if (status < 0) {
564 mlog(ML_ERROR, "%s: failed to alloc recovery area, "
565 "retrying\n", dlm->name);
566 msleep(1000);
567 }
568 } while (status != 0);
569
570 /* safe to access the node data list without a lock, since this
571 * process is the only one to change the list */
572 list_for_each_entry(ndata, &dlm->reco.node_data, list) {
573 BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
574 ndata->state = DLM_RECO_NODE_DATA_REQUESTING;
575
576 mlog(0, "%s: Requesting lock info from node %u\n", dlm->name,
577 ndata->node_num);
578
579 if (ndata->node_num == dlm->node_num) {
580 ndata->state = DLM_RECO_NODE_DATA_DONE;
581 continue;
582 }
583
584 do {
585 status = dlm_request_all_locks(dlm, ndata->node_num,
586 dead_node);
587 if (status < 0) {
588 mlog_errno(status);
589 if (dlm_is_host_down(status)) {
590 /* node died, ignore it for recovery */
591 status = 0;
592 ndata->state = DLM_RECO_NODE_DATA_DEAD;
593 /* wait for the domain map to catch up
594 * with the network state. */
595 wait_event_timeout(dlm->dlm_reco_thread_wq,
596 dlm_is_node_dead(dlm,
597 ndata->node_num),
598 msecs_to_jiffies(1000));
599 mlog(0, "waited 1 sec for %u, "
600 "dead? %s\n", ndata->node_num,
601 dlm_is_node_dead(dlm, ndata->node_num) ?
602 "yes" : "no");
603 } else {
604 /* -ENOMEM on the other node */
605 mlog(0, "%s: node %u returned "
606 "%d during recovery, retrying "
607 "after a short wait\n",
608 dlm->name, ndata->node_num,
609 status);
610 msleep(100);
611 }
612 }
613 } while (status != 0);
614
615 spin_lock(&dlm_reco_state_lock);
616 switch (ndata->state) {
617 case DLM_RECO_NODE_DATA_INIT:
618 case DLM_RECO_NODE_DATA_FINALIZE_SENT:
619 case DLM_RECO_NODE_DATA_REQUESTED:
620 BUG();
621 break;
622 case DLM_RECO_NODE_DATA_DEAD:
623 mlog(0, "node %u died after requesting "
624 "recovery info for node %u\n",
625 ndata->node_num, dead_node);
626 /* fine. don't need this node's info.
627 * continue without it. */
628 break;
629 case DLM_RECO_NODE_DATA_REQUESTING:
630 ndata->state = DLM_RECO_NODE_DATA_REQUESTED;
631 mlog(0, "now receiving recovery data from "
632 "node %u for dead node %u\n",
633 ndata->node_num, dead_node);
634 break;
635 case DLM_RECO_NODE_DATA_RECEIVING:
636 mlog(0, "already receiving recovery data from "
637 "node %u for dead node %u\n",
638 ndata->node_num, dead_node);
639 break;
640 case DLM_RECO_NODE_DATA_DONE:
641 mlog(0, "already DONE receiving recovery data "
642 "from node %u for dead node %u\n",
643 ndata->node_num, dead_node);
644 break;
645 }
646 spin_unlock(&dlm_reco_state_lock);
647 }
648
649 mlog(0, "%s: Done requesting all lock info\n", dlm->name);
650
651 /* nodes should be sending reco data now
652 * just need to wait */
653
654 while (1) {
655 /* check all the nodes now to see if we are
656 * done, or if anyone died */
657 all_nodes_done = 1;
658 spin_lock(&dlm_reco_state_lock);
659 list_for_each_entry(ndata, &dlm->reco.node_data, list) {
660 mlog(0, "checking recovery state of node %u\n",
661 ndata->node_num);
662 switch (ndata->state) {
663 case DLM_RECO_NODE_DATA_INIT:
664 case DLM_RECO_NODE_DATA_REQUESTING:
665 mlog(ML_ERROR, "bad ndata state for "
666 "node %u: state=%d\n",
667 ndata->node_num, ndata->state);
668 BUG();
669 break;
670 case DLM_RECO_NODE_DATA_DEAD:
671 mlog(0, "node %u died after "
672 "requesting recovery info for "
673 "node %u\n", ndata->node_num,
674 dead_node);
675 break;
676 case DLM_RECO_NODE_DATA_RECEIVING:
677 case DLM_RECO_NODE_DATA_REQUESTED:
678 mlog(0, "%s: node %u still in state %s\n",
679 dlm->name, ndata->node_num,
680 ndata->state==DLM_RECO_NODE_DATA_RECEIVING ?
681 "receiving" : "requested");
682 all_nodes_done = 0;
683 break;
684 case DLM_RECO_NODE_DATA_DONE:
685 mlog(0, "%s: node %u state is done\n",
686 dlm->name, ndata->node_num);
687 break;
688 case DLM_RECO_NODE_DATA_FINALIZE_SENT:
689 mlog(0, "%s: node %u state is finalize\n",
690 dlm->name, ndata->node_num);
691 break;
692 }
693 }
694 spin_unlock(&dlm_reco_state_lock);
695
696 mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass,
697 all_nodes_done?"yes":"no");
698 if (all_nodes_done) {
699 int ret;
700
701 /* all nodes are now in DLM_RECO_NODE_DATA_DONE state
702 * just send a finalize message to everyone and
703 * clean up */
704 mlog(0, "all nodes are done! send finalize\n");
705 ret = dlm_send_finalize_reco_message(dlm);
706 if (ret < 0)
707 mlog_errno(ret);
708
709 spin_lock(&dlm->spinlock);
710 dlm_finish_local_lockres_recovery(dlm, dead_node,
711 dlm->node_num);
712 spin_unlock(&dlm->spinlock);
713 mlog(0, "should be done with recovery!\n");
714
715 mlog(0, "finishing recovery of %s at %lu, "
716 "dead=%u, this=%u, new=%u\n", dlm->name,
717 jiffies, dlm->reco.dead_node,
718 dlm->node_num, dlm->reco.new_master);
719 destroy = 1;
720 status = 0;
721 /* rescan everything marked dirty along the way */
722 dlm_kick_thread(dlm, NULL);
723 break;
724 }
725 /* wait to be signalled, with periodic timeout
726 * to check for node death */
727 wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
728 kthread_should_stop(),
729 msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS));
730
731 }
732
733 if (destroy)
734 dlm_destroy_recovery_area(dlm, dead_node);
735
736 return status;
737 }
738
739 static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
740 {
741 int num=0;
742 struct dlm_reco_node_data *ndata;
743
744 spin_lock(&dlm->spinlock);
745 memcpy(dlm->reco.node_map, dlm->domain_map, sizeof(dlm->domain_map));
746 /* nodes can only be removed (by dying) after dropping
747 * this lock, and death will be trapped later, so this should do */
748 spin_unlock(&dlm->spinlock);
749
750 while (1) {
751 num = find_next_bit (dlm->reco.node_map, O2NM_MAX_NODES, num);
752 if (num >= O2NM_MAX_NODES) {
753 break;
754 }
755 BUG_ON(num == dead_node);
756
757 ndata = kzalloc(sizeof(*ndata), GFP_NOFS);
758 if (!ndata) {
759 dlm_destroy_recovery_area(dlm, dead_node);
760 return -ENOMEM;
761 }
762 ndata->node_num = num;
763 ndata->state = DLM_RECO_NODE_DATA_INIT;
764 spin_lock(&dlm_reco_state_lock);
765 list_add_tail(&ndata->list, &dlm->reco.node_data);
766 spin_unlock(&dlm_reco_state_lock);
767 num++;
768 }
769
770 return 0;
771 }
772
773 static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
774 {
775 struct dlm_reco_node_data *ndata, *next;
776 LIST_HEAD(tmplist);
777
778 spin_lock(&dlm_reco_state_lock);
779 list_splice_init(&dlm->reco.node_data, &tmplist);
780 spin_unlock(&dlm_reco_state_lock);
781
782 list_for_each_entry_safe(ndata, next, &tmplist, list) {
783 list_del_init(&ndata->list);
784 kfree(ndata);
785 }
786 }
787
788 static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
789 u8 dead_node)
790 {
791 struct dlm_lock_request lr;
792 enum dlm_status ret;
793
794 mlog(0, "\n");
795
796
797 mlog(0, "dlm_request_all_locks: dead node is %u, sending request "
798 "to %u\n", dead_node, request_from);
799
800 memset(&lr, 0, sizeof(lr));
801 lr.node_idx = dlm->node_num;
802 lr.dead_node = dead_node;
803
804 /* send message */
805 ret = DLM_NOLOCKMGR;
806 ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key,
807 &lr, sizeof(lr), request_from, NULL);
808
809 /* negative status is handled by caller */
810 if (ret < 0)
811 mlog(ML_ERROR, "%s: Error %d send LOCK_REQUEST to node %u "
812 "to recover dead node %u\n", dlm->name, ret,
813 request_from, dead_node);
814 /* return from here, then
815  * sleep until all received or error */
816 return ret;
817
818 }
819
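/* Runs on every live node when the recovery master calls
 * dlm_request_all_locks(): validate the request against our own reco
 * state, then push the heavy lifting (scanning and sending lock state)
 * to dlm_request_all_locks_worker() via the dlm work queue. */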
820 int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data,
821 void **ret_data)
822 {
823 struct dlm_ctxt *dlm = data;
824 struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf;
825 char *buf = NULL;
826 struct dlm_work_item *item = NULL;
827
828 if (!dlm_grab(dlm))
829 return -EINVAL;
830
831 if (lr->dead_node != dlm->reco.dead_node) {
832 mlog(ML_ERROR, "%s: node %u sent dead_node=%u, but local "
833 "dead_node is %u\n", dlm->name, lr->node_idx,
834 lr->dead_node, dlm->reco.dead_node);
835 dlm_print_reco_node_status(dlm);
836 /* this is a hack */
837 dlm_put(dlm);
838 return -ENOMEM;
839 }
840 BUG_ON(lr->dead_node != dlm->reco.dead_node);
841
842 item = kzalloc(sizeof(*item), GFP_NOFS);
843 if (!item) {
844 dlm_put(dlm);
845 return -ENOMEM;
846 }
847
848 /* this will get freed by dlm_request_all_locks_worker */
849 buf = (char *) __get_free_page(GFP_NOFS);
850 if (!buf) {
851 kfree(item);
852 dlm_put(dlm);
853 return -ENOMEM;
854 }
855
856 /* queue up work for dlm_request_all_locks_worker */
857 dlm_grab(dlm); /* get an extra ref for the work item */
858 dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf);
859 item->u.ral.reco_master = lr->node_idx;
860 item->u.ral.dead_node = lr->dead_node;
861 spin_lock(&dlm->work_lock);
862 list_add_tail(&item->list, &dlm->work_list);
863 spin_unlock(&dlm->work_lock);
864 queue_work(dlm->dlm_worker, &dlm->dispatched_work);
865
866 dlm_put(dlm);
867 return 0;
868 }
869
870 static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data)
871 {
872 struct dlm_migratable_lockres *mres;
873 struct dlm_lock_resource *res;
874 struct dlm_ctxt *dlm;
875 LIST_HEAD(resources);
876 int ret;
877 u8 dead_node, reco_master;
878 int skip_all_done = 0;
879
880 dlm = item->dlm;
881 dead_node = item->u.ral.dead_node;
882 reco_master = item->u.ral.reco_master;
883 mres = (struct dlm_migratable_lockres *)data;
884
885 mlog(0, "%s: recovery worker started, dead=%u, master=%u\n",
886 dlm->name, dead_node, reco_master);
887
888 if (dead_node != dlm->reco.dead_node ||
889 reco_master != dlm->reco.new_master) {
890 /* worker could have been created before the recovery master
891 * died. if so, do not continue, but do not error. */
892 if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
893 mlog(ML_NOTICE, "%s: will not send recovery state, "
894 "recovery master %u died, thread=(dead=%u,mas=%u)"
895 " current=(dead=%u,mas=%u)\n", dlm->name,
896 reco_master, dead_node, reco_master,
897 dlm->reco.dead_node, dlm->reco.new_master);
898 } else {
899 mlog(ML_NOTICE, "%s: reco state invalid: reco(dead=%u, "
900 "master=%u), request(dead=%u, master=%u)\n",
901 dlm->name, dlm->reco.dead_node,
902 dlm->reco.new_master, dead_node, reco_master);
903 }
904 goto leave;
905 }
906
907 /* lock resources should have already been moved to the
908 * dlm->reco.resources list. now move items from that list
909 * to a temp list if the dead owner matches. note that the
910 * whole cluster recovers only one node at a time, so we
911 * can safely move UNKNOWN lock resources for each recovery
912 * session. */
913 dlm_move_reco_locks_to_list(dlm, &resources, dead_node);
914
915 /* now we can begin blasting lockreses without the dlm lock */
916
917 /* any errors returned will be due to the new_master dying,
918 * the dlm_reco_thread should detect this */
919 list_for_each_entry(res, &resources, recovering) {
920 ret = dlm_send_one_lockres(dlm, res, mres, reco_master,
921 DLM_MRES_RECOVERY);
922 if (ret < 0) {
923 mlog(ML_ERROR, "%s: node %u went down while sending "
924 "recovery state for dead node %u, ret=%d\n", dlm->name,
925 reco_master, dead_node, ret);
926 skip_all_done = 1;
927 break;
928 }
929 }
930
931 /* move the resources back to the list */
932 spin_lock(&dlm->spinlock);
933 list_splice_init(&resources, &dlm->reco.resources);
934 spin_unlock(&dlm->spinlock);
935
936 if (!skip_all_done) {
937 ret = dlm_send_all_done_msg(dlm, dead_node, reco_master);
938 if (ret < 0) {
939 mlog(ML_ERROR, "%s: node %u went down while sending "
940 "recovery all-done for dead node %u, ret=%d\n",
941 dlm->name, reco_master, dead_node, ret);
942 }
943 }
944 leave:
945 free_page((unsigned long)data);
946 }
947
948
949 static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
950 {
951 int ret, tmpret;
952 struct dlm_reco_data_done done_msg;
953
954 memset(&done_msg, 0, sizeof(done_msg));
955 done_msg.node_idx = dlm->node_num;
956 done_msg.dead_node = dead_node;
957 mlog(0, "sending DATA DONE message to %u, "
958 "my node=%u, dead node=%u\n", send_to, done_msg.node_idx,
959 done_msg.dead_node);
960
961 ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
962 sizeof(done_msg), send_to, &tmpret);
963 if (ret < 0) {
964 mlog(ML_ERROR, "%s: Error %d send RECO_DATA_DONE to node %u "
965 "to recover dead node %u\n", dlm->name, ret, send_to,
966 dead_node);
967 if (!dlm_is_host_down(ret)) {
968 BUG();
969 }
970 } else
971 ret = tmpret;
972 return ret;
973 }
974
975
976 int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data,
977 void **ret_data)
978 {
979 struct dlm_ctxt *dlm = data;
980 struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf;
981 struct dlm_reco_node_data *ndata = NULL;
982 int ret = -EINVAL;
983
984 if (!dlm_grab(dlm))
985 return -EINVAL;
986
987 mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, "
988 "node_idx=%u, this node=%u\n", done->dead_node,
989 dlm->reco.dead_node, done->node_idx, dlm->node_num);
990
991 mlog_bug_on_msg((done->dead_node != dlm->reco.dead_node),
992 "Got DATA DONE: dead_node=%u, reco.dead_node=%u, "
993 "node_idx=%u, this node=%u\n", done->dead_node,
994 dlm->reco.dead_node, done->node_idx, dlm->node_num);
995
996 spin_lock(&dlm_reco_state_lock);
997 list_for_each_entry(ndata, &dlm->reco.node_data, list) {
998 if (ndata->node_num != done->node_idx)
999 continue;
1000
1001 switch (ndata->state) {
1002 /* should have moved beyond INIT but not to FINALIZE yet */
1003 case DLM_RECO_NODE_DATA_INIT:
1004 case DLM_RECO_NODE_DATA_DEAD:
1005 case DLM_RECO_NODE_DATA_FINALIZE_SENT:
1006 mlog(ML_ERROR, "bad ndata state for node %u:"
1007 " state=%d\n", ndata->node_num,
1008 ndata->state);
1009 BUG();
1010 break;
1011 /* these states are possible at this point, anywhere along
1012 * the line of recovery */
1013 case DLM_RECO_NODE_DATA_DONE:
1014 case DLM_RECO_NODE_DATA_RECEIVING:
1015 case DLM_RECO_NODE_DATA_REQUESTED:
1016 case DLM_RECO_NODE_DATA_REQUESTING:
1017 mlog(0, "node %u is DONE sending "
1018 "recovery data!\n",
1019 ndata->node_num);
1020
1021 ndata->state = DLM_RECO_NODE_DATA_DONE;
1022 ret = 0;
1023 break;
1024 }
1025 }
1026 spin_unlock(&dlm_reco_state_lock);
1027
1028 /* wake the recovery thread, some node is done */
1029 if (!ret)
1030 dlm_kick_recovery_thread(dlm);
1031
1032 if (ret < 0)
1033 mlog(ML_ERROR, "failed to find recovery node data for node "
1034 "%u\n", done->node_idx);
1035 dlm_put(dlm);
1036
1037 mlog(0, "leaving reco data done handler, ret=%d\n", ret);
1038 return ret;
1039 }
1040
1041 static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
1042 struct list_head *list,
1043 u8 dead_node)
1044 {
1045 struct dlm_lock_resource *res, *next;
1046 struct dlm_lock *lock;
1047
1048 spin_lock(&dlm->spinlock);
1049 list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
1050 /* always prune any $RECOVERY entries for dead nodes,
1051 * otherwise hangs can occur during later recovery */
1052 if (dlm_is_recovery_lock(res->lockname.name,
1053 res->lockname.len)) {
1054 spin_lock(&res->spinlock);
1055 list_for_each_entry(lock, &res->granted, list) {
1056 if (lock->ml.node == dead_node) {
1057 mlog(0, "AHA! there was "
1058 "a $RECOVERY lock for dead "
1059 "node %u (%s)!\n",
1060 dead_node, dlm->name);
1061 list_del_init(&lock->list);
1062 dlm_lock_put(lock);
1063 break;
1064 }
1065 }
1066 spin_unlock(&res->spinlock);
1067 continue;
1068 }
1069
1070 if (res->owner == dead_node) {
1071 mlog(0, "found lockres owned by dead node while "
1072 "doing recovery for node %u. sending it.\n",
1073 dead_node);
1074 list_move_tail(&res->recovering, list);
1075 } else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
1076 mlog(0, "found UNKNOWN owner while doing recovery "
1077 "for node %u. sending it.\n", dead_node);
1078 list_move_tail(&res->recovering, list);
1079 }
1080 }
1081 spin_unlock(&dlm->spinlock);
1082 }
1083
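/* Counts locks across all three queues. This (and
 * dlm_list_num_to_pointer() below) relies on granted, converting and
 * blocked being laid out consecutively in struct dlm_lock_resource, so
 * 'queue++' steps from one list head to the next. */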
1084 static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res)
1085 {
1086 int total_locks = 0;
1087 struct list_head *iter, *queue = &res->granted;
1088 int i;
1089
1090 for (i=0; i<3; i++) {
1091 list_for_each(iter, queue)
1092 total_locks++;
1093 queue++;
1094 }
1095 return total_locks;
1096 }
1097
1098
1099 static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
1100 struct dlm_migratable_lockres *mres,
1101 u8 send_to,
1102 struct dlm_lock_resource *res,
1103 int total_locks)
1104 {
1105 u64 mig_cookie = be64_to_cpu(mres->mig_cookie);
1106 int mres_total_locks = be32_to_cpu(mres->total_locks);
1107 int sz, ret = 0, status = 0;
1108 u8 orig_flags = mres->flags,
1109 orig_master = mres->master;
1110
1111 BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS);
1112 if (!mres->num_locks)
1113 return 0;
1114
1115 sz = sizeof(struct dlm_migratable_lockres) +
1116 (mres->num_locks * sizeof(struct dlm_migratable_lock));
1117
1118 /* add an all-done flag if we reached the last lock */
1119 orig_flags = mres->flags;
1120 BUG_ON(total_locks > mres_total_locks);
1121 if (total_locks == mres_total_locks)
1122 mres->flags |= DLM_MRES_ALL_DONE;
1123
1124 mlog(0, "%s:%.*s: sending mig lockres (%s) to %u\n",
1125 dlm->name, res->lockname.len, res->lockname.name,
1126 orig_flags & DLM_MRES_MIGRATION ? "migration" : "recovery",
1127 send_to);
1128
1129 /* send it */
1130 ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres,
1131 sz, send_to, &status);
1132 if (ret < 0) {
1133 /* XXX: negative status is not handled.
1134 * this will end up killing this node. */
1135 mlog(ML_ERROR, "%s: res %.*s, Error %d send MIG_LOCKRES to "
1136 "node %u (%s)\n", dlm->name, mres->lockname_len,
1137 mres->lockname, ret, send_to,
1138 (orig_flags & DLM_MRES_MIGRATION ?
1139 "migration" : "recovery"));
1140 } else {
1141 /* might get an -ENOMEM back here */
1142 ret = status;
1143 if (ret < 0) {
1144 mlog_errno(ret);
1145
1146 if (ret == -EFAULT) {
1147 mlog(ML_ERROR, "node %u told me to kill "
1148 "myself!\n", send_to);
1149 BUG();
1150 }
1151 }
1152 }
1153
1154 /* zero and reinit the message buffer */
1155 dlm_init_migratable_lockres(mres, res->lockname.name,
1156 res->lockname.len, mres_total_locks,
1157 mig_cookie, orig_flags, orig_master);
1158 return ret;
1159 }
1160
1161 static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
1162 const char *lockname, int namelen,
1163 int total_locks, u64 cookie,
1164 u8 flags, u8 master)
1165 {
1166 /* mres here is one full page */
1167 clear_page(mres);
1168 mres->lockname_len = namelen;
1169 memcpy(mres->lockname, lockname, namelen);
1170 mres->num_locks = 0;
1171 mres->total_locks = cpu_to_be32(total_locks);
1172 mres->mig_cookie = cpu_to_be64(cookie);
1173 mres->flags = flags;
1174 mres->master = master;
1175 }
1176
1177 static void dlm_prepare_lvb_for_migration(struct dlm_lock *lock,
1178 struct dlm_migratable_lockres *mres,
1179 int queue)
1180 {
1181 if (!lock->lksb)
1182 return;
1183
1184 /* Ignore lvb in all locks in the blocked list */
1185 if (queue == DLM_BLOCKED_LIST)
1186 return;
1187
1188 /* Only consider lvbs in locks with granted EX or PR lock levels */
1189 if (lock->ml.type != LKM_EXMODE && lock->ml.type != LKM_PRMODE)
1190 return;
1191
1192 if (dlm_lvb_is_empty(mres->lvb)) {
1193 memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
1194 return;
1195 }
1196
1197 /* Ensure the lvb copied for migration matches in other valid locks */
1198 if (!memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))
1199 return;
1200
1201 mlog(ML_ERROR, "Mismatched lvb in lock cookie=%u:%llu, name=%.*s, "
1202 "node=%u\n",
1203 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
1204 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
1205 lock->lockres->lockname.len, lock->lockres->lockname.name,
1206 lock->ml.node);
1207 dlm_print_one_lock_resource(lock->lockres);
1208 BUG();
1209 }
1210
1211 /* returns 1 if this lock fills the network structure,
1212 * 0 otherwise */
1213 static int dlm_add_lock_to_array(struct dlm_lock *lock,
1214 struct dlm_migratable_lockres *mres, int queue)
1215 {
1216 struct dlm_migratable_lock *ml;
1217 int lock_num = mres->num_locks;
1218
1219 ml = &(mres->ml[lock_num]);
1220 ml->cookie = lock->ml.cookie;
1221 ml->type = lock->ml.type;
1222 ml->convert_type = lock->ml.convert_type;
1223 ml->highest_blocked = lock->ml.highest_blocked;
1224 ml->list = queue;
1225 if (lock->lksb) {
1226 ml->flags = lock->lksb->flags;
1227 dlm_prepare_lvb_for_migration(lock, mres, queue);
1228 }
1229 ml->node = lock->ml.node;
1230 mres->num_locks++;
1231 /* we reached the max, send this network message */
1232 if (mres->num_locks == DLM_MAX_MIGRATABLE_LOCKS)
1233 return 1;
1234 return 0;
1235 }
1236
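/* A lockres with no locks still needs its mastery reference carried
 * over, so we send one all-zero "dummy" lock (cookie 0, every mode
 * LKM_IVMODE, blocked list). The receiving side recognizes it in
 * dlm_is_dummy_lock() and just sets the refmap bit for the sender. */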
1237 static void dlm_add_dummy_lock(struct dlm_ctxt *dlm,
1238 struct dlm_migratable_lockres *mres)
1239 {
1240 struct dlm_lock dummy;
1241 memset(&dummy, 0, sizeof(dummy));
1242 dummy.ml.cookie = 0;
1243 dummy.ml.type = LKM_IVMODE;
1244 dummy.ml.convert_type = LKM_IVMODE;
1245 dummy.ml.highest_blocked = LKM_IVMODE;
1246 dummy.lksb = NULL;
1247 dummy.ml.node = dlm->node_num;
1248 dlm_add_lock_to_array(&dummy, mres, DLM_BLOCKED_LIST);
1249 }
1250
1251 static inline int dlm_is_dummy_lock(struct dlm_ctxt *dlm,
1252 struct dlm_migratable_lock *ml,
1253 u8 *nodenum)
1254 {
1255 if (unlikely(ml->cookie == 0 &&
1256 ml->type == LKM_IVMODE &&
1257 ml->convert_type == LKM_IVMODE &&
1258 ml->highest_blocked == LKM_IVMODE &&
1259 ml->list == DLM_BLOCKED_LIST)) {
1260 *nodenum = ml->node;
1261 return 1;
1262 }
1263 return 0;
1264 }
1265
1266 int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
1267 struct dlm_migratable_lockres *mres,
1268 u8 send_to, u8 flags)
1269 {
1270 struct list_head *queue;
1271 int total_locks, i;
1272 u64 mig_cookie = 0;
1273 struct dlm_lock *lock;
1274 int ret = 0;
1275
1276 BUG_ON(!(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));
1277
1278 mlog(0, "sending to %u\n", send_to);
1279
1280 total_locks = dlm_num_locks_in_lockres(res);
1281 if (total_locks > DLM_MAX_MIGRATABLE_LOCKS) {
1282 /* rare, but possible */
1283 mlog(0, "argh. lockres has %d locks. this will "
1284 "require more than one network packet to "
1285 "migrate\n", total_locks);
1286 mig_cookie = dlm_get_next_mig_cookie();
1287 }
1288
1289 dlm_init_migratable_lockres(mres, res->lockname.name,
1290 res->lockname.len, total_locks,
1291 mig_cookie, flags, res->owner);
1292
1293 total_locks = 0;
1294 for (i=DLM_GRANTED_LIST; i<=DLM_BLOCKED_LIST; i++) {
1295 queue = dlm_list_idx_to_ptr(res, i);
1296 list_for_each_entry(lock, queue, list) {
1297 /* add another lock. */
1298 total_locks++;
1299 if (!dlm_add_lock_to_array(lock, mres, i))
1300 continue;
1301
1302 /* this filled the lock message,
1303 * we must send it immediately. */
1304 ret = dlm_send_mig_lockres_msg(dlm, mres, send_to,
1305 res, total_locks);
1306 if (ret < 0)
1307 goto error;
1308 }
1309 }
1310 if (total_locks == 0) {
1311 /* send a dummy lock to indicate a mastery reference only */
1312 mlog(0, "%s:%.*s: sending dummy lock to %u, %s\n",
1313 dlm->name, res->lockname.len, res->lockname.name,
1314 send_to, flags & DLM_MRES_RECOVERY ? "recovery" :
1315 "migration");
1316 dlm_add_dummy_lock(dlm, mres);
1317 }
1318 /* flush any remaining locks */
1319 ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks);
1320 if (ret < 0)
1321 goto error;
1322 return ret;
1323
1324 error:
1325 mlog(ML_ERROR, "%s: dlm_send_mig_lockres_msg returned %d\n",
1326 dlm->name, ret);
1327 if (!dlm_is_host_down(ret))
1328 BUG();
1329 mlog(0, "%s: node %u went down while sending %s "
1330 "lockres %.*s\n", dlm->name, send_to,
1331 flags & DLM_MRES_RECOVERY ? "recovery" : "migration",
1332 res->lockname.len, res->lockname.name);
1333 return ret;
1334 }
1335
1336
1337
1338 /*
1339 * this message will contain no more than one page worth of
1340 * recovery data, and it will work on only one lockres.
1341 * there may be many locks in this page, and we may need to wait
1342 * for additional packets to complete all the locks (rare, but
1343 * possible).
1344 */
1345 /*
1346 * NOTE: the allocation error cases here are scary
1347 * we really cannot afford to fail an alloc in recovery
1348 * do we spin? returning an error only delays the problem really
1349 */
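/* (For its own allocations, dlm_remaster_locks() above answers the
 * "do we spin?" question with yes: it msleep()s and retries until
 * dlm_init_recovery_area() succeeds.) */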
1350
1351 int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
1352 void **ret_data)
1353 {
1354 struct dlm_ctxt *dlm = data;
1355 struct dlm_migratable_lockres *mres =
1356 (struct dlm_migratable_lockres *)msg->buf;
1357 int ret = 0;
1358 u8 real_master;
1359 u8 extra_refs = 0;
1360 char *buf = NULL;
1361 struct dlm_work_item *item = NULL;
1362 struct dlm_lock_resource *res = NULL;
1363
1364 if (!dlm_grab(dlm))
1365 return -EINVAL;
1366
1367 BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));
1368
1369 real_master = mres->master;
1370 if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1371 /* cannot migrate a lockres with no master */
1372 BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
1373 }
1374
1375 mlog(0, "%s message received from node %u\n",
1376 (mres->flags & DLM_MRES_RECOVERY) ?
1377 "recovery" : "migration", mres->master);
1378 if (mres->flags & DLM_MRES_ALL_DONE)
1379 mlog(0, "all done flag. all lockres data received!\n");
1380
1381 ret = -ENOMEM;
1382 buf = kmalloc(be16_to_cpu(msg->data_len), GFP_NOFS);
1383 item = kzalloc(sizeof(*item), GFP_NOFS);
1384 if (!buf || !item)
1385 goto leave;
1386
1387 /* lookup the lock to see if we have a secondary queue for this
1388 * already... just add the locks in and this will have its owner
1389 * and RECOVERY flag changed when it completes. */
1390 res = dlm_lookup_lockres(dlm, mres->lockname, mres->lockname_len);
1391 if (res) {
1392 /* this will get a ref on res */
1393 /* mark it as recovering/migrating and hash it */
1394 spin_lock(&res->spinlock);
1395 if (mres->flags & DLM_MRES_RECOVERY) {
1396 res->state |= DLM_LOCK_RES_RECOVERING;
1397 } else {
1398 if (res->state & DLM_LOCK_RES_MIGRATING) {
1399 /* this is at least the second
1400 * lockres message */
1401 mlog(0, "lock %.*s is already migrating\n",
1402 mres->lockname_len,
1403 mres->lockname);
1404 } else if (res->state & DLM_LOCK_RES_RECOVERING) {
1405 /* caller should BUG */
1406 mlog(ML_ERROR, "node is attempting to migrate "
1407 "lock %.*s, but marked as recovering!\n",
1408 mres->lockname_len, mres->lockname);
1409 ret = -EFAULT;
1410 spin_unlock(&res->spinlock);
1411 dlm_lockres_put(res);
1412 goto leave;
1413 }
1414 res->state |= DLM_LOCK_RES_MIGRATING;
1415 }
1416 spin_unlock(&res->spinlock);
1417 } else {
1418 /* need to allocate, just like if it was
1419 * mastered here normally */
1420 res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len);
1421 if (!res)
1422 goto leave;
1423
1424 /* to match the ref that we would have gotten if
1425 * dlm_lookup_lockres had succeeded */
1426 dlm_lockres_get(res);
1427
1428 /* mark it as recovering/migrating and hash it */
1429 if (mres->flags & DLM_MRES_RECOVERY)
1430 res->state |= DLM_LOCK_RES_RECOVERING;
1431 else
1432 res->state |= DLM_LOCK_RES_MIGRATING;
1433
1434 spin_lock(&dlm->spinlock);
1435 __dlm_insert_lockres(dlm, res);
1436 spin_unlock(&dlm->spinlock);
1437
1438 /* Add an extra ref for this lock-less lockres lest the
1439 * dlm_thread purges it before we get the chance to add
1440 * locks to it */
1441 dlm_lockres_get(res);
1442
1443 /* There are three refs that need to be put.
1444 * 1. Taken above.
1445 * 2. kref_init in dlm_new_lockres()->dlm_init_lockres().
1446 * 3. dlm_lookup_lockres()
1447 * The first one is handled at the end of this function. The
1448 * other two are handled in the worker thread after locks have
1449 * been attached. Yes, we don't wait for purge time to match
1450  * kref_init. The lockres will still have at least one ref
1451 * added because it is in the hash __dlm_insert_lockres() */
1452 extra_refs++;
1453
1454 /* now that the new lockres is inserted,
1455 * make it usable by other processes */
1456 spin_lock(&res->spinlock);
1457 res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
1458 spin_unlock(&res->spinlock);
1459 wake_up(&res->wq);
1460 }
1461
1462 /* at this point we have allocated everything we need,
1463 * and we have a hashed lockres with an extra ref and
1464 * the proper res->state flags. */
1465 ret = 0;
1466 spin_lock(&res->spinlock);
1467 /* drop this either when master requery finds a different master
1468 * or when a lock is added by the recovery worker */
1469 dlm_lockres_grab_inflight_ref(dlm, res);
1470 if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1471 /* migration cannot have an unknown master */
1472 BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
1473 mlog(0, "recovery has passed me a lockres with an "
1474 "unknown owner.. will need to requery: "
1475 "%.*s\n", mres->lockname_len, mres->lockname);
1476 } else {
1477 /* take a reference now to pin the lockres, drop it
1478 * when locks are added in the worker */
1479 dlm_change_lockres_owner(dlm, res, dlm->node_num);
1480 }
1481 spin_unlock(&res->spinlock);
1482
1483 /* queue up work for dlm_mig_lockres_worker */
1484 dlm_grab(dlm); /* get an extra ref for the work item */
1485 memcpy(buf, msg->buf, be16_to_cpu(msg->data_len)); /* copy the whole message */
1486 dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf);
1487 item->u.ml.lockres = res; /* already have a ref */
1488 item->u.ml.real_master = real_master;
1489 item->u.ml.extra_ref = extra_refs;
1490 spin_lock(&dlm->work_lock);
1491 list_add_tail(&item->list, &dlm->work_list);
1492 spin_unlock(&dlm->work_lock);
1493 queue_work(dlm->dlm_worker, &dlm->dispatched_work);
1494
1495 leave:
1496 /* One extra ref taken needs to be put here */
1497 if (extra_refs)
1498 dlm_lockres_put(res);
1499
1500 dlm_put(dlm);
1501 if (ret < 0) {
1502 kfree(buf);
1503 kfree(item);
1504 mlog_errno(ret);
1505 }
1506
1507 return ret;
1508 }
1509
1510
1511 static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data)
1512 {
1513 struct dlm_ctxt *dlm;
1514 struct dlm_migratable_lockres *mres;
1515 int ret = 0;
1516 struct dlm_lock_resource *res;
1517 u8 real_master;
1518 u8 extra_ref;
1519
1520 dlm = item->dlm;
1521 mres = (struct dlm_migratable_lockres *)data;
1522
1523 res = item->u.ml.lockres;
1524 real_master = item->u.ml.real_master;
1525 extra_ref = item->u.ml.extra_ref;
1526
1527 if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1528 /* this case is super-rare. only occurs if
1529 * node death happens during migration. */
1530 again:
1531 ret = dlm_lockres_master_requery(dlm, res, &real_master);
1532 if (ret < 0) {
1533 mlog(0, "dlm_lockres_master_requery ret=%d\n",
1534 ret);
1535 goto again;
1536 }
1537 if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1538 mlog(0, "lockres %.*s not claimed. "
1539 "this node will take it.\n",
1540 res->lockname.len, res->lockname.name);
1541 } else {
1542 spin_lock(&res->spinlock);
1543 dlm_lockres_drop_inflight_ref(dlm, res);
1544 spin_unlock(&res->spinlock);
1545 mlog(0, "master needs to respond to sender "
1546 "that node %u still owns %.*s\n",
1547 real_master, res->lockname.len,
1548 res->lockname.name);
1549 /* cannot touch this lockres */
1550 goto leave;
1551 }
1552 }
1553
1554 ret = dlm_process_recovery_data(dlm, res, mres);
1555 if (ret < 0)
1556 mlog(0, "dlm_process_recovery_data returned %d\n", ret);
1557 else
1558 mlog(0, "dlm_process_recovery_data succeeded\n");
1559
1560 if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) ==
1561 (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) {
1562 ret = dlm_finish_migration(dlm, res, mres->master);
1563 if (ret < 0)
1564 mlog_errno(ret);
1565 }
1566
1567 leave:
1568 /* See comment in dlm_mig_lockres_handler() */
1569 if (res) {
1570 if (extra_ref)
1571 dlm_lockres_put(res);
1572 dlm_lockres_put(res);
1573 }
1574 kfree(data);
1575 }
1576
1577
1578
1579 static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
1580 struct dlm_lock_resource *res,
1581 u8 *real_master)
1582 {
1583 struct dlm_node_iter iter;
1584 int nodenum;
1585 int ret = 0;
1586
1587 *real_master = DLM_LOCK_RES_OWNER_UNKNOWN;
1588
1589 /* we only reach here if one of the two nodes in a
1590 * migration died while the migration was in progress.
1591 * at this point we need to requery the master. we
1592 * know that the new_master got as far as creating
1593 * an mle on at least one node, but we do not know
1594 * if any nodes had actually cleared the mle and set
1595 * the master to the new_master. the old master
1596 * is supposed to set the owner to UNKNOWN in the
1597 * event of a new_master death, so the only possible
1598 * responses that we can get from nodes here are
1599 * that the master is new_master, or that the master
1600 * is UNKNOWN.
1601 * if all nodes come back with UNKNOWN then we know
1602 * the lock needs remastering here.
1603 * if any node comes back with a valid master, check
1604 * to see if that master is the one that we are
1605 * recovering. if so, then the new_master died and
1606 * we need to remaster this lock. if not, then the
1607 * new_master survived and that node will respond to
1608 * other nodes about the owner.
1609 * if there is an owner, this node needs to dump this
1610 * lockres and alert the sender that this lockres
1611 * was rejected. */
1612 spin_lock(&dlm->spinlock);
1613 dlm_node_iter_init(dlm->domain_map, &iter);
1614 spin_unlock(&dlm->spinlock);
1615
1616 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
1617 /* do not send to self */
1618 if (nodenum == dlm->node_num)
1619 continue;
1620 ret = dlm_do_master_requery(dlm, res, nodenum, real_master);
1621 if (ret < 0) {
1622 mlog_errno(ret);
1623 if (!dlm_is_host_down(ret))
1624 BUG();
1625 /* host is down, so answer for that node would be
1626 * DLM_LOCK_RES_OWNER_UNKNOWN. continue. */
1627 }
1628 if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) {
1629 mlog(0, "lock master is %u\n", *real_master);
1630 break;
1631 }
1632 }
1633 return ret;
1634 }
1635
1636
1637 int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
1638 u8 nodenum, u8 *real_master)
1639 {
1640 int ret = -EINVAL;
1641 struct dlm_master_requery req;
1642 int status = DLM_LOCK_RES_OWNER_UNKNOWN;
1643
1644 memset(&req, 0, sizeof(req));
1645 req.node_idx = dlm->node_num;
1646 req.namelen = res->lockname.len;
1647 memcpy(req.name, res->lockname.name, res->lockname.len);
1648
1649 ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key,
1650 &req, sizeof(req), nodenum, &status);
1651 /* XXX: negative status not handled properly here. */
1652 if (ret < 0)
1653 mlog(ML_ERROR, "Error %d when sending message %u (key "
1654 "0x%x) to node %u\n", ret, DLM_MASTER_REQUERY_MSG,
1655 dlm->key, nodenum);
1656 else {
1657 BUG_ON(status < 0);
1658 BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN);
1659 *real_master = (u8) (status & 0xff);
1660 mlog(0, "node %u responded to master requery with %u\n",
1661 nodenum, *real_master);
1662 ret = 0;
1663 }
1664 return ret;
1665 }
1666
1667
1668 /* this function cannot error, so unless the sending
1669 * or receiving of the message failed, the owner can
1670 * be trusted */
1671 int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
1672 void **ret_data)
1673 {
1674 struct dlm_ctxt *dlm = data;
1675 struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf;
1676 struct dlm_lock_resource *res = NULL;
1677 unsigned int hash;
1678 int master = DLM_LOCK_RES_OWNER_UNKNOWN;
1679 u32 flags = DLM_ASSERT_MASTER_REQUERY;
1680
1681 if (!dlm_grab(dlm)) {
1682 /* since the domain has gone away on this
1683 * node, the proper response is UNKNOWN */
1684 return master;
1685 }
1686
1687 hash = dlm_lockid_hash(req->name, req->namelen);
1688
1689 spin_lock(&dlm->spinlock);
1690 res = __dlm_lookup_lockres(dlm, req->name, req->namelen, hash);
1691 if (res) {
1692 spin_lock(&res->spinlock);
1693 master = res->owner;
1694 if (master == dlm->node_num) {
1695 int ret = dlm_dispatch_assert_master(dlm, res,
1696 0, 0, flags);
1697 if (ret < 0) {
1698 mlog_errno(-ENOMEM);
1699 /* retry!? */
1700 BUG();
1701 }
1702 } else /* put... in case we are not the master */
1703 dlm_lockres_put(res);
1704 spin_unlock(&res->spinlock);
1705 }
1706 spin_unlock(&dlm->spinlock);
1707
1708 dlm_put(dlm);
1709 return master;
1710 }
1711
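/* Map a wire-format list number (0=granted, 1=converting, 2=blocked)
 * back to the matching list head, using the same consecutive-layout
 * trick as dlm_num_locks_in_lockres() above. */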
1712 static inline struct list_head *
1713 dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num)
1714 {
1715 struct list_head *ret;
1716 BUG_ON(list_num < 0);
1717 BUG_ON(list_num > 2);
1718 ret = &(res->granted);
1719 ret += list_num;
1720 return ret;
1721 }
1722 /* TODO: do ast flush business
1723 * TODO: do MIGRATING and RECOVERING spinning
1724 */
1725
1726 /*
1727 * NOTE about in-flight requests during migration:
1728 *
1729 * Before attempting the migrate, the master has marked the lockres as
1730  * MIGRATING and then flushed all of its pending ASTs. So any in-flight
1731 * requests either got queued before the MIGRATING flag got set, in which
1732 * case the lock data will reflect the change and a return message is on
1733 * the way, or the request failed to get in before MIGRATING got set. In
1734 * this case, the caller will be told to spin and wait for the MIGRATING
1735 * flag to be dropped, then recheck the master.
1736 * This holds true for the convert, cancel and unlock cases, and since lvb
1737 * updates are tied to these same messages, it applies to lvb updates as
1738 * well. For the lock case, there is no way a lock can be on the master
1739 * queue and not be on the secondary queue since the lock is always added
1740 * locally first. This means that the new target node will never be sent
1741 * a lock that he doesn't already have on the list.
1742 * In total, this means that the local lock is correct and should not be
1743 * updated to match the one sent by the master. Any messages sent back
1744 * from the master before the MIGRATING flag will bring the lock properly
1745 * up-to-date, and the change will be ordered properly for the waiter.
1746 * We will *not* attempt to modify the lock underneath the waiter.
1747 */
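/* dlm_process_recovery_data() below honors this: a lock owned by the
 * local node is only repositioned on its queue (list_move_tail()),
 * never overwritten with the master's copy. */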
1748
1749 static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
1750 struct dlm_lock_resource *res,
1751 struct dlm_migratable_lockres *mres)
1752 {
1753 struct dlm_migratable_lock *ml;
1754 struct list_head *queue;
1755 struct list_head *tmpq = NULL;
1756 struct dlm_lock *newlock = NULL;
1757 struct dlm_lockstatus *lksb = NULL;
1758 int ret = 0;
1759 int i, j, bad;
1760 struct dlm_lock *lock = NULL;
1761 u8 from = O2NM_MAX_NODES;
1762 unsigned int added = 0;
1763 __be64 c;
1764
1765 mlog(0, "running %d locks for this lockres\n", mres->num_locks);
1766 for (i=0; i<mres->num_locks; i++) {
1767 ml = &(mres->ml[i]);
1768
1769 if (dlm_is_dummy_lock(dlm, ml, &from)) {
1770 /* placeholder, just need to set the refmap bit */
1771 BUG_ON(mres->num_locks != 1);
1772 mlog(0, "%s:%.*s: dummy lock for %u\n",
1773 dlm->name, mres->lockname_len, mres->lockname,
1774 from);
1775 spin_lock(&res->spinlock);
1776 dlm_lockres_set_refmap_bit(dlm, res, from);
1777 spin_unlock(&res->spinlock);
1778 added++;
1779 break;
1780 }
1781 BUG_ON(ml->highest_blocked != LKM_IVMODE);
1782 newlock = NULL;
1783 lksb = NULL;
1784
1785 queue = dlm_list_num_to_pointer(res, ml->list);
1786 tmpq = NULL;
1787
1788 /* if the lock is for the local node it needs to
1789 * be moved to the proper location within the queue.
1790 * do not allocate a new lock structure. */
1791 if (ml->node == dlm->node_num) {
1792 /* MIGRATION ONLY! */
1793 BUG_ON(!(mres->flags & DLM_MRES_MIGRATION));
1794
1795 spin_lock(&res->spinlock);
1796 for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) {
1797 tmpq = dlm_list_idx_to_ptr(res, j);
1798 list_for_each_entry(lock, tmpq, list) {
1799 if (lock->ml.cookie != ml->cookie)
1800 lock = NULL;
1801 else
1802 break;
1803 }
1804 if (lock)
1805 break;
1806 }
1807
1808 /* lock is always created locally first, and
1809 * destroyed locally last. it must be on the list */
1810 if (!lock) {
1811 c = ml->cookie;
1812 mlog(ML_ERROR, "Could not find local lock "
1813 "with cookie %u:%llu, node %u, "
1814 "list %u, flags 0x%x, type %d, "
1815 "conv %d, highest blocked %d\n",
1816 dlm_get_lock_cookie_node(be64_to_cpu(c)),
1817 dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1818 ml->node, ml->list, ml->flags, ml->type,
1819 ml->convert_type, ml->highest_blocked);
1820 __dlm_print_one_lock_resource(res);
1821 BUG();
1822 }
1823
1824 if (lock->ml.node != ml->node) {
1825 c = lock->ml.cookie;
1826 mlog(ML_ERROR, "Mismatched node# in lock "
1827 "cookie %u:%llu, name %.*s, node %u\n",
1828 dlm_get_lock_cookie_node(be64_to_cpu(c)),
1829 dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1830 res->lockname.len, res->lockname.name,
1831 lock->ml.node);
1832 c = ml->cookie;
1833 mlog(ML_ERROR, "Migrate lock cookie %u:%llu, "
1834 "node %u, list %u, flags 0x%x, type %d, "
1835 "conv %d, highest blocked %d\n",
1836 dlm_get_lock_cookie_node(be64_to_cpu(c)),
1837 dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1838 ml->node, ml->list, ml->flags, ml->type,
1839 ml->convert_type, ml->highest_blocked);
1840 __dlm_print_one_lock_resource(res);
1841 BUG();
1842 }
1843
1844 if (tmpq != queue) {
1845 c = ml->cookie;
1846 mlog(0, "Lock cookie %u:%llu was on list %u "
1847 "instead of list %u for %.*s\n",
1848 dlm_get_lock_cookie_node(be64_to_cpu(c)),
1849 dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1850 j, ml->list, res->lockname.len,
1851 res->lockname.name);
1852 __dlm_print_one_lock_resource(res);
1853 spin_unlock(&res->spinlock);
1854 continue;
1855 }
1856
1857 /* see NOTE above about why we do not update
1858 * to match the master here */
1859
1860 /* move the lock to its proper place */
1861 /* do not alter lock refcount. switching lists. */
1862 list_move_tail(&lock->list, queue);
1863 spin_unlock(&res->spinlock);
1864 added++;
1865
1866 mlog(0, "just reordered a local lock!\n");
1867 continue;
1868 }
1869
1870 /* lock is for another node. */
1871 newlock = dlm_new_lock(ml->type, ml->node,
1872 be64_to_cpu(ml->cookie), NULL);
1873 if (!newlock) {
1874 ret = -ENOMEM;
1875 goto leave;
1876 }
1877 lksb = newlock->lksb;
1878 dlm_lock_attach_lockres(newlock, res);
1879
1880 if (ml->convert_type != LKM_IVMODE) {
1881 BUG_ON(queue != &res->converting);
1882 newlock->ml.convert_type = ml->convert_type;
1883 }
1884 lksb->flags |= (ml->flags &
1885 (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));
1886
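/* an NL holder carries no valid lvb, so there is nothing to restore */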
1887 if (ml->type == LKM_NLMODE)
1888 goto skip_lvb;
1889
1890 if (!dlm_lvb_is_empty(mres->lvb)) {
1891 if (lksb->flags & DLM_LKSB_PUT_LVB) {
1892 /* other node was trying to update
1893 * lvb when node died. recreate the
1894 * lksb with the updated lvb. */
1895 memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN);
1896 /* the lock resource lvb update must happen
1897 * NOW, before the spinlock is dropped.
1898 * we no longer wait for the AST to update
1899 * the lvb. */
1900 memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
1901 } else {
1902 /* otherwise, the node is sending its
1903 * most recent valid lvb info */
1904 BUG_ON(ml->type != LKM_EXMODE &&
1905 ml->type != LKM_PRMODE);
1906 if (!dlm_lvb_is_empty(res->lvb) &&
1907 (ml->type == LKM_EXMODE ||
1908 memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) {
1909 int i;
1910 mlog(ML_ERROR, "%s:%.*s: received bad "
1911 "lvb! type=%d\n", dlm->name,
1912 res->lockname.len,
1913 res->lockname.name, ml->type);
1914 printk("lockres lvb=[");
1915 for (i=0; i<DLM_LVB_LEN; i++)
1916 printk("%02x", res->lvb[i]);
1917 printk("]\nmigrated lvb=[");
1918 for (i=0; i<DLM_LVB_LEN; i++)
1919 printk("%02x", mres->lvb[i]);
1920 printk("]\n");
1921 dlm_print_one_lock_resource(res);
1922 BUG();
1923 }
1924 memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
1925 }
1926 }
1927 skip_lvb:
1928
1929 /* NOTE:
1930 * wrt lock queue ordering and recovery:
1931 * 1. order of locks on granted queue is
1932 * meaningless.
1933 * 2. order of locks on converting queue is
1934 * LOST with the node death. sorry charlie.
1935 * 3. order of locks on the blocked queue is
1936 * also LOST.
1937 * order of locks does not affect integrity, it
1938 * just means that a lock request may get pushed
1939 * back in line as a result of the node death.
1940 * also note that for a given node the lock order
1941 * for its secondary queue locks is preserved
1942 * relative to each other, but clearly *not*
1943 * preserved relative to locks from other nodes.
1944 */
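/* guard against replaying the same lock twice, e.g. if the migrated
 * lock state was retransmitted: when a lock with this cookie already
 * sits on the target queue, log it and skip instead of double-adding */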
1945 bad = 0;
1946 spin_lock(&res->spinlock);
1947 list_for_each_entry(lock, queue, list) {
1948 if (lock->ml.cookie == ml->cookie) {
1949 c = lock->ml.cookie;
1950 mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already "
1951 "exists on this lockres!\n", dlm->name,
1952 res->lockname.len, res->lockname.name,
1953 dlm_get_lock_cookie_node(be64_to_cpu(c)),
1954 dlm_get_lock_cookie_seq(be64_to_cpu(c)));
1955
1956 mlog(ML_NOTICE, "sent lock: type=%d, conv=%d, "
1957 "node=%u, cookie=%u:%llu, queue=%d\n",
1958 ml->type, ml->convert_type, ml->node,
1959 dlm_get_lock_cookie_node(be64_to_cpu(ml->cookie)),
1960 dlm_get_lock_cookie_seq(be64_to_cpu(ml->cookie)),
1961 ml->list);
1962
1963 __dlm_print_one_lock_resource(res);
1964 bad = 1;
1965 break;
1966 }
1967 }
1968 if (!bad) {
1969 dlm_lock_get(newlock);
1970 list_add_tail(&newlock->list, queue);
1971 mlog(0, "%s:%.*s: added lock for node %u, "
1972 "setting refmap bit\n", dlm->name,
1973 res->lockname.len, res->lockname.name, ml->node);
1974 dlm_lockres_set_refmap_bit(dlm, res, ml->node);
1975 added++;
1976 }
1977 spin_unlock(&res->spinlock);
1978 }
1979 mlog(0, "done running all the locks\n");
1980
1981 leave:
1982 /* balance the ref taken when the work was queued */
1983 spin_lock(&res->spinlock);
1984 dlm_lockres_drop_inflight_ref(dlm, res);
1985 spin_unlock(&res->spinlock);
1986
1987 if (ret < 0) {
1988 mlog_errno(ret);
1989 if (newlock)
1990 dlm_lock_put(newlock);
1991 }
1992
1993 return ret;
1994 }
1995
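/* Called with dlm->spinlock and res->spinlock held. Flags the lockres
 * RECOVERING, puts it on the domain recovery list (holding a reference
 * while it sits there), and resolves operations that were in flight to
 * the dead master: pending converts and cancels roll back to the
 * granted queue, pending lock requests are dropped, and pending unlocks
 * are treated as if they had completed. */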
1996 void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
1997 struct dlm_lock_resource *res)
1998 {
1999 int i;
2000 struct list_head *queue;
2001 struct dlm_lock *lock, *next;
2002
2003 assert_spin_locked(&dlm->spinlock);
2004 assert_spin_locked(&res->spinlock);
2005 res->state |= DLM_LOCK_RES_RECOVERING;
2006 if (!list_empty(&res->recovering)) {
2007 mlog(0,
2008 "Recovering res %s:%.*s, is already on recovery list!\n",
2009 dlm->name, res->lockname.len, res->lockname.name);
2010 list_del_init(&res->recovering);
2011 dlm_lockres_put(res);
2012 }
2013 /* We need to hold a reference while on the recovery list */
2014 dlm_lockres_get(res);
2015 list_add_tail(&res->recovering, &dlm->reco.resources);
2016
2017 /* find any pending locks and put them back on proper list */
2018 for (i=DLM_BLOCKED_LIST; i>=DLM_GRANTED_LIST; i--) {
2019 queue = dlm_list_idx_to_ptr(res, i);
2020 list_for_each_entry_safe(lock, next, queue, list) {
2021 dlm_lock_get(lock);
2022 if (lock->convert_pending) {
2023 /* move converting lock back to granted */
2024 BUG_ON(i != DLM_CONVERTING_LIST);
2025 mlog(0, "node died with convert pending "
2026 "on %.*s. move back to granted list.\n",
2027 res->lockname.len, res->lockname.name);
2028 dlm_revert_pending_convert(res, lock);
2029 lock->convert_pending = 0;
2030 } else if (lock->lock_pending) {
2031 /* remove pending lock requests completely */
2032 BUG_ON(i != DLM_BLOCKED_LIST);
2033 mlog(0, "node died with lock pending "
2034 "on %.*s. remove from blocked list and skip.\n",
2035 res->lockname.len, res->lockname.name);
2036 /* lock will be floating until ref in
2037 * dlmlock_remote is freed after the network
2038 * call returns. ok for it to not be on any
2039 * list since no ast can be called
2040 * (the master is dead). */
2041 dlm_revert_pending_lock(res, lock);
2042 lock->lock_pending = 0;
2043 } else if (lock->unlock_pending) {
2044 /* if an unlock was in progress, treat as
2045 * if this had completed successfully
2046 * before sending this lock state to the
2047 * new master. note that the dlm_unlock
2048 * call is still responsible for calling
2049 * the unlockast. that will happen after
2050 * the network call times out. for now,
2051 * just move lists to prepare the new
2052 * recovery master. */
2053 BUG_ON(i != DLM_GRANTED_LIST);
2054 mlog(0, "node died with unlock pending "
2055 "on %.*s. remove from blocked list and skip.\n",
2056 res->lockname.len, res->lockname.name);
2057 dlm_commit_pending_unlock(res, lock);
2058 lock->unlock_pending = 0;
2059 } else if (lock->cancel_pending) {
2060 /* if a cancel was in progress, treat as
2061 * if this had completed successfully
2062 * before sending this lock state to the
2063 * new master */
2064 BUG_ON(i != DLM_CONVERTING_LIST);
2065 mlog(0, "node died with cancel pending "
2066 "on %.*s. move back to granted list.\n",
2067 res->lockname.len, res->lockname.name);
2068 dlm_commit_pending_cancel(res, lock);
2069 lock->cancel_pending = 0;
2070 }
2071 dlm_lock_put(lock);
2072 }
2073 }
2074 }
2075
2076
2077
2078 /* removes all recovered locks from the recovery list.
2079 * sets the res->owner to the new master.
2080 * unsets the RECOVERY flag and wakes waiters. */
2081 static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
2082 u8 dead_node, u8 new_master)
2083 {
2084 int i;
2085 struct hlist_head *bucket;
2086 struct dlm_lock_resource *res, *next;
2087
2088 assert_spin_locked(&dlm->spinlock);
2089
2090 list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
2091 if (res->owner == dead_node) {
2092 mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
2093 dlm->name, res->lockname.len, res->lockname.name,
2094 res->owner, new_master);
2095 list_del_init(&res->recovering);
2096 spin_lock(&res->spinlock);
2097 /* new_master has our reference from
2098 * the lock state sent during recovery */
2099 dlm_change_lockres_owner(dlm, res, new_master);
2100 res->state &= ~DLM_LOCK_RES_RECOVERING;
2101 if (__dlm_lockres_has_locks(res))
2102 __dlm_dirty_lockres(dlm, res);
2103 spin_unlock(&res->spinlock);
2104 wake_up(&res->wq);
2105 dlm_lockres_put(res);
2106 }
2107 }
2108
2109 /* this will become unnecessary eventually, but
2110 * for now we need to run the whole hash, clear
2111 * the RECOVERING state and set the owner
2112 * if necessary */
2113 for (i = 0; i < DLM_HASH_BUCKETS; i++) {
2114 bucket = dlm_lockres_hash(dlm, i);
2115 hlist_for_each_entry(res, bucket, hash_node) {
2116 if (!(res->state & DLM_LOCK_RES_RECOVERING))
2117 continue;
2118
2119 if (res->owner != dead_node &&
2120 res->owner != dlm->node_num)
2121 continue;
2122
2123 if (!list_empty(&res->recovering)) {
2124 list_del_init(&res->recovering);
2125 dlm_lockres_put(res);
2126 }
2127
2128 /* new_master has our reference from
2129 * the lock state sent during recovery */
2130 mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
2131 dlm->name, res->lockname.len, res->lockname.name,
2132 res->owner, new_master);
2133 spin_lock(&res->spinlock);
2134 dlm_change_lockres_owner(dlm, res, new_master);
2135 res->state &= ~DLM_LOCK_RES_RECOVERING;
2136 if (__dlm_lockres_has_locks(res))
2137 __dlm_dirty_lockres(dlm, res);
2138 spin_unlock(&res->spinlock);
2139 wake_up(&res->wq);
2140 }
2141 }
2142 }
2143
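/* Decide whether a lock's view of the lvb can still be trusted after a
 * node death. With local set, we are inspecting our own locks on a
 * lockres someone else mastered: anything below PR never had a readable
 * lvb. Without local, we master the lockres and inspect the dead node's
 * locks: only an EX holder could have changed the lvb, so only EX
 * forces a blank. */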
2144 static inline int dlm_lvb_needs_invalidation(struct dlm_lock *lock, int local)
2145 {
2146 if (local) {
2147 if (lock->ml.type != LKM_EXMODE &&
2148 lock->ml.type != LKM_PRMODE)
2149 return 1;
2150 } else if (lock->ml.type == LKM_EXMODE)
2151 return 1;
2152 return 0;
2153 }
2154
2155 static void dlm_revalidate_lvb(struct dlm_ctxt *dlm,
2156 struct dlm_lock_resource *res, u8 dead_node)
2157 {
2158 struct list_head *queue;
2159 struct dlm_lock *lock;
2160 int blank_lvb = 0, local = 0;
2161 int i;
2162 u8 search_node;
2163
2164 assert_spin_locked(&dlm->spinlock);
2165 assert_spin_locked(&res->spinlock);
2166
2167 if (res->owner == dlm->node_num)
2168 /* if this node owned the lockres, and if the dead node
2169 * had an EX when he died, blank out the lvb */
2170 search_node = dead_node;
2171 else {
2172 /* if this is a secondary lockres, and we had no EX or PR
2173 * locks granted, we can no longer trust the lvb */
2174 search_node = dlm->node_num;
2175 local = 1; /* check local state for valid lvb */
2176 }
2177
2178 for (i=DLM_GRANTED_LIST; i<=DLM_CONVERTING_LIST; i++) {
2179 queue = dlm_list_idx_to_ptr(res, i);
2180 list_for_each_entry(lock, queue, list) {
2181 if (lock->ml.node == search_node) {
2182 if (dlm_lvb_needs_invalidation(lock, local)) {
2183 /* zero the lksb lvb and lockres lvb */
2184 blank_lvb = 1;
2185 memset(lock->lksb->lvb, 0, DLM_LVB_LEN);
2186 }
2187 }
2188 }
2189 }
2190
2191 if (blank_lvb) {
2192 mlog(0, "clearing %.*s lvb, dead node %u had EX\n",
2193 res->lockname.len, res->lockname.name, dead_node);
2194 memset(res->lvb, 0, DLM_LVB_LEN);
2195 }
2196 }
2197
2198 static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
2199 struct dlm_lock_resource *res, u8 dead_node)
2200 {
2201 struct dlm_lock *lock, *next;
2202 unsigned int freed = 0;
2203
2204 /* this node is the lockres master:
2205 * 1) remove any stale locks for the dead node
2206 * 2) if the dead node had an EX when he died, blank out the lvb
2207 */
2208 assert_spin_locked(&dlm->spinlock);
2209 assert_spin_locked(&res->spinlock);
2210
2211 /* We call dlm_lock_put() twice: once for removal from the list, and
2212 * once to force the DLM_UNLOCK_FREE_LOCK action so as to free the lock */
2213
2214 /* TODO: check pending_asts, pending_basts here */
2215 list_for_each_entry_safe(lock, next, &res->granted, list) {
2216 if (lock->ml.node == dead_node) {
2217 list_del_init(&lock->list);
2218 dlm_lock_put(lock);
2219 /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
2220 dlm_lock_put(lock);
2221 freed++;
2222 }
2223 }
2224 list_for_each_entry_safe(lock, next, &res->converting, list) {
2225 if (lock->ml.node == dead_node) {
2226 list_del_init(&lock->list);
2227 dlm_lock_put(lock);
2228 /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
2229 dlm_lock_put(lock);
2230 freed++;
2231 }
2232 }
2233 list_for_each_entry_safe(lock, next, &res->blocked, list) {
2234 if (lock->ml.node == dead_node) {
2235 list_del_init(&lock->list);
2236 dlm_lock_put(lock);
2237 /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
2238 dlm_lock_put(lock);
2239 freed++;
2240 }
2241 }
2242
2243 if (freed) {
2244 mlog(0, "%s:%.*s: freed %u locks for dead node %u, "
2245 "dropping ref from lockres\n", dlm->name,
2246 res->lockname.len, res->lockname.name, freed, dead_node);
2247 if (!test_bit(dead_node, res->refmap)) {
2248 mlog(ML_ERROR, "%s:%.*s: freed %u locks for dead node %u, "
2249 "but ref was not set\n", dlm->name,
2250 res->lockname.len, res->lockname.name, freed, dead_node);
2251 __dlm_print_one_lock_resource(res);
2252 }
2253 dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
2254 } else if (test_bit(dead_node, res->refmap)) {
2255 mlog(0, "%s:%.*s: dead node %u had a ref, but had "
2256 "no locks and had not purged before dying\n", dlm->name,
2257 res->lockname.len, res->lockname.name, dead_node);
2258 dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
2259 }
2260
2261 /* do not kick thread yet */
2262 __dlm_dirty_lockres(dlm, res);
2263 }
2264
2265 /* if this node is the recovery master, and there are no
2266 * locks for a given lockres owned by this node that are in
2267 * either PR or EX mode, zero out the lvb before requesting.
2268 *
2269 */
2270
2271
2272 static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
2273 {
2274 struct dlm_lock_resource *res;
2275 int i;
2276 struct hlist_head *bucket;
2277 struct dlm_lock *lock;
2278
2279
2280 /* purge any stale mles */
2281 dlm_clean_master_list(dlm, dead_node);
2282
2283 /*
2284 * now clean up all lock resources. there are two rules:
2285 *
2286 * 1) if the dead node was the master, move the lockres
2287 * to the recovering list. set the RECOVERING flag.
2288 * this lockres needs to be cleaned up before it can
2289 * be used further.
2290 *
2291 * 2) if this node was the master, remove all locks from
2292 * each of the lockres queues that were owned by the
2293 * dead node. once recovery finishes, the dlm thread
2294 * can be kicked again to see if any ASTs or BASTs
2295 * need to be fired as a result.
2296 */
2297 for (i = 0; i < DLM_HASH_BUCKETS; i++) {
2298 bucket = dlm_lockres_hash(dlm, i);
2299 hlist_for_each_entry(res, bucket, hash_node) {
2300 /* always prune any $RECOVERY entries for dead nodes,
2301 * otherwise hangs can occur during later recovery */
2302 if (dlm_is_recovery_lock(res->lockname.name,
2303 res->lockname.len)) {
2304 spin_lock(&res->spinlock);
2305 list_for_each_entry(lock, &res->granted, list) {
2306 if (lock->ml.node == dead_node) {
2307 mlog(0, "AHA! there was "
2308 "a $RECOVERY lock for dead "
2309 "node %u (%s)!\n",
2310 dead_node, dlm->name);
2311 list_del_init(&lock->list);
2312 dlm_lock_put(lock);
2313 break;
2314 }
2315 }
2316 spin_unlock(&res->spinlock);
2317 continue;
2318 }
2319 spin_lock(&res->spinlock);
2320 /* zero the lvb if necessary */
2321 dlm_revalidate_lvb(dlm, res, dead_node);
2322 if (res->owner == dead_node) {
2323 if (res->state & DLM_LOCK_RES_DROPPING_REF) {
2324 mlog(ML_NOTICE, "%s: res %.*s, Skip "
2325 "recovery as it is being freed\n",
2326 dlm->name, res->lockname.len,
2327 res->lockname.name);
2328 } else
2329 dlm_move_lockres_to_recovery_list(dlm,
2330 res);
2331
2332 } else if (res->owner == dlm->node_num) {
2333 dlm_free_dead_locks(dlm, res, dead_node);
2334 __dlm_lockres_calc_usage(dlm, res);
2335 }
2336 spin_unlock(&res->spinlock);
2337 }
2338 }
2339
2340 }
2341
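/* Called with dlm->spinlock held, either from the heartbeat down
 * callback or from dlm_begin_reco_handler() when a recovery master
 * reports a death this node has not yet observed. */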
2342 static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx)
2343 {
2344 assert_spin_locked(&dlm->spinlock);
2345
2346 if (dlm->reco.new_master == idx) {
2347 mlog(0, "%s: recovery master %d just died\n",
2348 dlm->name, idx);
2349 if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
2350 /* finalize1 was reached, so it is safe to clear
2351 * the new_master and dead_node. that recovery
2352 * is complete. */
2353 mlog(0, "%s: dead master %d had reached "
2354 "finalize1 state, clearing\n", dlm->name, idx);
2355 dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
2356 __dlm_reset_recovery(dlm);
2357 }
2358 }
2359
2360 /* Clean up join state on node death. */
2361 if (dlm->joining_node == idx) {
2362 mlog(0, "Clearing join state for node %u\n", idx);
2363 __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
2364 }
2365
2366 /* check to see if the node is already considered dead */
2367 if (!test_bit(idx, dlm->live_nodes_map)) {
2368 mlog(0, "for domain %s, node %d is already dead. "
2369 "another node likely did recovery already.\n",
2370 dlm->name, idx);
2371 return;
2372 }
2373
2374 /* check to see if we do not care about this node */
2375 if (!test_bit(idx, dlm->domain_map)) {
2376 /* This also catches the case that we get a node down
2377 * but haven't joined the domain yet. */
2378 mlog(0, "node %u already removed from domain!\n", idx);
2379 return;
2380 }
2381
2382 clear_bit(idx, dlm->live_nodes_map);
2383
2384 /* make sure local cleanup occurs before the heartbeat events */
2385 if (!test_bit(idx, dlm->recovery_map))
2386 dlm_do_local_recovery_cleanup(dlm, idx);
2387
2388 /* notify anything attached to the heartbeat events */
2389 dlm_hb_event_notify_attached(dlm, idx, 0);
2390
2391 mlog(0, "node %u being removed from domain map!\n", idx);
2392 clear_bit(idx, dlm->domain_map);
2393 clear_bit(idx, dlm->exit_domain_map);
2394 /* wake up migration waiters if a node goes down.
2395 * perhaps later we can genericize this for other waiters. */
2396 wake_up(&dlm->migration_wq);
2397
2398 if (test_bit(idx, dlm->recovery_map))
2399 mlog(0, "domain %s, node %u already added "
2400 "to recovery map!\n", dlm->name, idx);
2401 else
2402 set_bit(idx, dlm->recovery_map);
2403 }
2404
2405 void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data)
2406 {
2407 struct dlm_ctxt *dlm = data;
2408
2409 if (!dlm_grab(dlm))
2410 return;
2411
2412 /*
2413 * This will notify any dlm users that a node in our domain
2414 * went away without notifying us first.
2415 */
2416 if (test_bit(idx, dlm->domain_map))
2417 dlm_fire_domain_eviction_callbacks(dlm, idx);
2418
2419 spin_lock(&dlm->spinlock);
2420 __dlm_hb_node_down(dlm, idx);
2421 spin_unlock(&dlm->spinlock);
2422
2423 dlm_put(dlm);
2424 }
2425
2426 void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data)
2427 {
2428 struct dlm_ctxt *dlm = data;
2429
2430 if (!dlm_grab(dlm))
2431 return;
2432
2433 spin_lock(&dlm->spinlock);
2434 set_bit(idx, dlm->live_nodes_map);
2435 /* do NOT notify mle attached to the heartbeat events.
2436 * new nodes are not interested in mastery until joined. */
2437 spin_unlock(&dlm->spinlock);
2438
2439 dlm_put(dlm);
2440 }
2441
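/* The $RECOVERY lock asts below are log-only stubs: the lock is used
 * purely as a cluster-wide election, and the interesting outcomes are
 * observed through the dlmlock()/dlmunlock() return codes in
 * dlm_pick_recovery_master() below. */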
2442 static void dlm_reco_ast(void *astdata)
2443 {
2444 struct dlm_ctxt *dlm = astdata;
2445 mlog(0, "ast for recovery lock fired! this=%u, dlm=%s\n",
2446 dlm->node_num, dlm->name);
2447 }
2448 static void dlm_reco_bast(void *astdata, int blocked_type)
2449 {
2450 struct dlm_ctxt *dlm = astdata;
2451 mlog(0, "bast for recovery lock fired! this=%u, dlm=%s\n",
2452 dlm->node_num, dlm->name);
2453 }
2454 static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
2455 {
2456 mlog(0, "unlockast for recovery lock fired!\n");
2457 }
2458
2459 /*
2460 * dlm_pick_recovery_master will continually attempt to use
2461 * dlmlock() on the special "$RECOVERY" lockres with the
2462 * LKM_NOQUEUE flag to get an EX. every thread that enters
2463 * this function on each node racing to become the recovery
2464 * master will not stop attempting this until either:
2465 * a) this node gets the EX (and becomes the recovery master),
2466 * or b) dlm->reco.new_master gets set to some nodenum
2467 * != O2NM_INVALID_NODE_NUM (another node will do the reco).
2468 * so each time a recovery master is needed, the entire cluster
2469 * will sync at this point. if the new master dies, that will
2470 * be detected in dlm_do_recovery */
2471 static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)
2472 {
2473 enum dlm_status ret;
2474 struct dlm_lockstatus lksb;
2475 int status = -EINVAL;
2476
2477 mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
2478 dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
2479 again:
2480 memset(&lksb, 0, sizeof(lksb));
2481
2482 ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
2483 DLM_RECOVERY_LOCK_NAME, DLM_RECOVERY_LOCK_NAME_LEN,
2484 dlm_reco_ast, dlm, dlm_reco_bast);
2485
2486 mlog(0, "%s: dlmlock($RECOVERY) returned %d, lksb=%d\n",
2487 dlm->name, ret, lksb.status);
2488
2489 if (ret == DLM_NORMAL) {
2490 mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
2491 dlm->name, dlm->node_num);
2492
2493 /* got the EX lock. check to see if another node
2494 * just became the reco master */
2495 if (dlm_reco_master_ready(dlm)) {
2496 mlog(0, "%s: got reco EX lock, but %u will "
2497 "do the recovery\n", dlm->name,
2498 dlm->reco.new_master);
2499 status = -EEXIST;
2500 } else {
2501 status = 0;
2502
2503 /* see if recovery was already finished elsewhere */
2504 spin_lock(&dlm->spinlock);
2505 if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
2506 status = -EINVAL;
2507 mlog(0, "%s: got reco EX lock, but "
2508 "node got recovered already\n", dlm->name);
2509 if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
2510 mlog(ML_ERROR, "%s: new master is %u "
2511 "but no dead node!\n",
2512 dlm->name, dlm->reco.new_master);
2513 BUG();
2514 }
2515 }
2516 spin_unlock(&dlm->spinlock);
2517 }
2518
2519 /* if this node has actually become the recovery master,
2520 * set the master and send the messages to begin recovery */
2521 if (!status) {
2522 mlog(0, "%s: dead=%u, this=%u, sending "
2523 "begin_reco now\n", dlm->name,
2524 dlm->reco.dead_node, dlm->node_num);
2525 status = dlm_send_begin_reco_message(dlm,
2526 dlm->reco.dead_node);
2527 /* this always succeeds */
2528 BUG_ON(status);
2529
2530 /* set the new_master to this node */
2531 spin_lock(&dlm->spinlock);
2532 dlm_set_reco_master(dlm, dlm->node_num);
2533 spin_unlock(&dlm->spinlock);
2534 }
2535
2536 /* recovery lock is a special case. ast will not get fired,
2537 * so just go ahead and unlock it. */
2538 ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm);
2539 if (ret == DLM_DENIED) {
2540 mlog(0, "got DLM_DENIED, trying LKM_CANCEL\n");
2541 ret = dlmunlock(dlm, &lksb, LKM_CANCEL, dlm_reco_unlock_ast, dlm);
2542 }
2543 if (ret != DLM_NORMAL) {
2544 /* this would really suck. this could only happen
2545 * if there was a network error during the unlock
2546 * because of node death. this means the unlock
2547 * is actually "done" and the lock structure is
2548 * even freed. we can continue, but only
2549 * because this specific lock name is special. */
2550 mlog(ML_ERROR, "dlmunlock returned %d\n", ret);
2551 }
2552 } else if (ret == DLM_NOTQUEUED) {
2553 mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
2554 dlm->name, dlm->node_num);
2555 /* another node is master. wait on
2556 * reco.new_master != O2NM_INVALID_NODE_NUM
2557 * for at most one second */
2558 wait_event_timeout(dlm->dlm_reco_thread_wq,
2559 dlm_reco_master_ready(dlm),
2560 msecs_to_jiffies(1000));
2561 if (!dlm_reco_master_ready(dlm)) {
2562 mlog(0, "%s: reco master taking awhile\n",
2563 dlm->name);
2564 goto again;
2565 }
2566 /* another node has informed this one that it is reco master */
2567 mlog(0, "%s: reco master %u is ready to recover %u\n",
2568 dlm->name, dlm->reco.new_master, dlm->reco.dead_node);
2569 status = -EEXIST;
2570 } else if (ret == DLM_RECOVERING) {
2571 mlog(0, "dlm=%s dlmlock says master node died (this=%u)\n",
2572 dlm->name, dlm->node_num);
2573 goto again;
2574 } else {
2575 struct dlm_lock_resource *res;
2576
2577 /* dlmlock returned something other than NOTQUEUED or NORMAL */
2578 mlog(ML_ERROR, "%s: got %s from dlmlock($RECOVERY), "
2579 "lksb.status=%s\n", dlm->name, dlm_errname(ret),
2580 dlm_errname(lksb.status));
2581 res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
2582 DLM_RECOVERY_LOCK_NAME_LEN);
2583 if (res) {
2584 dlm_print_one_lock_resource(res);
2585 dlm_lockres_put(res);
2586 } else {
2587 mlog(ML_ERROR, "recovery lock not found\n");
2588 }
2589 BUG();
2590 }
2591
2592 return status;
2593 }
2594
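/* Broadcast DLM_BEGIN_RECO_MSG to every node in the domain map except
 * this node and the dead node. A peer still finalizing the previous
 * recovery answers -EAGAIN (or positive EAGAIN from older nodes), so
 * back off and retry that node until it is ready; a peer that dies
 * mid-broadcast is skipped, as the next recovery pass will handle it. */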
2595 static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node)
2596 {
2597 struct dlm_begin_reco br;
2598 int ret = 0;
2599 struct dlm_node_iter iter;
2600 int nodenum;
2601 int status;
2602
2603 mlog(0, "%s: dead node is %u\n", dlm->name, dead_node);
2604
2605 spin_lock(&dlm->spinlock);
2606 dlm_node_iter_init(dlm->domain_map, &iter);
2607 spin_unlock(&dlm->spinlock);
2608
2609 clear_bit(dead_node, iter.node_map);
2610
2611 memset(&br, 0, sizeof(br));
2612 br.node_idx = dlm->node_num;
2613 br.dead_node = dead_node;
2614
2615 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2616 ret = 0;
2617 if (nodenum == dead_node) {
2618 mlog(0, "not sending begin reco to dead node "
2619 "%u\n", dead_node);
2620 continue;
2621 }
2622 if (nodenum == dlm->node_num) {
2623 mlog(0, "not sending begin reco to self\n");
2624 continue;
2625 }
2626 retry:
2627 ret = -EINVAL;
2628 mlog(0, "attempting to send begin reco msg to %d\n",
2629 nodenum);
2630 ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key,
2631 &br, sizeof(br), nodenum, &status);
2632 /* negative status is handled ok by caller here */
2633 if (ret >= 0)
2634 ret = status;
2635 if (dlm_is_host_down(ret)) {
2636 /* node is down. not involved in recovery
2637 * so just keep going */
2638 mlog(ML_NOTICE, "%s: node %u was down when sending "
2639 "begin reco msg (%d)\n", dlm->name, nodenum, ret);
2640 ret = 0;
2641 }
2642
2643 /*
2644 * Prior to commit aad1b15310b9bcd59fa81ab8f2b1513b59553ea8,
2645 * dlm_begin_reco_handler() returned EAGAIN and not -EAGAIN.
2646 * We are handling both for compatibility reasons.
2647 */
2648 if (ret == -EAGAIN || ret == EAGAIN) {
2649 mlog(0, "%s: trying to start recovery of node "
2650 "%u, but node %u is waiting for last recovery "
2651 "to complete, backoff for a bit\n", dlm->name,
2652 dead_node, nodenum);
2653 msleep(100);
2654 goto retry;
2655 }
2656 if (ret < 0) {
2657 struct dlm_lock_resource *res;
2658
2659 /* this is now a serious problem, possibly ENOMEM
2660 * in the network stack. must retry */
2661 mlog_errno(ret);
2662 mlog(ML_ERROR, "begin reco of dlm %s to node %u "
2663 "returned %d\n", dlm->name, nodenum, ret);
2664 res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
2665 DLM_RECOVERY_LOCK_NAME_LEN);
2666 if (res) {
2667 dlm_print_one_lock_resource(res);
2668 dlm_lockres_put(res);
2669 } else {
2670 mlog(ML_ERROR, "recovery lock not found\n");
2671 }
2672 /* sleep for a bit in hopes that we can avoid
2673 * another ENOMEM */
2674 msleep(100);
2675 goto retry;
2676 }
2677 }
2678
2679 return ret;
2680 }
2681
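/* Handler for DLM_BEGIN_RECO_MSG. Refuses with -EAGAIN while this node
 * sits between finalize1 and finalize2 of a previous recovery;
 * otherwise records the new recovery master and dead node. If the dead
 * node has not been seen as down locally yet, it is forced into the
 * domain and live maps so __dlm_hb_node_down() runs the cleanup now. */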
2682 int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data,
2683 void **ret_data)
2684 {
2685 struct dlm_ctxt *dlm = data;
2686 struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf;
2687
2688 /* ok to return 0, domain has gone away */
2689 if (!dlm_grab(dlm))
2690 return 0;
2691
2692 spin_lock(&dlm->spinlock);
2693 if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
2694 mlog(0, "%s: node %u wants to recover node %u (%u:%u) "
2695 "but this node is in finalize state, waiting on finalize2\n",
2696 dlm->name, br->node_idx, br->dead_node,
2697 dlm->reco.dead_node, dlm->reco.new_master);
2698 spin_unlock(&dlm->spinlock);
2699 return -EAGAIN;
2700 }
2701 spin_unlock(&dlm->spinlock);
2702
2703 mlog(0, "%s: node %u wants to recover node %u (%u:%u)\n",
2704 dlm->name, br->node_idx, br->dead_node,
2705 dlm->reco.dead_node, dlm->reco.new_master);
2706
2707 dlm_fire_domain_eviction_callbacks(dlm, br->dead_node);
2708
2709 spin_lock(&dlm->spinlock);
2710 if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
2711 if (test_bit(dlm->reco.new_master, dlm->recovery_map)) {
2712 mlog(0, "%s: new_master %u died, changing "
2713 "to %u\n", dlm->name, dlm->reco.new_master,
2714 br->node_idx);
2715 } else {
2716 mlog(0, "%s: new_master %u NOT DEAD, changing "
2717 "to %u\n", dlm->name, dlm->reco.new_master,
2718 br->node_idx);
2719 /* may not have seen the new master as dead yet */
2720 }
2721 }
2722 if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) {
2723 mlog(ML_NOTICE, "%s: dead_node previously set to %u, "
2724 "node %u changing it to %u\n", dlm->name,
2725 dlm->reco.dead_node, br->node_idx, br->dead_node);
2726 }
2727 dlm_set_reco_master(dlm, br->node_idx);
2728 dlm_set_reco_dead_node(dlm, br->dead_node);
2729 if (!test_bit(br->dead_node, dlm->recovery_map)) {
2730 mlog(0, "recovery master %u sees %u as dead, but this "
2731 "node has not yet. marking %u as dead\n",
2732 br->node_idx, br->dead_node, br->dead_node);
2733 if (!test_bit(br->dead_node, dlm->domain_map) ||
2734 !test_bit(br->dead_node, dlm->live_nodes_map))
2735 mlog(0, "%u not in domain/live_nodes map "
2736 "so setting it in reco map manually\n",
2737 br->dead_node);
2738 /* force the recovery cleanup in __dlm_hb_node_down
2739 * both of these will be cleared in a moment */
2740 set_bit(br->dead_node, dlm->domain_map);
2741 set_bit(br->dead_node, dlm->live_nodes_map);
2742 __dlm_hb_node_down(dlm, br->dead_node);
2743 }
2744 spin_unlock(&dlm->spinlock);
2745
2746 dlm_kick_recovery_thread(dlm);
2747
2748 mlog(0, "%s: recovery started by node %u, for %u (%u:%u)\n",
2749 dlm->name, br->node_idx, br->dead_node,
2750 dlm->reco.dead_node, dlm->reco.new_master);
2751
2752 dlm_put(dlm);
2753 return 0;
2754 }
2755
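/* Recovery is finished with a two-stage broadcast. Stage 1 has every
 * node reassign the recovered lockres to the new master and set the
 * FINALIZE state; stage 2 clears that state and resets the recovery
 * bookkeeping. A recovery master dying between the stages is detected
 * via the FINALIZE flag in __dlm_hb_node_down(). */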
2756 #define DLM_FINALIZE_STAGE2 0x01
2757 static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm)
2758 {
2759 int ret = 0;
2760 struct dlm_finalize_reco fr;
2761 struct dlm_node_iter iter;
2762 int nodenum;
2763 int status;
2764 int stage = 1;
2765
2766 mlog(0, "finishing recovery for node %s:%u, "
2767 "stage %d\n", dlm->name, dlm->reco.dead_node, stage);
2768
2769 spin_lock(&dlm->spinlock);
2770 dlm_node_iter_init(dlm->domain_map, &iter);
2771 spin_unlock(&dlm->spinlock);
2772
2773 stage2:
2774 memset(&fr, 0, sizeof(fr));
2775 fr.node_idx = dlm->node_num;
2776 fr.dead_node = dlm->reco.dead_node;
2777 if (stage == 2)
2778 fr.flags |= DLM_FINALIZE_STAGE2;
2779
2780 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2781 if (nodenum == dlm->node_num)
2782 continue;
2783 ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key,
2784 &fr, sizeof(fr), nodenum, &status);
2785 if (ret >= 0)
2786 ret = status;
2787 if (ret < 0) {
2788 mlog(ML_ERROR, "Error %d when sending message %u (key "
2789 "0x%x) to node %u\n", ret, DLM_FINALIZE_RECO_MSG,
2790 dlm->key, nodenum);
2791 if (dlm_is_host_down(ret)) {
2792 /* this has no effect on this recovery
2793 * session, so set the status to zero to
2794 * finish out the last recovery */
2795 mlog(ML_ERROR, "node %u went down after this "
2796 "node finished recovery.\n", nodenum);
2797 ret = 0;
2798 continue;
2799 }
2800 break;
2801 }
2802 }
2803 if (stage == 1) {
2804 /* reset the node_iter back to the top and send finalize2 */
2805 iter.curnode = -1;
2806 stage = 2;
2807 goto stage2;
2808 }
2809
2810 return ret;
2811 }
2812
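/* Handler side of the two-stage finalize. Stage 1 reassigns recovered
 * lockres ownership and sets DLM_RECO_STATE_FINALIZE; stage 2 requires
 * that flag, clears it, resets the local recovery state and kicks the
 * recovery thread. Receiving the stages out of order is a fatal
 * inconsistency. */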
2813 int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
2814 void **ret_data)
2815 {
2816 struct dlm_ctxt *dlm = data;
2817 struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf;
2818 int stage = 1;
2819
2820 /* ok to return 0, domain has gone away */
2821 if (!dlm_grab(dlm))
2822 return 0;
2823
2824 if (fr->flags & DLM_FINALIZE_STAGE2)
2825 stage = 2;
2826
2827 mlog(0, "%s: node %u finalizing recovery stage %d of "
2828 "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage,
2829 fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master);
2830
2831 spin_lock(&dlm->spinlock);
2832
2833 if (dlm->reco.new_master != fr->node_idx) {
2834 mlog(ML_ERROR, "node %u sent recovery finalize msg, but node "
2835 "%u is supposed to be the new master, dead=%u\n",
2836 fr->node_idx, dlm->reco.new_master, fr->dead_node);
2837 BUG();
2838 }
2839 if (dlm->reco.dead_node != fr->dead_node) {
2840 mlog(ML_ERROR, "node %u sent recovery finalize msg for dead "
2841 "node %u, but node %u is supposed to be dead\n",
2842 fr->node_idx, fr->dead_node, dlm->reco.dead_node);
2843 BUG();
2844 }
2845
2846 switch (stage) {
2847 case 1:
2848 dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx);
2849 if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
2850 mlog(ML_ERROR, "%s: received finalize1 from "
2851 "new master %u for dead node %u, but "
2852 "this node has already received it!\n",
2853 dlm->name, fr->node_idx, fr->dead_node);
2854 dlm_print_reco_node_status(dlm);
2855 BUG();
2856 }
2857 dlm->reco.state |= DLM_RECO_STATE_FINALIZE;
2858 spin_unlock(&dlm->spinlock);
2859 break;
2860 case 2:
2861 if (!(dlm->reco.state & DLM_RECO_STATE_FINALIZE)) {
2862 mlog(ML_ERROR, "%s: received finalize2 from "
2863 "new master %u for dead node %u, but "
2864 "this node did not have finalize1!\n",
2865 dlm->name, fr->node_idx, fr->dead_node);
2866 dlm_print_reco_node_status(dlm);
2867 BUG();
2868 }
2869 dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
2870 spin_unlock(&dlm->spinlock);
2871 dlm_reset_recovery(dlm);
2872 dlm_kick_recovery_thread(dlm);
2873 break;
2874 default:
2875 BUG();
2876 }
2877
2878 mlog(0, "%s: recovery done, reco master was %u, dead now %u, master now %u\n",
2879 dlm->name, fr->node_idx, dlm->reco.dead_node, dlm->reco.new_master);
2880
2881 dlm_put(dlm);
2882 return 0;
2883 }