fs/ocfs2/dlm/dlmmaster.c
1/* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
3 *
4 * dlmmod.c
5 *
6 * standalone DLM module
7 *
8 * Copyright (C) 2004 Oracle. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public
12 * License as published by the Free Software Foundation; either
13 * version 2 of the License, or (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public
21 * License along with this program; if not, write to the
22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
23 * Boston, MA 021110-1307, USA.
24 *
25 */
26
27
28#include <linux/module.h>
29#include <linux/fs.h>
30#include <linux/types.h>
31#include <linux/slab.h>
32#include <linux/highmem.h>
33#include <linux/utsname.h>
34#include <linux/init.h>
35#include <linux/sysctl.h>
36#include <linux/random.h>
37#include <linux/blkdev.h>
38#include <linux/socket.h>
39#include <linux/inet.h>
40#include <linux/spinlock.h>
41#include <linux/delay.h>
42
43
44#include "cluster/heartbeat.h"
45#include "cluster/nodemanager.h"
46#include "cluster/tcp.h"
47
48#include "dlmapi.h"
49#include "dlmcommon.h"
50#include "dlmdomain.h"
51#include "dlmdebug.h"
52
53#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER)
54#include "cluster/masklog.h"
55
56static void dlm_mle_node_down(struct dlm_ctxt *dlm,
57 struct dlm_master_list_entry *mle,
58 struct o2nm_node *node,
59 int idx);
60static void dlm_mle_node_up(struct dlm_ctxt *dlm,
61 struct dlm_master_list_entry *mle,
62 struct o2nm_node *node,
63 int idx);
64
65static void dlm_assert_master_worker(struct dlm_work_item *item, void *data);
66static int dlm_do_assert_master(struct dlm_ctxt *dlm,
67 struct dlm_lock_resource *res,
68 void *nodemap, u32 flags);
69static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data);
70
71static inline int dlm_mle_equal(struct dlm_ctxt *dlm,
72 struct dlm_master_list_entry *mle,
73 const char *name,
74 unsigned int namelen)
75{
76 struct dlm_lock_resource *res;
77
78 if (dlm != mle->dlm)
79 return 0;
80
81 if (mle->type == DLM_MLE_BLOCK ||
82 mle->type == DLM_MLE_MIGRATION) {
83 if (namelen != mle->u.name.len ||
84 memcmp(name, mle->u.name.name, namelen)!=0)
85 return 0;
86 } else {
87 res = mle->u.res;
88 if (namelen != res->lockname.len ||
89 memcmp(res->lockname.name, name, namelen) != 0)
90 return 0;
91 }
92 return 1;
93}
94
95static struct kmem_cache *dlm_lockres_cache = NULL;
96static struct kmem_cache *dlm_lockname_cache = NULL;
97static struct kmem_cache *dlm_mle_cache = NULL;
98
99static void dlm_mle_release(struct kref *kref);
100static void dlm_init_mle(struct dlm_master_list_entry *mle,
101 enum dlm_mle_type type,
102 struct dlm_ctxt *dlm,
103 struct dlm_lock_resource *res,
104 const char *name,
105 unsigned int namelen);
106static void dlm_put_mle(struct dlm_master_list_entry *mle);
107static void __dlm_put_mle(struct dlm_master_list_entry *mle);
108static int dlm_find_mle(struct dlm_ctxt *dlm,
109 struct dlm_master_list_entry **mle,
110 char *name, unsigned int namelen);
111
112static int dlm_do_master_request(struct dlm_lock_resource *res,
113 struct dlm_master_list_entry *mle, int to);
114
115
116static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
117 struct dlm_lock_resource *res,
118 struct dlm_master_list_entry *mle,
119 int *blocked);
120static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
121 struct dlm_lock_resource *res,
122 struct dlm_master_list_entry *mle,
123 int blocked);
124static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
125 struct dlm_lock_resource *res,
126 struct dlm_master_list_entry *mle,
127 struct dlm_master_list_entry **oldmle,
128 const char *name, unsigned int namelen,
129 u8 new_master, u8 master);
130
131static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
132 struct dlm_lock_resource *res);
133static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
134 struct dlm_lock_resource *res);
135static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
136 struct dlm_lock_resource *res,
137 u8 target);
138static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
139 struct dlm_lock_resource *res);
140
141
142int dlm_is_host_down(int errno)
143{
144 switch (errno) {
145 case -EBADF:
146 case -ECONNREFUSED:
147 case -ENOTCONN:
148 case -ECONNRESET:
149 case -EPIPE:
150 case -EHOSTDOWN:
151 case -EHOSTUNREACH:
152 case -ETIMEDOUT:
153 case -ECONNABORTED:
154 case -ENETDOWN:
155 case -ENETUNREACH:
156 case -ENETRESET:
157 case -ESHUTDOWN:
158 case -ENOPROTOOPT:
159 case -EINVAL: /* if returned from our tcp code,
160 this means there is no socket */
161 return 1;
162 }
163 return 0;
164}
165
166
167/*
168 * MASTER LIST FUNCTIONS
169 */
170
171
172/*
173 * regarding master list entries and heartbeat callbacks:
174 *
175 * in order to avoid sleeping and allocation that occurs in
176 * heartbeat, master list entries are simply attached to the
177 * dlm's established heartbeat callbacks. the mle is attached
178 * when it is created, and since the dlm->spinlock is held at
179 * that time, any heartbeat event will be properly discovered
180 * by the mle. the mle needs to be detached from the
181 * dlm->mle_hb_events list as soon as heartbeat events are no
182 * longer useful to the mle, and before the mle is freed.
183 *
184 * as a general rule, heartbeat events are no longer needed by
185 * the mle once an "answer" regarding the lock master has been
186 * received.
187 */
188static inline void __dlm_mle_attach_hb_events(struct dlm_ctxt *dlm,
189 struct dlm_master_list_entry *mle)
190{
191 assert_spin_locked(&dlm->spinlock);
192
193 list_add_tail(&mle->hb_events, &dlm->mle_hb_events);
194}
195
196
197static inline void __dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
198 struct dlm_master_list_entry *mle)
199{
200 if (!list_empty(&mle->hb_events))
201 list_del_init(&mle->hb_events);
202}
203
204
205static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
206 struct dlm_master_list_entry *mle)
207{
208 spin_lock(&dlm->spinlock);
209 __dlm_mle_detach_hb_events(dlm, mle);
210 spin_unlock(&dlm->spinlock);
211}
212
213static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle)
214{
215 struct dlm_ctxt *dlm;
216 dlm = mle->dlm;
217
218 assert_spin_locked(&dlm->spinlock);
219 assert_spin_locked(&dlm->master_lock);
220 mle->inuse++;
221 kref_get(&mle->mle_refs);
222}
223
224static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle)
225{
226 struct dlm_ctxt *dlm;
227 dlm = mle->dlm;
228
229 spin_lock(&dlm->spinlock);
230 spin_lock(&dlm->master_lock);
231 mle->inuse--;
232 __dlm_put_mle(mle);
233 spin_unlock(&dlm->master_lock);
234 spin_unlock(&dlm->spinlock);
235
236}
237
238/* remove from list and free */
239static void __dlm_put_mle(struct dlm_master_list_entry *mle)
240{
241 struct dlm_ctxt *dlm;
242 dlm = mle->dlm;
243
244 assert_spin_locked(&dlm->spinlock);
245 assert_spin_locked(&dlm->master_lock);
246 if (!atomic_read(&mle->mle_refs.refcount)) {
247 /* this may or may not crash, but who cares.
248 * it's a BUG. */
249 mlog(ML_ERROR, "bad mle: %p\n", mle);
250 dlm_print_one_mle(mle);
251 BUG();
252 } else
253 kref_put(&mle->mle_refs, dlm_mle_release);
254}
255
256
257/* must not have any spinlocks coming in */
258static void dlm_put_mle(struct dlm_master_list_entry *mle)
259{
260 struct dlm_ctxt *dlm;
261 dlm = mle->dlm;
262
263 spin_lock(&dlm->spinlock);
264 spin_lock(&dlm->master_lock);
265 __dlm_put_mle(mle);
266 spin_unlock(&dlm->master_lock);
267 spin_unlock(&dlm->spinlock);
268}
269
270static inline void dlm_get_mle(struct dlm_master_list_entry *mle)
271{
272 kref_get(&mle->mle_refs);
273}
274
275static void dlm_init_mle(struct dlm_master_list_entry *mle,
276 enum dlm_mle_type type,
277 struct dlm_ctxt *dlm,
278 struct dlm_lock_resource *res,
279 const char *name,
280 unsigned int namelen)
281{
282 assert_spin_locked(&dlm->spinlock);
283
284 mle->dlm = dlm;
285 mle->type = type;
286 INIT_LIST_HEAD(&mle->list);
287 INIT_LIST_HEAD(&mle->hb_events);
288 memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
289 spin_lock_init(&mle->spinlock);
290 init_waitqueue_head(&mle->wq);
291 atomic_set(&mle->woken, 0);
292 kref_init(&mle->mle_refs);
293 memset(mle->response_map, 0, sizeof(mle->response_map));
294 mle->master = O2NM_MAX_NODES;
295 mle->new_master = O2NM_MAX_NODES;
296 mle->inuse = 0;
297
298 if (mle->type == DLM_MLE_MASTER) {
299 BUG_ON(!res);
300 mle->u.res = res;
301 } else if (mle->type == DLM_MLE_BLOCK) {
302 BUG_ON(!name);
303 memcpy(mle->u.name.name, name, namelen);
304 mle->u.name.len = namelen;
305 } else /* DLM_MLE_MIGRATION */ {
306 BUG_ON(!name);
307 memcpy(mle->u.name.name, name, namelen);
308 mle->u.name.len = namelen;
309 }
310
311 /* copy off the node_map and register hb callbacks on our copy */
312 memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map));
313 memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map));
314 clear_bit(dlm->node_num, mle->vote_map);
315 clear_bit(dlm->node_num, mle->node_map);
316
317 /* attach the mle to the domain node up/down events */
318 __dlm_mle_attach_hb_events(dlm, mle);
319}
320
321
322/* returns 1 if found, 0 if not */
323static int dlm_find_mle(struct dlm_ctxt *dlm,
324 struct dlm_master_list_entry **mle,
325 char *name, unsigned int namelen)
326{
327 struct dlm_master_list_entry *tmpmle;
328
329 assert_spin_locked(&dlm->master_lock);
330
331 list_for_each_entry(tmpmle, &dlm->master_list, list) {
332 if (!dlm_mle_equal(dlm, tmpmle, name, namelen))
333 continue;
334 dlm_get_mle(tmpmle);
335 *mle = tmpmle;
336 return 1;
337 }
338 return 0;
339}
340
341void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up)
342{
343 struct dlm_master_list_entry *mle;
344
345 assert_spin_locked(&dlm->spinlock);
346
347 list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) {
348 if (node_up)
349 dlm_mle_node_up(dlm, mle, NULL, idx);
350 else
351 dlm_mle_node_down(dlm, mle, NULL, idx);
352 }
353}
354
355static void dlm_mle_node_down(struct dlm_ctxt *dlm,
356 struct dlm_master_list_entry *mle,
357 struct o2nm_node *node, int idx)
358{
359 spin_lock(&mle->spinlock);
360
361 if (!test_bit(idx, mle->node_map))
362 mlog(0, "node %u already removed from nodemap!\n", idx);
363 else
364 clear_bit(idx, mle->node_map);
365
366 spin_unlock(&mle->spinlock);
367}
368
369static void dlm_mle_node_up(struct dlm_ctxt *dlm,
370 struct dlm_master_list_entry *mle,
371 struct o2nm_node *node, int idx)
372{
373 spin_lock(&mle->spinlock);
374
375 if (test_bit(idx, mle->node_map))
376 mlog(0, "node %u already in node map!\n", idx);
377 else
378 set_bit(idx, mle->node_map);
379
380 spin_unlock(&mle->spinlock);
381}
382
383
384int dlm_init_mle_cache(void)
385{
386 dlm_mle_cache = kmem_cache_create("o2dlm_mle",
387 sizeof(struct dlm_master_list_entry),
388 0, SLAB_HWCACHE_ALIGN,
389 NULL);
390 if (dlm_mle_cache == NULL)
391 return -ENOMEM;
392 return 0;
393}
394
395void dlm_destroy_mle_cache(void)
396{
397 if (dlm_mle_cache)
398 kmem_cache_destroy(dlm_mle_cache);
399}
400
401static void dlm_mle_release(struct kref *kref)
402{
403 struct dlm_master_list_entry *mle;
404 struct dlm_ctxt *dlm;
405
406 mlog_entry_void();
407
408 mle = container_of(kref, struct dlm_master_list_entry, mle_refs);
409 dlm = mle->dlm;
410
411 if (mle->type != DLM_MLE_MASTER) {
412 mlog(0, "calling mle_release for %.*s, type %d\n",
413 mle->u.name.len, mle->u.name.name, mle->type);
414 } else {
415 mlog(0, "calling mle_release for %.*s, type %d\n",
416 mle->u.res->lockname.len,
417 mle->u.res->lockname.name, mle->type);
418 }
419 assert_spin_locked(&dlm->spinlock);
420 assert_spin_locked(&dlm->master_lock);
421
422 /* remove from list if not already */
423 if (!list_empty(&mle->list))
424 list_del_init(&mle->list);
425
426 /* detach the mle from the domain node up/down events */
427 __dlm_mle_detach_hb_events(dlm, mle);
428
429 /* NOTE: kfree under spinlock here.
430 * if this is bad, we can move this to a freelist. */
431 kmem_cache_free(dlm_mle_cache, mle);
432}
433
434
435/*
436 * LOCK RESOURCE FUNCTIONS
437 */
438
439int dlm_init_master_caches(void)
440{
441 dlm_lockres_cache = kmem_cache_create("o2dlm_lockres",
442 sizeof(struct dlm_lock_resource),
443 0, SLAB_HWCACHE_ALIGN, NULL);
444 if (!dlm_lockres_cache)
445 goto bail;
446
447 dlm_lockname_cache = kmem_cache_create("o2dlm_lockname",
448 DLM_LOCKID_NAME_MAX, 0,
449 SLAB_HWCACHE_ALIGN, NULL);
450 if (!dlm_lockname_cache)
451 goto bail;
452
453 return 0;
454bail:
455 dlm_destroy_master_caches();
456 return -ENOMEM;
457}
458
459void dlm_destroy_master_caches(void)
460{
461 if (dlm_lockname_cache)
462 kmem_cache_destroy(dlm_lockname_cache);
463
464 if (dlm_lockres_cache)
465 kmem_cache_destroy(dlm_lockres_cache);
466}
467
468static void dlm_set_lockres_owner(struct dlm_ctxt *dlm,
469 struct dlm_lock_resource *res,
470 u8 owner)
471{
472 assert_spin_locked(&res->spinlock);
473
474 mlog_entry("%.*s, %u\n", res->lockname.len, res->lockname.name, owner);
475
476 if (owner == dlm->node_num)
477 atomic_inc(&dlm->local_resources);
478 else if (owner == DLM_LOCK_RES_OWNER_UNKNOWN)
479 atomic_inc(&dlm->unknown_resources);
480 else
481 atomic_inc(&dlm->remote_resources);
482
483 res->owner = owner;
484}
485
486void dlm_change_lockres_owner(struct dlm_ctxt *dlm,
487 struct dlm_lock_resource *res, u8 owner)
488{
489 assert_spin_locked(&res->spinlock);
490
491 if (owner == res->owner)
492 return;
493
494 if (res->owner == dlm->node_num)
495 atomic_dec(&dlm->local_resources);
496 else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN)
497 atomic_dec(&dlm->unknown_resources);
498 else
499 atomic_dec(&dlm->remote_resources);
500
501 dlm_set_lockres_owner(dlm, res, owner);
502}
503
504
505static void dlm_lockres_release(struct kref *kref)
506{
507 struct dlm_lock_resource *res;
508 struct dlm_ctxt *dlm;
509
510 res = container_of(kref, struct dlm_lock_resource, refs);
511 dlm = res->dlm;
512
513 /* This should not happen -- all lockres' have a name
514 * associated with them at init time. */
515 BUG_ON(!res->lockname.name);
516
517 mlog(0, "destroying lockres %.*s\n", res->lockname.len,
518 res->lockname.name);
519
520 spin_lock(&dlm->track_lock);
521 if (!list_empty(&res->tracking))
522 list_del_init(&res->tracking);
523 else {
524 mlog(ML_ERROR, "Resource %.*s not on the Tracking list\n",
525 res->lockname.len, res->lockname.name);
526 dlm_print_one_lock_resource(res);
527 }
528 spin_unlock(&dlm->track_lock);
529
530 dlm_put(dlm);
531
532 if (!hlist_unhashed(&res->hash_node) ||
533 !list_empty(&res->granted) ||
534 !list_empty(&res->converting) ||
535 !list_empty(&res->blocked) ||
536 !list_empty(&res->dirty) ||
537 !list_empty(&res->recovering) ||
538 !list_empty(&res->purge)) {
539 mlog(ML_ERROR,
540 "Going to BUG for resource %.*s."
541 " We're on a list! [%c%c%c%c%c%c%c]\n",
542 res->lockname.len, res->lockname.name,
543 !hlist_unhashed(&res->hash_node) ? 'H' : ' ',
544 !list_empty(&res->granted) ? 'G' : ' ',
545 !list_empty(&res->converting) ? 'C' : ' ',
546 !list_empty(&res->blocked) ? 'B' : ' ',
547 !list_empty(&res->dirty) ? 'D' : ' ',
548 !list_empty(&res->recovering) ? 'R' : ' ',
549 !list_empty(&res->purge) ? 'P' : ' ');
550
551 dlm_print_one_lock_resource(res);
552 }
553
554 /* By the time we're ready to blow this guy away, we shouldn't
555 * be on any lists. */
556 BUG_ON(!hlist_unhashed(&res->hash_node));
557 BUG_ON(!list_empty(&res->granted));
558 BUG_ON(!list_empty(&res->converting));
559 BUG_ON(!list_empty(&res->blocked));
560 BUG_ON(!list_empty(&res->dirty));
561 BUG_ON(!list_empty(&res->recovering));
562 BUG_ON(!list_empty(&res->purge));
563
564 kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name);
565
566 kmem_cache_free(dlm_lockres_cache, res);
567}
568
569void dlm_lockres_put(struct dlm_lock_resource *res)
570{
571 kref_put(&res->refs, dlm_lockres_release);
572}
573
574static void dlm_init_lockres(struct dlm_ctxt *dlm,
575 struct dlm_lock_resource *res,
576 const char *name, unsigned int namelen)
577{
578 char *qname;
579
580 /* If we memset here, we lose our reference to the kmalloc'd
581 * res->lockname.name, so be sure to init every field
582 * correctly! */
583
584 qname = (char *) res->lockname.name;
585 memcpy(qname, name, namelen);
586
587 res->lockname.len = namelen;
588 res->lockname.hash = dlm_lockid_hash(name, namelen);
589
590 init_waitqueue_head(&res->wq);
591 spin_lock_init(&res->spinlock);
592 INIT_HLIST_NODE(&res->hash_node);
593 INIT_LIST_HEAD(&res->granted);
594 INIT_LIST_HEAD(&res->converting);
595 INIT_LIST_HEAD(&res->blocked);
596 INIT_LIST_HEAD(&res->dirty);
597 INIT_LIST_HEAD(&res->recovering);
598 INIT_LIST_HEAD(&res->purge);
599 INIT_LIST_HEAD(&res->tracking);
600 atomic_set(&res->asts_reserved, 0);
601 res->migration_pending = 0;
602 res->inflight_locks = 0;
603
604 /* put in dlm_lockres_release */
605 dlm_grab(dlm);
606 res->dlm = dlm;
607
608 kref_init(&res->refs);
609
610 /* just for consistency */
611 spin_lock(&res->spinlock);
612 dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
613 spin_unlock(&res->spinlock);
614
615 res->state = DLM_LOCK_RES_IN_PROGRESS;
616
617 res->last_used = 0;
618
619 spin_lock(&dlm->spinlock);
620 list_add_tail(&res->tracking, &dlm->tracking_list);
621 spin_unlock(&dlm->spinlock);
622
623 memset(res->lvb, 0, DLM_LVB_LEN);
624 memset(res->refmap, 0, sizeof(res->refmap));
625}
626
627struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
628 const char *name,
629 unsigned int namelen)
630{
631 struct dlm_lock_resource *res = NULL;
632
633 res = (struct dlm_lock_resource *)
634 kmem_cache_zalloc(dlm_lockres_cache, GFP_NOFS);
635 if (!res)
636 goto error;
637
638 res->lockname.name = (char *)
639 kmem_cache_zalloc(dlm_lockname_cache, GFP_NOFS);
640 if (!res->lockname.name)
641 goto error;
642
643 dlm_init_lockres(dlm, res, name, namelen);
644 return res;
645
646error:
647 if (res && res->lockname.name)
648 kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name);
649
650 if (res)
651 kmem_cache_free(dlm_lockres_cache, res);
652 return NULL;
653}
654
655void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
656 struct dlm_lock_resource *res,
657 int new_lockres,
658 const char *file,
659 int line)
660{
661 if (!new_lockres)
662 assert_spin_locked(&res->spinlock);
663
664 if (!test_bit(dlm->node_num, res->refmap)) {
665 BUG_ON(res->inflight_locks != 0);
666 dlm_lockres_set_refmap_bit(dlm->node_num, res);
667 }
668 res->inflight_locks++;
669 mlog(0, "%s:%.*s: inflight++: now %u\n",
670 dlm->name, res->lockname.len, res->lockname.name,
671 res->inflight_locks);
672}
673
674void __dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
675 struct dlm_lock_resource *res,
676 const char *file,
677 int line)
678{
679 assert_spin_locked(&res->spinlock);
680
681 BUG_ON(res->inflight_locks == 0);
682 res->inflight_locks--;
683 mlog(0, "%s:%.*s: inflight--: now %u\n",
684 dlm->name, res->lockname.len, res->lockname.name,
685 res->inflight_locks);
686 if (res->inflight_locks == 0)
687 dlm_lockres_clear_refmap_bit(dlm->node_num, res);
688 wake_up(&res->wq);
689}
690
691/*
692 * lookup a lock resource by name.
693 * may already exist in the hashtable.
694 * lockid is null terminated
695 *
696 * if not, allocate enough for the lockres and for
697 * the temporary structure used in doing the mastering.
698 *
699 * also, do a lookup in the dlm->master_list to see
700 * if another node has begun mastering the same lock.
701 * if so, there should be a block entry in there
702 * for this name, and we should *not* attempt to master
703 * the lock here. need to wait around for that node
704 * to assert_master (or die).
705 *
706 */
707struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
708 const char *lockid,
709 int namelen,
710 int flags)
711{
712 struct dlm_lock_resource *tmpres=NULL, *res=NULL;
713 struct dlm_master_list_entry *mle = NULL;
714 struct dlm_master_list_entry *alloc_mle = NULL;
715 int blocked = 0;
716 int ret, nodenum;
717 struct dlm_node_iter iter;
718 unsigned int hash;
719 int tries = 0;
720 int bit, wait_on_recovery = 0;
721 int drop_inflight_if_nonlocal = 0;
722
723 BUG_ON(!lockid);
724
725 hash = dlm_lockid_hash(lockid, namelen);
726
727 mlog(0, "get lockres %s (len %d)\n", lockid, namelen);
728
729lookup:
730 spin_lock(&dlm->spinlock);
731 tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash);
732 if (tmpres) {
733 int dropping_ref = 0;
734
735 spin_unlock(&dlm->spinlock);
736
737 spin_lock(&tmpres->spinlock);
738 /* We wait for the other thread that is mastering the resource */
739 if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
740 __dlm_wait_on_lockres(tmpres);
741 BUG_ON(tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN);
742 }
743
744 if (tmpres->owner == dlm->node_num) {
745 BUG_ON(tmpres->state & DLM_LOCK_RES_DROPPING_REF);
746 dlm_lockres_grab_inflight_ref(dlm, tmpres);
747 } else if (tmpres->state & DLM_LOCK_RES_DROPPING_REF)
748 dropping_ref = 1;
749 spin_unlock(&tmpres->spinlock);
750
751 /* wait until done messaging the master, drop our ref to allow
752 * the lockres to be purged, start over. */
753 if (dropping_ref) {
754 spin_lock(&tmpres->spinlock);
755 __dlm_wait_on_lockres_flags(tmpres, DLM_LOCK_RES_DROPPING_REF);
756 spin_unlock(&tmpres->spinlock);
757 dlm_lockres_put(tmpres);
758 tmpres = NULL;
759 goto lookup;
760 }
761
762 mlog(0, "found in hash!\n");
763 if (res)
764 dlm_lockres_put(res);
765 res = tmpres;
766 goto leave;
767 }
768
769 if (!res) {
770 spin_unlock(&dlm->spinlock);
771 mlog(0, "allocating a new resource\n");
772 /* nothing found and we need to allocate one. */
773 alloc_mle = (struct dlm_master_list_entry *)
774 kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
775 if (!alloc_mle)
776 goto leave;
777 res = dlm_new_lockres(dlm, lockid, namelen);
778 if (!res)
779 goto leave;
780 goto lookup;
781 }
782
783 mlog(0, "no lockres found, allocated our own: %p\n", res);
784
785 if (flags & LKM_LOCAL) {
786 /* caller knows it's safe to assume it's not mastered elsewhere
787 * DONE! return right away */
788 spin_lock(&res->spinlock);
789 dlm_change_lockres_owner(dlm, res, dlm->node_num);
790 __dlm_insert_lockres(dlm, res);
791 dlm_lockres_grab_inflight_ref(dlm, res);
792 spin_unlock(&res->spinlock);
793 spin_unlock(&dlm->spinlock);
794 /* lockres still marked IN_PROGRESS */
795 goto wake_waiters;
796 }
797
798 /* check master list to see if another node has started mastering it */
799 spin_lock(&dlm->master_lock);
800
801 /* if we found a block, wait for lock to be mastered by another node */
802 blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen);
803 if (blocked) {
804 int mig;
805 if (mle->type == DLM_MLE_MASTER) {
806 mlog(ML_ERROR, "master entry for nonexistent lock!\n");
807 BUG();
808 }
809 mig = (mle->type == DLM_MLE_MIGRATION);
810 /* if there is a migration in progress, let the migration
811 * finish before continuing. we can wait for the absence
812 * of the MIGRATION mle: either the migrate finished or
813 * one of the nodes died and the mle was cleaned up.
814 * if there is a BLOCK here, but it already has a master
815 * set, we are too late. the master does not have a ref
816 * for us in the refmap. detach the mle and drop it.
817 * either way, go back to the top and start over. */
818 if (mig || mle->master != O2NM_MAX_NODES) {
819 BUG_ON(mig && mle->master == dlm->node_num);
820 /* we arrived too late. the master does not
821 * have a ref for us. retry. */
822 mlog(0, "%s:%.*s: late on %s\n",
823 dlm->name, namelen, lockid,
824 mig ? "MIGRATION" : "BLOCK");
825 spin_unlock(&dlm->master_lock);
826 spin_unlock(&dlm->spinlock);
827
828 /* master is known, detach */
829 if (!mig)
830 dlm_mle_detach_hb_events(dlm, mle);
831 dlm_put_mle(mle);
832 mle = NULL;
833 /* this is lame, but we can't wait on either
834 * the mle or lockres waitqueue here */
835 if (mig)
836 msleep(100);
837 goto lookup;
838 }
839 } else {
840 /* go ahead and try to master lock on this node */
841 mle = alloc_mle;
842 /* make sure this does not get freed below */
843 alloc_mle = NULL;
844 dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0);
845 set_bit(dlm->node_num, mle->maybe_map);
846 list_add(&mle->list, &dlm->master_list);
847
848 /* still holding the dlm spinlock, check the recovery map
849 * to see if there are any nodes that still need to be
850 * considered. these will not appear in the mle nodemap
851 * but they might own this lockres. wait on them. */
852 bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
853 if (bit < O2NM_MAX_NODES) {
854 mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
855 "recover before lock mastery can begin\n",
856 dlm->name, namelen, (char *)lockid, bit);
857 wait_on_recovery = 1;
858 }
859 }
860
861 /* at this point there is either a DLM_MLE_BLOCK or a
862 * DLM_MLE_MASTER on the master list, so it's safe to add the
863 * lockres to the hashtable. anyone who finds the lock will
864 * still have to wait on the IN_PROGRESS. */
865
866 /* finally add the lockres to its hash bucket */
867 __dlm_insert_lockres(dlm, res);
868 /* since this lockres is new it does not require the spinlock */
869 dlm_lockres_grab_inflight_ref_new(dlm, res);
870
871 /* if this node does not become the master make sure to drop
872 * this inflight reference below */
873 drop_inflight_if_nonlocal = 1;
874
875 /* get an extra ref on the mle in case this is a BLOCK
876 * if so, the creator of the BLOCK may try to put the last
877 * ref at this time in the assert master handler, so we
878 * need an extra one to keep from a bad ptr deref. */
879 dlm_get_mle_inuse(mle);
880 spin_unlock(&dlm->master_lock);
881 spin_unlock(&dlm->spinlock);
882
883redo_request:
884 while (wait_on_recovery) {
885 /* any cluster changes that occurred after dropping the
886 * dlm spinlock would be detectable by a change on the mle,
887 * so we only need to clear out the recovery map once. */
888 if (dlm_is_recovery_lock(lockid, namelen)) {
889 mlog(ML_NOTICE, "%s: recovery map is not empty, but "
890 "must master $RECOVERY lock now\n", dlm->name);
891 if (!dlm_pre_master_reco_lockres(dlm, res))
892 wait_on_recovery = 0;
893 else {
894 mlog(0, "%s: waiting 500ms for heartbeat state "
895 "change\n", dlm->name);
896 msleep(500);
897 }
898 continue;
899 }
900
901 dlm_kick_recovery_thread(dlm);
902 msleep(1000);
903 dlm_wait_for_recovery(dlm);
904
905 spin_lock(&dlm->spinlock);
906 bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
907 if (bit < O2NM_MAX_NODES) {
908 mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
909 "recover before lock mastery can begin\n",
910 dlm->name, namelen, (char *)lockid, bit);
911 wait_on_recovery = 1;
912 } else
913 wait_on_recovery = 0;
914 spin_unlock(&dlm->spinlock);
915
916 if (wait_on_recovery)
917 dlm_wait_for_node_recovery(dlm, bit, 10000);
918 }
919
920 /* must wait for lock to be mastered elsewhere */
921 if (blocked)
922 goto wait;
923
924 ret = -EINVAL;
925 dlm_node_iter_init(mle->vote_map, &iter);
926 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
927 ret = dlm_do_master_request(res, mle, nodenum);
928 if (ret < 0)
929 mlog_errno(ret);
930 if (mle->master != O2NM_MAX_NODES) {
931 /* found a master ! */
932 if (mle->master <= nodenum)
933 break;
934 /* if our master request has not reached the master
935 * yet, keep going until it does. this is how the
936 * master will know that asserts are needed back to
937 * the lower nodes. */
938 mlog(0, "%s:%.*s: requests only up to %u but master "
939 "is %u, keep going\n", dlm->name, namelen,
940 lockid, nodenum, mle->master);
941 }
942 }
943
944wait:
945 /* keep going until the response map includes all nodes */
946 ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
947 if (ret < 0) {
948 wait_on_recovery = 1;
949 mlog(0, "%s:%.*s: node map changed, redo the "
950 "master request now, blocked=%d\n",
951 dlm->name, res->lockname.len,
952 res->lockname.name, blocked);
953 if (++tries > 20) {
954 mlog(ML_ERROR, "%s:%.*s: spinning on "
955 "dlm_wait_for_lock_mastery, blocked=%d\n",
956 dlm->name, res->lockname.len,
957 res->lockname.name, blocked);
958 dlm_print_one_lock_resource(res);
959 dlm_print_one_mle(mle);
960 tries = 0;
961 }
962 goto redo_request;
963 }
964
965 mlog(0, "lockres mastered by %u\n", res->owner);
966 /* make sure we never continue without this */
967 BUG_ON(res->owner == O2NM_MAX_NODES);
968
969 /* master is known, detach if not already detached */
970 dlm_mle_detach_hb_events(dlm, mle);
971 dlm_put_mle(mle);
972 /* put the extra ref */
973 dlm_put_mle_inuse(mle);
974
975wake_waiters:
976 spin_lock(&res->spinlock);
977 if (res->owner != dlm->node_num && drop_inflight_if_nonlocal)
978 dlm_lockres_drop_inflight_ref(dlm, res);
979 res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
980 spin_unlock(&res->spinlock);
981 wake_up(&res->wq);
982
983leave:
984 /* need to free the unused mle */
985 if (alloc_mle)
986 kmem_cache_free(dlm_mle_cache, alloc_mle);
987
988 return res;
989}
990
991
992#define DLM_MASTERY_TIMEOUT_MS 5000
993
994static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
995 struct dlm_lock_resource *res,
996 struct dlm_master_list_entry *mle,
997 int *blocked)
998{
999 u8 m;
1000 int ret, bit;
1001 int map_changed, voting_done;
1002 int assert, sleep;
1003
1004recheck:
1005 ret = 0;
1006 assert = 0;
1007
1008 /* check if another node has already become the owner */
1009 spin_lock(&res->spinlock);
1010 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
1011 mlog(0, "%s:%.*s: owner is suddenly %u\n", dlm->name,
1012 res->lockname.len, res->lockname.name, res->owner);
1013 spin_unlock(&res->spinlock);
1014 /* this will cause the master to re-assert across
1015 * the whole cluster, freeing up mles */
1016 if (res->owner != dlm->node_num) {
1017 ret = dlm_do_master_request(res, mle, res->owner);
1018 if (ret < 0) {
1019 /* give recovery a chance to run */
1020 mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret);
1021 msleep(500);
1022 goto recheck;
1023 }
1024 }
1025 ret = 0;
1026 goto leave;
1027 }
1028 spin_unlock(&res->spinlock);
1029
1030 spin_lock(&mle->spinlock);
1031 m = mle->master;
1032 map_changed = (memcmp(mle->vote_map, mle->node_map,
1033 sizeof(mle->vote_map)) != 0);
1034 voting_done = (memcmp(mle->vote_map, mle->response_map,
1035 sizeof(mle->vote_map)) == 0);
1036
1037 /* restart if we hit any errors */
1038 if (map_changed) {
1039 int b;
1040 mlog(0, "%s: %.*s: node map changed, restarting\n",
1041 dlm->name, res->lockname.len, res->lockname.name);
1042 ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
1043 b = (mle->type == DLM_MLE_BLOCK);
1044 if ((*blocked && !b) || (!*blocked && b)) {
1045 mlog(0, "%s:%.*s: status change: old=%d new=%d\n",
1046 dlm->name, res->lockname.len, res->lockname.name,
1047 *blocked, b);
1048 *blocked = b;
1049 }
1050 spin_unlock(&mle->spinlock);
1051 if (ret < 0) {
1052 mlog_errno(ret);
1053 goto leave;
1054 }
1055 mlog(0, "%s:%.*s: restart lock mastery succeeded, "
1056 "rechecking now\n", dlm->name, res->lockname.len,
1057 res->lockname.name);
1058 goto recheck;
1059 } else {
1060 if (!voting_done) {
1061 mlog(0, "map not changed and voting not done "
1062 "for %s:%.*s\n", dlm->name, res->lockname.len,
1063 res->lockname.name);
1064 }
1065 }
1066
1067 if (m != O2NM_MAX_NODES) {
1068 /* another node has done an assert!
1069 * all done! */
1070 sleep = 0;
1071 } else {
1072 sleep = 1;
1073 /* have all nodes responded? */
1074 if (voting_done && !*blocked) {
1075 bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
1076 if (dlm->node_num <= bit) {
1077 /* my node number is lowest.
1078 * now tell other nodes that I am
1079 * mastering this. */
1080 mle->master = dlm->node_num;
1081 /* ref was grabbed in get_lock_resource
1082 * will be dropped in dlmlock_master */
1083 assert = 1;
1084 sleep = 0;
1085 }
1086 /* if voting is done, but we have not received
1087 * an assert master yet, we must sleep */
1088 }
1089 }
1090
1091 spin_unlock(&mle->spinlock);
1092
1093 /* sleep if we haven't finished voting yet */
1094 if (sleep) {
1095 unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS);
1096
1097 /*
1098 if (atomic_read(&mle->mle_refs.refcount) < 2)
1099 mlog(ML_ERROR, "mle (%p) refs=%d, name=%.*s\n", mle,
1100 atomic_read(&mle->mle_refs.refcount),
1101 res->lockname.len, res->lockname.name);
1102 */
1103 atomic_set(&mle->woken, 0);
1104 (void)wait_event_timeout(mle->wq,
1105 (atomic_read(&mle->woken) == 1),
1106 timeo);
1107 if (res->owner == O2NM_MAX_NODES) {
1108 mlog(0, "%s:%.*s: waiting again\n", dlm->name,
1109 res->lockname.len, res->lockname.name);
1110 goto recheck;
1111 }
1112 mlog(0, "done waiting, master is %u\n", res->owner);
1113 ret = 0;
1114 goto leave;
1115 }
1116
1117 ret = 0; /* done */
1118 if (assert) {
1119 m = dlm->node_num;
1120 mlog(0, "about to master %.*s here, this=%u\n",
1121 res->lockname.len, res->lockname.name, m);
1122 ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0);
1123 if (ret) {
1124 /* This is a failure in the network path,
1125 * not in the response to the assert_master
1126 * (any nonzero response is a BUG on this node).
1127 * Most likely a socket just got disconnected
1128 * due to node death. */
1129 mlog_errno(ret);
1130 }
1131 /* no longer need to restart lock mastery.
1132 * all living nodes have been contacted. */
1133 ret = 0;
1134 }
1135
1136 /* set the lockres owner */
1137 spin_lock(&res->spinlock);
1138 /* mastery reference obtained either during
1139 * assert_master_handler or in get_lock_resource */
1140 dlm_change_lockres_owner(dlm, res, m);
1141 spin_unlock(&res->spinlock);
1142
1143leave:
1144 return ret;
1145}
1146
1147struct dlm_bitmap_diff_iter
1148{
1149 int curnode;
1150 unsigned long *orig_bm;
1151 unsigned long *cur_bm;
1152 unsigned long diff_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
1153};
1154
1155enum dlm_node_state_change
1156{
1157 NODE_DOWN = -1,
1158 NODE_NO_CHANGE = 0,
1159 NODE_UP
1160};
1161
1162static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter,
1163 unsigned long *orig_bm,
1164 unsigned long *cur_bm)
1165{
1166 unsigned long p1, p2;
1167 int i;
1168
1169 iter->curnode = -1;
1170 iter->orig_bm = orig_bm;
1171 iter->cur_bm = cur_bm;
1172
1173 for (i = 0; i < BITS_TO_LONGS(O2NM_MAX_NODES); i++) {
1174 p1 = *(iter->orig_bm + i);
1175 p2 = *(iter->cur_bm + i);
1176 iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1);
1177 }
1178}
1179
1180static int dlm_bitmap_diff_iter_next(struct dlm_bitmap_diff_iter *iter,
1181 enum dlm_node_state_change *state)
1182{
1183 int bit;
1184
1185 if (iter->curnode >= O2NM_MAX_NODES)
1186 return -ENOENT;
1187
1188 bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES,
1189 iter->curnode+1);
1190 if (bit >= O2NM_MAX_NODES) {
1191 iter->curnode = O2NM_MAX_NODES;
1192 return -ENOENT;
1193 }
1194
1195 /* if it was there in the original then this node died */
1196 if (test_bit(bit, iter->orig_bm))
1197 *state = NODE_DOWN;
1198 else
1199 *state = NODE_UP;
1200
1201 iter->curnode = bit;
1202 return bit;
1203}
1204
1205
1206static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
1207 struct dlm_lock_resource *res,
1208 struct dlm_master_list_entry *mle,
1209 int blocked)
1210{
1211 struct dlm_bitmap_diff_iter bdi;
1212 enum dlm_node_state_change sc;
1213 int node;
1214 int ret = 0;
1215
1216 mlog(0, "something happened such that the "
1217 "master process may need to be restarted!\n");
1218
1219 assert_spin_locked(&mle->spinlock);
1220
1221 dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map);
1222 node = dlm_bitmap_diff_iter_next(&bdi, &sc);
1223 while (node >= 0) {
1224 if (sc == NODE_UP) {
1225 /* a node came up. clear any old vote from
1226 * the response map and set it in the vote map
1227 * then restart the mastery. */
1228 mlog(ML_NOTICE, "node %d up while restarting\n", node);
1229
1230 /* redo the master request, but only for the new node */
1231 mlog(0, "sending request to new node\n");
1232 clear_bit(node, mle->response_map);
1233 set_bit(node, mle->vote_map);
1234 } else {
1235 mlog(ML_ERROR, "node down! %d\n", node);
1236 if (blocked) {
1237 int lowest = find_next_bit(mle->maybe_map,
1238 O2NM_MAX_NODES, 0);
1239
1240 /* act like it was never there */
1241 clear_bit(node, mle->maybe_map);
1242
1243 if (node == lowest) {
1244 mlog(0, "expected master %u died"
1245 " while this node was blocked "
1246 "waiting on it!\n", node);
1247 lowest = find_next_bit(mle->maybe_map,
1248 O2NM_MAX_NODES,
1249 lowest+1);
1250 if (lowest < O2NM_MAX_NODES) {
1251 mlog(0, "%s:%.*s:still "
1252 "blocked. waiting on %u "
1253 "now\n", dlm->name,
1254 res->lockname.len,
1255 res->lockname.name,
1256 lowest);
1257 } else {
1258 /* mle is an MLE_BLOCK, but
1259 * there is now nothing left to
1260 * block on. we need to return
1261 * all the way back out and try
1262 * again with an MLE_MASTER.
1263 * dlm_do_local_recovery_cleanup
1264 * has already run, so the mle
1265 * refcount is ok */
1266 mlog(0, "%s:%.*s: no "
1267 "longer blocking. try to "
1268 "master this here\n",
1269 dlm->name,
1270 res->lockname.len,
1271 res->lockname.name);
1272 mle->type = DLM_MLE_MASTER;
1273 mle->u.res = res;
1274 }
1275 }
1276 }
1277
1278 /* now blank out everything, as if we had never
1279 * contacted anyone */
1280 memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
1281 memset(mle->response_map, 0, sizeof(mle->response_map));
1282 /* reset the vote_map to the current node_map */
1283 memcpy(mle->vote_map, mle->node_map,
1284 sizeof(mle->node_map));
1285 /* put myself into the maybe map */
1286 if (mle->type != DLM_MLE_BLOCK)
1287 set_bit(dlm->node_num, mle->maybe_map);
1288 }
1289 ret = -EAGAIN;
1290 node = dlm_bitmap_diff_iter_next(&bdi, &sc);
1291 }
1292 return ret;
1293}
1294
1295
1296/*
1297 * DLM_MASTER_REQUEST_MSG
1298 *
1299 * returns: 0 on success,
1300 * -errno on a network error
1301 *
1302 * on error, the caller should assume the target node is "dead"
1303 *
1304 */
1305
1306static int dlm_do_master_request(struct dlm_lock_resource *res,
1307 struct dlm_master_list_entry *mle, int to)
1308{
1309 struct dlm_ctxt *dlm = mle->dlm;
1310 struct dlm_master_request request;
1311 int ret, response=0, resend;
1312
1313 memset(&request, 0, sizeof(request));
1314 request.node_idx = dlm->node_num;
1315
1316 BUG_ON(mle->type == DLM_MLE_MIGRATION);
1317
1318 if (mle->type != DLM_MLE_MASTER) {
1319 request.namelen = mle->u.name.len;
1320 memcpy(request.name, mle->u.name.name, request.namelen);
1321 } else {
1322 request.namelen = mle->u.res->lockname.len;
1323 memcpy(request.name, mle->u.res->lockname.name,
1324 request.namelen);
1325 }
1326
1327again:
1328 ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request,
1329 sizeof(request), to, &response);
1330 if (ret < 0) {
1331 if (ret == -ESRCH) {
1332 /* should never happen */
1333 mlog(ML_ERROR, "TCP stack not ready!\n");
1334 BUG();
1335 } else if (ret == -EINVAL) {
1336 mlog(ML_ERROR, "bad args passed to o2net!\n");
1337 BUG();
1338 } else if (ret == -ENOMEM) {
1339 mlog(ML_ERROR, "out of memory while trying to send "
1340 "network message! retrying\n");
1341 /* this is totally crude */
1342 msleep(50);
1343 goto again;
1344 } else if (!dlm_is_host_down(ret)) {
1345 /* not a network error. bad. */
1346 mlog_errno(ret);
1347 mlog(ML_ERROR, "unhandled error!");
1348 BUG();
1349 }
1350 /* all other errors should be network errors,
1351 * and likely indicate node death */
1352 mlog(ML_ERROR, "link to %d went down!\n", to);
1353 goto out;
1354 }
1355
1356 ret = 0;
1357 resend = 0;
1358 spin_lock(&mle->spinlock);
1359 switch (response) {
1360 case DLM_MASTER_RESP_YES:
1361 set_bit(to, mle->response_map);
1362 mlog(0, "node %u is the master, response=YES\n", to);
1363 mlog(0, "%s:%.*s: master node %u now knows I have a "
1364 "reference\n", dlm->name, res->lockname.len,
1365 res->lockname.name, to);
1366 mle->master = to;
1367 break;
1368 case DLM_MASTER_RESP_NO:
1369 mlog(0, "node %u not master, response=NO\n", to);
1370 set_bit(to, mle->response_map);
1371 break;
1372 case DLM_MASTER_RESP_MAYBE:
1373 mlog(0, "node %u not master, response=MAYBE\n", to);
1374 set_bit(to, mle->response_map);
1375 set_bit(to, mle->maybe_map);
1376 break;
1377 case DLM_MASTER_RESP_ERROR:
1378 mlog(0, "node %u hit an error, resending\n", to);
1379 resend = 1;
1380 response = 0;
1381 break;
1382 default:
1383 mlog(ML_ERROR, "bad response! %u\n", response);
1384 BUG();
1385 }
1386 spin_unlock(&mle->spinlock);
1387 if (resend) {
1388 /* this is also totally crude */
1389 msleep(50);
1390 goto again;
1391 }
1392
1393out:
1394 return ret;
1395}
1396
1397/*
1398 * locks that can be taken here:
1399 * dlm->spinlock
1400 * res->spinlock
1401 * mle->spinlock
1402 * dlm->master_list
1403 *
1404 * if possible, TRIM THIS DOWN!!!
1405 */
1406int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
1407 void **ret_data)
1408{
1409 u8 response = DLM_MASTER_RESP_MAYBE;
1410 struct dlm_ctxt *dlm = data;
1411 struct dlm_lock_resource *res = NULL;
1412 struct dlm_master_request *request = (struct dlm_master_request *) msg->buf;
1413 struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL;
1414 char *name;
1415 unsigned int namelen, hash;
1416 int found, ret;
1417 int set_maybe;
1418 int dispatch_assert = 0;
1419
1420 if (!dlm_grab(dlm))
1421 return DLM_MASTER_RESP_NO;
1422
1423 if (!dlm_domain_fully_joined(dlm)) {
1424 response = DLM_MASTER_RESP_NO;
1425 goto send_response;
1426 }
1427
1428 name = request->name;
1429 namelen = request->namelen;
1430 hash = dlm_lockid_hash(name, namelen);
1431
1432 if (namelen > DLM_LOCKID_NAME_MAX) {
1433 response = DLM_IVBUFLEN;
1434 goto send_response;
1435 }
1436
1437way_up_top:
1438 spin_lock(&dlm->spinlock);
1439 res = __dlm_lookup_lockres(dlm, name, namelen, hash);
1440 if (res) {
1441 spin_unlock(&dlm->spinlock);
1442
1443 /* take care of the easy cases up front */
1444 spin_lock(&res->spinlock);
1445 if (res->state & (DLM_LOCK_RES_RECOVERING|
1446 DLM_LOCK_RES_MIGRATING)) {
1447 spin_unlock(&res->spinlock);
1448 mlog(0, "returning DLM_MASTER_RESP_ERROR since res is "
1449 "being recovered/migrated\n");
1450 response = DLM_MASTER_RESP_ERROR;
1451 if (mle)
1452 kmem_cache_free(dlm_mle_cache, mle);
1453 goto send_response;
1454 }
1455
1456 if (res->owner == dlm->node_num) {
1457 mlog(0, "%s:%.*s: setting bit %u in refmap\n",
1458 dlm->name, namelen, name, request->node_idx);
1459 dlm_lockres_set_refmap_bit(request->node_idx, res);
1460 spin_unlock(&res->spinlock);
1461 response = DLM_MASTER_RESP_YES;
1462 if (mle)
1463 kmem_cache_free(dlm_mle_cache, mle);
1464
1465 /* this node is the owner.
1466 * there is some extra work that needs to
1467 * happen now. the requesting node has
1468 * caused all nodes up to this one to
1469 * create mles. this node now needs to
1470 * go back and clean those up. */
1471 dispatch_assert = 1;
1472 goto send_response;
1473 } else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
1474 spin_unlock(&res->spinlock);
1475 // mlog(0, "node %u is the master\n", res->owner);
1476 response = DLM_MASTER_RESP_NO;
1477 if (mle)
1478 kmem_cache_free(dlm_mle_cache, mle);
1479 goto send_response;
1480 }
1481
1482 /* ok, there is no owner. either this node is
1483 * being blocked, or it is actively trying to
1484 * master this lock. */
1485 if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
1486 mlog(ML_ERROR, "lock with no owner should be "
1487 "in-progress!\n");
1488 BUG();
1489 }
1490
1491 // mlog(0, "lockres is in progress...\n");
1492 spin_lock(&dlm->master_lock);
1493 found = dlm_find_mle(dlm, &tmpmle, name, namelen);
1494 if (!found) {
1495 mlog(ML_ERROR, "no mle found for this lock!\n");
1496 BUG();
1497 }
1498 set_maybe = 1;
1499 spin_lock(&tmpmle->spinlock);
1500 if (tmpmle->type == DLM_MLE_BLOCK) {
1501 // mlog(0, "this node is waiting for "
1502 // "lockres to be mastered\n");
1503 response = DLM_MASTER_RESP_NO;
1504 } else if (tmpmle->type == DLM_MLE_MIGRATION) {
1505 mlog(0, "node %u is master, but trying to migrate to "
1506 "node %u.\n", tmpmle->master, tmpmle->new_master);
1507 if (tmpmle->master == dlm->node_num) {
1508 mlog(ML_ERROR, "no owner on lockres, but this "
1509 "node is trying to migrate it to %u?!\n",
1510 tmpmle->new_master);
1511 BUG();
1512 } else {
1513 /* the real master can respond on its own */
1514 response = DLM_MASTER_RESP_NO;
1515 }
1516 } else if (tmpmle->master != DLM_LOCK_RES_OWNER_UNKNOWN) {
1517 set_maybe = 0;
1518 if (tmpmle->master == dlm->node_num) {
1519 response = DLM_MASTER_RESP_YES;
1520 /* this node will be the owner.
1521 * go back and clean the mles on any
1522 * other nodes */
1523 dispatch_assert = 1;
1524 dlm_lockres_set_refmap_bit(request->node_idx, res);
1525 mlog(0, "%s:%.*s: setting bit %u in refmap\n",
1526 dlm->name, namelen, name,
1527 request->node_idx);
1528 } else
1529 response = DLM_MASTER_RESP_NO;
1530 } else {
1531 // mlog(0, "this node is attempting to "
1532 // "master lockres\n");
1533 response = DLM_MASTER_RESP_MAYBE;
1534 }
1535 if (set_maybe)
1536 set_bit(request->node_idx, tmpmle->maybe_map);
1537 spin_unlock(&tmpmle->spinlock);
1538
1539 spin_unlock(&dlm->master_lock);
1540 spin_unlock(&res->spinlock);
1541
1542 /* keep the mle attached to heartbeat events */
1543 dlm_put_mle(tmpmle);
1544 if (mle)
1545 kmem_cache_free(dlm_mle_cache, mle);
1546 goto send_response;
1547 }
1548
1549 /*
1550 * lockres doesn't exist on this node
1551 * if there is an MLE_BLOCK, return NO
1552 * if there is an MLE_MASTER, return MAYBE
1553 * otherwise, add an MLE_BLOCK, return NO
1554 */
1555 spin_lock(&dlm->master_lock);
1556 found = dlm_find_mle(dlm, &tmpmle, name, namelen);
1557 if (!found) {
1558 /* this lockid has never been seen on this node yet */
1559 // mlog(0, "no mle found\n");
1560 if (!mle) {
1561 spin_unlock(&dlm->master_lock);
1562 spin_unlock(&dlm->spinlock);
1563
1564 mle = (struct dlm_master_list_entry *)
1565 kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
1566 if (!mle) {
1567 response = DLM_MASTER_RESP_ERROR;
1568 mlog_errno(-ENOMEM);
1569 goto send_response;
1570 }
1571 goto way_up_top;
1572 }
1573
1574 // mlog(0, "this is second time thru, already allocated, "
1575 // "add the block.\n");
1576 dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen);
1577 set_bit(request->node_idx, mle->maybe_map);
1578 list_add(&mle->list, &dlm->master_list);
1579 response = DLM_MASTER_RESP_NO;
1580 } else {
1581 // mlog(0, "mle was found\n");
1582 set_maybe = 1;
1583 spin_lock(&tmpmle->spinlock);
1584 if (tmpmle->master == dlm->node_num) {
1585 mlog(ML_ERROR, "no lockres, but an mle with this node as master!\n");
1586 BUG();
1587 }
1588 if (tmpmle->type == DLM_MLE_BLOCK)
1589 response = DLM_MASTER_RESP_NO;
1590 else if (tmpmle->type == DLM_MLE_MIGRATION) {
1591 mlog(0, "migration mle was found (%u->%u)\n",
1592 tmpmle->master, tmpmle->new_master);
1593 /* real master can respond on its own */
1594 response = DLM_MASTER_RESP_NO;
1595 } else
1596 response = DLM_MASTER_RESP_MAYBE;
1597 if (set_maybe)
1598 set_bit(request->node_idx, tmpmle->maybe_map);
1599 spin_unlock(&tmpmle->spinlock);
1600 }
1601 spin_unlock(&dlm->master_lock);
1602 spin_unlock(&dlm->spinlock);
1603
1604 if (found) {
1605 /* keep the mle attached to heartbeat events */
1606 dlm_put_mle(tmpmle);
1607 }
1608send_response:
1609 /*
1610 * __dlm_lookup_lockres() grabbed a reference to this lockres.
1611 * The reference is released by dlm_assert_master_worker() under
1612 * the call to dlm_dispatch_assert_master(). If
1613 * dlm_assert_master_worker() isn't called, we drop it here.
1614 */
1615 if (dispatch_assert) {
1616 if (response != DLM_MASTER_RESP_YES)
1617 mlog(ML_ERROR, "invalid response %d\n", response);
1618 if (!res) {
1619 mlog(ML_ERROR, "bad lockres while trying to assert!\n");
1620 BUG();
1621 }
1622 mlog(0, "%u is the owner of %.*s, cleaning everyone else\n",
1623 dlm->node_num, res->lockname.len, res->lockname.name);
1624 ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
1625 DLM_ASSERT_MASTER_MLE_CLEANUP);
1626 if (ret < 0) {
1627 mlog(ML_ERROR, "failed to dispatch assert master work\n");
1628 response = DLM_MASTER_RESP_ERROR;
1629 dlm_lockres_put(res);
1630 }
1631 } else {
1632 if (res)
1633 dlm_lockres_put(res);
1634 }
1635
1636 dlm_put(dlm);
1637 return response;
1638}
1639
1640/*
1641 * DLM_ASSERT_MASTER_MSG
1642 */
1643
1644
1645/*
1646 * NOTE: this can be used for debugging
1647 * can periodically run all locks owned by this node
1648 * and re-assert across the cluster...
1649 */
1650static int dlm_do_assert_master(struct dlm_ctxt *dlm,
1651 struct dlm_lock_resource *res,
1652 void *nodemap, u32 flags)
1653{
1654 struct dlm_assert_master assert;
1655 int to, tmpret;
1656 struct dlm_node_iter iter;
1657 int ret = 0;
1658 int reassert;
1659 const char *lockname = res->lockname.name;
1660 unsigned int namelen = res->lockname.len;
1661
1662 BUG_ON(namelen > O2NM_MAX_NAME_LEN);
1663
1664 spin_lock(&res->spinlock);
1665 res->state |= DLM_LOCK_RES_SETREF_INPROG;
1666 spin_unlock(&res->spinlock);
1667
1668again:
1669 reassert = 0;
1670
1671 /* note that if this nodemap is empty, it returns 0 */
1672 dlm_node_iter_init(nodemap, &iter);
1673 while ((to = dlm_node_iter_next(&iter)) >= 0) {
1674 int r = 0;
1675 struct dlm_master_list_entry *mle = NULL;
1676
1677 mlog(0, "sending assert master to %d (%.*s)\n", to,
1678 namelen, lockname);
1679 memset(&assert, 0, sizeof(assert));
1680 assert.node_idx = dlm->node_num;
1681 assert.namelen = namelen;
1682 memcpy(assert.name, lockname, namelen);
1683 assert.flags = cpu_to_be32(flags);
1684
1685 tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key,
1686 &assert, sizeof(assert), to, &r);
1687 if (tmpret < 0) {
1688 mlog(0, "assert_master returned %d!\n", tmpret);
1689 if (!dlm_is_host_down(tmpret)) {
1690 mlog(ML_ERROR, "unhandled error=%d!\n", tmpret);
1691 BUG();
1692 }
1693 /* a node died. finish out the rest of the nodes. */
3b3b84a8 1694 mlog(0, "link to %d went down!\n", to);
6714d8e8
KH
1695 /* any nonzero status return will do */
1696 ret = tmpret;
1697 r = 0;
1698 } else if (r < 0) {
1699 /* ok, something horribly messed. kill thyself. */
1700 mlog(ML_ERROR,"during assert master of %.*s to %u, "
1701 "got %d.\n", namelen, lockname, to, r);
1702 spin_lock(&dlm->spinlock);
1703 spin_lock(&dlm->master_lock);
1704 if (dlm_find_mle(dlm, &mle, (char *)lockname,
1705 namelen)) {
1706 dlm_print_one_mle(mle);
1707 __dlm_put_mle(mle);
1708 }
1709 spin_unlock(&dlm->master_lock);
1710 spin_unlock(&dlm->spinlock);
1711 BUG();
1712 }
1713
1714 if (r & DLM_ASSERT_RESPONSE_REASSERT &&
1715 !(r & DLM_ASSERT_RESPONSE_MASTERY_REF)) {
1716 mlog(ML_ERROR, "%.*s: very strange, "
1717 "master MLE but no lockres on %u\n",
1718 namelen, lockname, to);
1719 }
1720
1721 if (r & DLM_ASSERT_RESPONSE_REASSERT) {
1722 mlog(0, "%.*s: node %u create mles on other "
1723 "nodes and requests a re-assert\n",
1724 namelen, lockname, to);
1725 reassert = 1;
1726 }
1727 if (r & DLM_ASSERT_RESPONSE_MASTERY_REF) {
1728 mlog(0, "%.*s: node %u has a reference to this "
1729 "lockres, set the bit in the refmap\n",
1730 namelen, lockname, to);
1731 spin_lock(&res->spinlock);
1732 dlm_lockres_set_refmap_bit(to, res);
1733 spin_unlock(&res->spinlock);
1734 }
1735 }
1736
1737 if (reassert)
1738 goto again;
1739
1740 spin_lock(&res->spinlock);
1741 res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
1742 spin_unlock(&res->spinlock);
1743 wake_up(&res->wq);
1744
1745 return ret;
1746}
1747
1748/*
1749 * locks that can be taken here:
1750 * dlm->spinlock
1751 * res->spinlock
1752 * mle->spinlock
1753 * dlm->master_list
1754 *
1755 * if possible, TRIM THIS DOWN!!!
1756 */
1757int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
1758 void **ret_data)
1759{
1760 struct dlm_ctxt *dlm = data;
1761 struct dlm_master_list_entry *mle = NULL;
1762 struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf;
1763 struct dlm_lock_resource *res = NULL;
1764 char *name;
1765 unsigned int namelen, hash;
1766 u32 flags;
1767 int master_request = 0, have_lockres_ref = 0;
1768 int ret = 0;
1769
1770 if (!dlm_grab(dlm))
1771 return 0;
1772
1773 name = assert->name;
1774 namelen = assert->namelen;
1775 hash = dlm_lockid_hash(name, namelen);
1776 flags = be32_to_cpu(assert->flags);
1777
1778 if (namelen > DLM_LOCKID_NAME_MAX) {
1779 mlog(ML_ERROR, "Invalid name length!");
1780 goto done;
1781 }
1782
1783 spin_lock(&dlm->spinlock);
1784
1785 if (flags)
1786 mlog(0, "assert_master with flags: %u\n", flags);
1787
1788 /* find the MLE */
1789 spin_lock(&dlm->master_lock);
1790 if (!dlm_find_mle(dlm, &mle, name, namelen)) {
1791 /* not an error, could be master just re-asserting */
1792 mlog(0, "just got an assert_master from %u, but no "
1793 "MLE for it! (%.*s)\n", assert->node_idx,
1794 namelen, name);
1795 } else {
1796 int bit = find_next_bit (mle->maybe_map, O2NM_MAX_NODES, 0);
1797 if (bit >= O2NM_MAX_NODES) {
1798 /* not necessarily an error, though less likely.
1799 * could be master just re-asserting. */
1800 mlog(0, "no bits set in the maybe_map, but %u "
1801 "is asserting! (%.*s)\n", assert->node_idx,
1802 namelen, name);
1803 } else if (bit != assert->node_idx) {
1804 if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
1805 mlog(0, "master %u was found, %u should "
1806 "back off\n", assert->node_idx, bit);
1807 } else {
1808 /* with the fix for bug 569, a higher node
1809 * number winning the mastery will respond
1810 * YES to mastery requests, but this node
1811 * had no way of knowing. let it pass. */
1812 mlog(0, "%u is the lowest node, "
1813 "%u is asserting. (%.*s) %u must "
1814 "have begun after %u won.\n", bit,
1815 assert->node_idx, namelen, name, bit,
1816 assert->node_idx);
1817 }
1818 }
2d1a868c
KH
1819 if (mle->type == DLM_MLE_MIGRATION) {
1820 if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
1821 mlog(0, "%s:%.*s: got cleanup assert"
1822 " from %u for migration\n",
1823 dlm->name, namelen, name,
1824 assert->node_idx);
1825 } else if (!(flags & DLM_ASSERT_MASTER_FINISH_MIGRATION)) {
1826 mlog(0, "%s:%.*s: got unrelated assert"
1827 " from %u for migration, ignoring\n",
1828 dlm->name, namelen, name,
1829 assert->node_idx);
1830 __dlm_put_mle(mle);
1831 spin_unlock(&dlm->master_lock);
1832 spin_unlock(&dlm->spinlock);
1833 goto done;
1834 }
1835 }
6714d8e8
KH
1836 }
1837 spin_unlock(&dlm->master_lock);
1838
1839 /* ok everything checks out with the MLE
1840 * now check to see if there is a lockres */
a3d33291 1841 res = __dlm_lookup_lockres(dlm, name, namelen, hash);
6714d8e8
KH
1842 if (res) {
1843 spin_lock(&res->spinlock);
1844 if (res->state & DLM_LOCK_RES_RECOVERING) {
1845 mlog(ML_ERROR, "%u asserting but %.*s is "
1846 "RECOVERING!\n", assert->node_idx, namelen, name);
1847 goto kill;
1848 }
1849 if (!mle) {
dc2ed195
KH
1850 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN &&
1851 res->owner != assert->node_idx) {
6714d8e8
KH
1852 mlog(ML_ERROR, "assert_master from "
1853 "%u, but current owner is "
1854 "%u! (%.*s)\n",
1855 assert->node_idx, res->owner,
1856 namelen, name);
1857 goto kill;
1858 }
1859 } else if (mle->type != DLM_MLE_MIGRATION) {
1860 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
1861 /* owner is just re-asserting */
1862 if (res->owner == assert->node_idx) {
1863 mlog(0, "owner %u re-asserting on "
1864 "lock %.*s\n", assert->node_idx,
1865 namelen, name);
1866 goto ok;
1867 }
1868 mlog(ML_ERROR, "got assert_master from "
1869 "node %u, but %u is the owner! "
1870 "(%.*s)\n", assert->node_idx,
1871 res->owner, namelen, name);
1872 goto kill;
1873 }
1874 if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
1875 mlog(ML_ERROR, "got assert from %u, but lock "
1876 "with no owner should be "
1877 "in-progress! (%.*s)\n",
1878 assert->node_idx,
1879 namelen, name);
1880 goto kill;
1881 }
1882 } else /* mle->type == DLM_MLE_MIGRATION */ {
1883 /* should only be getting an assert from new master */
1884 if (assert->node_idx != mle->new_master) {
1885 mlog(ML_ERROR, "got assert from %u, but "
1886 "new master is %u, and old master "
1887 "was %u (%.*s)\n",
1888 assert->node_idx, mle->new_master,
1889 mle->master, namelen, name);
1890 goto kill;
1891 }
1892
1893 }
1894ok:
1895 spin_unlock(&res->spinlock);
1896 }
1897 spin_unlock(&dlm->spinlock);
1898
1899 // mlog(0, "woo! got an assert_master from node %u!\n",
1900 // assert->node_idx);
1901 if (mle) {
9c6510a5
KH
1902 int extra_ref = 0;
1903 int nn = -1;
a2bf0477 1904 int rr, err = 0;
6714d8e8
KH
1905
1906 spin_lock(&mle->spinlock);
9c6510a5
KH
1907 if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
1908 extra_ref = 1;
1909 else {
1910 /* MASTER mle: if any bits set in the response map
1911 * then the calling node needs to re-assert to clear
1912 * up nodes that this node contacted */
1913 while ((nn = find_next_bit(mle->response_map, O2NM_MAX_NODES,
1914 nn+1)) < O2NM_MAX_NODES) {
1915 if (nn != dlm->node_num && nn != assert->node_idx)
1916 master_request = 1;
1917 }
1918 }
6714d8e8
KH
1919 mle->master = assert->node_idx;
1920 atomic_set(&mle->woken, 1);
1921 wake_up(&mle->wq);
1922 spin_unlock(&mle->spinlock);
1923
a2bf0477 1924 if (res) {
a6fa3640 1925 int wake = 0;
6714d8e8 1926 spin_lock(&res->spinlock);
a2bf0477
KH
1927 if (mle->type == DLM_MLE_MIGRATION) {
1928 mlog(0, "finishing off migration of lockres %.*s, "
1929 "from %u to %u\n",
1930 res->lockname.len, res->lockname.name,
1931 dlm->node_num, mle->new_master);
1932 res->state &= ~DLM_LOCK_RES_MIGRATING;
a6fa3640 1933 wake = 1;
a2bf0477
KH
1934 dlm_change_lockres_owner(dlm, res, mle->new_master);
1935 BUG_ON(res->state & DLM_LOCK_RES_DIRTY);
1936 } else {
1937 dlm_change_lockres_owner(dlm, res, mle->master);
1938 }
6714d8e8 1939 spin_unlock(&res->spinlock);
ba2bf218 1940 have_lockres_ref = 1;
a6fa3640
KH
1941 if (wake)
1942 wake_up(&res->wq);
6714d8e8 1943 }
a2bf0477
KH
1944
1945 /* master is known, detach if not already detached.
1946 * ensures that only one assert_master call will happen
1947 * on this mle. */
1948 spin_lock(&dlm->spinlock);
1949 spin_lock(&dlm->master_lock);
1950
1951 rr = atomic_read(&mle->mle_refs.refcount);
1952 if (mle->inuse > 0) {
1953 if (extra_ref && rr < 3)
1954 err = 1;
1955 else if (!extra_ref && rr < 2)
1956 err = 1;
1957 } else {
1958 if (extra_ref && rr < 2)
1959 err = 1;
1960 else if (!extra_ref && rr < 1)
1961 err = 1;
1962 }
1963 if (err) {
1964 mlog(ML_ERROR, "%s:%.*s: got assert master from %u "
1965 "that will mess up this node, refs=%d, extra=%d, "
1966 "inuse=%d\n", dlm->name, namelen, name,
1967 assert->node_idx, rr, extra_ref, mle->inuse);
1968 dlm_print_one_mle(mle);
1969 }
1970 list_del_init(&mle->list);
1971 __dlm_mle_detach_hb_events(dlm, mle);
1972 __dlm_put_mle(mle);
6714d8e8
KH
1973 if (extra_ref) {
1974 /* the assert master message now balances the extra
1975 * ref given by the master / migration request message.
1976 * if this is the last put, it will be removed
1977 * from the list. */
a2bf0477
KH
1978 __dlm_put_mle(mle);
1979 }
1980 spin_unlock(&dlm->master_lock);
1981 spin_unlock(&dlm->spinlock);
1982 } else if (res) {
1983 if (res->owner != assert->node_idx) {
1984 mlog(0, "assert_master from %u, but current "
1985 "owner is %u (%.*s), no mle\n", assert->node_idx,
1986 res->owner, namelen, name);
6714d8e8
KH
1987 }
1988 }
1989
1990done:
9c6510a5 1991 ret = 0;
3b8118cf
KH
1992 if (res) {
1993 spin_lock(&res->spinlock);
1994 res->state |= DLM_LOCK_RES_SETREF_INPROG;
1995 spin_unlock(&res->spinlock);
1996 *ret_data = (void *)res;
1997 }
6714d8e8 1998 dlm_put(dlm);
9c6510a5
KH
1999 if (master_request) {
2000 mlog(0, "need to tell master to reassert\n");
ba2bf218
KH
2001 /* positive. negative would shoot down the node. */
2002 ret |= DLM_ASSERT_RESPONSE_REASSERT;
2003 if (!have_lockres_ref) {
2004 mlog(ML_ERROR, "strange, got assert from %u, MASTER "
2005 "mle present here for %s:%.*s, but no lockres!\n",
2006 assert->node_idx, dlm->name, namelen, name);
2007 }
2008 }
2009 if (have_lockres_ref) {
2010 /* let the master know we have a reference to the lockres */
2011 ret |= DLM_ASSERT_RESPONSE_MASTERY_REF;
2012 mlog(0, "%s:%.*s: got assert from %u, need a ref\n",
2013 dlm->name, namelen, name, assert->node_idx);
9c6510a5
KH
2014 }
2015 return ret;
6714d8e8
KH
2016
2017kill:
2018 /* kill the caller! */
a9ee4c8a
KH
2019 mlog(ML_ERROR, "Bad message received from another node. Dumping state "
2020 "and killing the other node now! This node is OK and can continue.\n");
2021 __dlm_print_one_lock_resource(res);
6714d8e8
KH
2022 spin_unlock(&res->spinlock);
2023 spin_unlock(&dlm->spinlock);
3b8118cf 2024 *ret_data = (void *)res;
6714d8e8
KH
2025 dlm_put(dlm);
2026 return -EINVAL;
2027}
2028
3b8118cf
KH
2029void dlm_assert_master_post_handler(int status, void *data, void *ret_data)
2030{
2031 struct dlm_lock_resource *res = (struct dlm_lock_resource *)ret_data;
2032
2033 if (ret_data) {
2034 spin_lock(&res->spinlock);
2035 res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
2036 spin_unlock(&res->spinlock);
2037 wake_up(&res->wq);
2038 dlm_lockres_put(res);
2039 }
2040 return;
2041}
2042
6714d8e8
KH
2043int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
2044 struct dlm_lock_resource *res,
2045 int ignore_higher, u8 request_from, u32 flags)
2046{
2047 struct dlm_work_item *item;
cd861280 2048 item = kzalloc(sizeof(*item), GFP_NOFS);
6714d8e8
KH
2049 if (!item)
2050 return -ENOMEM;
2051
2052
2053 /* queue up work for dlm_assert_master_worker */
2054 dlm_grab(dlm); /* get an extra ref for the work item */
2055 dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL);
2056 item->u.am.lockres = res; /* already have a ref */
2057 /* can optionally ignore node numbers higher than this node */
2058 item->u.am.ignore_higher = ignore_higher;
2059 item->u.am.request_from = request_from;
2060 item->u.am.flags = flags;
2061
9c6510a5
KH
2062 if (ignore_higher)
2063 mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len,
2064 res->lockname.name);
2065
6714d8e8
KH
2066 spin_lock(&dlm->work_lock);
2067 list_add_tail(&item->list, &dlm->work_list);
2068 spin_unlock(&dlm->work_lock);
2069
3156d267 2070 queue_work(dlm->dlm_worker, &dlm->dispatched_work);
6714d8e8
KH
2071 return 0;
2072}
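/*
 * Illustrative sketch, for exposition only: a handler running in o2net
 * callback context must not issue blocking network sends, so it hands the
 * assert_master broadcast to the dlm worker through the dispatcher above.
 * The caller name below is hypothetical; the real callers are the master
 * request and mastery paths in this file.
 */
static int example_defer_assert(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res,
				u8 request_from)
{
	int ret;

	/* take a reference that the queued work item will own */
	dlm_lockres_get(res);
	ret = dlm_dispatch_assert_master(dlm, res, 0, request_from,
					 DLM_ASSERT_MASTER_MLE_CLEANUP);
	if (ret < 0) {
		/* dispatch failed, drop the reference meant for the item */
		dlm_lockres_put(res);
		mlog_errno(ret);
	}
	return ret;
}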
2073
2074static void dlm_assert_master_worker(struct dlm_work_item *item, void *data)
2075{
2076 struct dlm_ctxt *dlm = data;
2077 int ret = 0;
2078 struct dlm_lock_resource *res;
2079 unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)];
2080 int ignore_higher;
2081 int bit;
2082 u8 request_from;
2083 u32 flags;
2084
2085 dlm = item->dlm;
2086 res = item->u.am.lockres;
2087 ignore_higher = item->u.am.ignore_higher;
2088 request_from = item->u.am.request_from;
2089 flags = item->u.am.flags;
2090
2091 spin_lock(&dlm->spinlock);
2092 memcpy(nodemap, dlm->domain_map, sizeof(nodemap));
2093 spin_unlock(&dlm->spinlock);
2094
2095 clear_bit(dlm->node_num, nodemap);
2096 if (ignore_higher) {
2097 /* if this is just to clear up mles for nodes below
2098 * this node, do not send the message to the original
2099 * caller or any node number higher than this */
2100 clear_bit(request_from, nodemap);
2101 bit = dlm->node_num;
2102 while (1) {
2103 bit = find_next_bit(nodemap, O2NM_MAX_NODES,
2104 bit+1);
2105 if (bit >= O2NM_MAX_NODES)
2106 break;
2107 clear_bit(bit, nodemap);
2108 }
2109 }
2110
36407488
KH
2111 /*
2112 * If we're migrating this lock to someone else, we are no
2113 * longer allowed to assert our own mastery. OTOH, we need to
2114 * prevent migration from starting while we're still asserting
2115 * our dominance. The reserved ast delays migration.
2116 */
2117 spin_lock(&res->spinlock);
2118 if (res->state & DLM_LOCK_RES_MIGRATING) {
2119 mlog(0, "Someone asked us to assert mastery, but we're "
2120 "in the middle of migration. Skipping assert, "
2121 "the new master will handle that.\n");
2122 spin_unlock(&res->spinlock);
2123 goto put;
2124 } else
2125 __dlm_lockres_reserve_ast(res);
2126 spin_unlock(&res->spinlock);
2127
6714d8e8
KH
2128 /* this call now finishes out the nodemap
2129 * even if one or more nodes die */
2130 mlog(0, "worker about to master %.*s here, this=%u\n",
2131 res->lockname.len, res->lockname.name, dlm->node_num);
ba2bf218 2132 ret = dlm_do_assert_master(dlm, res, nodemap, flags);
6714d8e8
KH
2133 if (ret < 0) {
2134 /* no need to restart, we are done */
3b3b84a8
KH
2135 if (!dlm_is_host_down(ret))
2136 mlog_errno(ret);
6714d8e8
KH
2137 }
2138
36407488
KH
2139 /* Ok, we've asserted ourselves. Let's let migration start. */
2140 dlm_lockres_release_ast(dlm, res);
2141
2142put:
6714d8e8
KH
2143 dlm_lockres_put(res);
2144
2145 mlog(0, "finished with dlm_assert_master_worker\n");
2146}
2147
c03872f5
KH
2148/* SPECIAL CASE for the $RECOVERY lock used by the recovery thread.
2149 * We cannot wait for node recovery to complete to begin mastering this
2150 * lockres because this lockres is used to kick off recovery! ;-)
2151 * So, do a pre-check on all living nodes to see if any of those nodes
2152 * think that $RECOVERY is currently mastered by a dead node. If so,
2153 * we wait a short time to allow that node to get notified by its own
2154 * heartbeat stack, then check again. All $RECOVERY lock resources
2155 * mastered by dead nodes are purged when the heartbeat callback is
2156 * fired, so we can know for sure that it is safe to continue once
2157 * the node returns a live node or no node. */
2158static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
2159 struct dlm_lock_resource *res)
2160{
2161 struct dlm_node_iter iter;
2162 int nodenum;
2163 int ret = 0;
2164 u8 master = DLM_LOCK_RES_OWNER_UNKNOWN;
2165
2166 spin_lock(&dlm->spinlock);
2167 dlm_node_iter_init(dlm->domain_map, &iter);
2168 spin_unlock(&dlm->spinlock);
2169
2170 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2171 /* do not send to self */
2172 if (nodenum == dlm->node_num)
2173 continue;
2174 ret = dlm_do_master_requery(dlm, res, nodenum, &master);
2175 if (ret < 0) {
2176 mlog_errno(ret);
2177 if (!dlm_is_host_down(ret))
2178 BUG();
2179 /* host is down, so answer for that node would be
2180 * DLM_LOCK_RES_OWNER_UNKNOWN. continue. */
f42a100b 2181 ret = 0;
c03872f5
KH
2182 }
2183
2184 if (master != DLM_LOCK_RES_OWNER_UNKNOWN) {
2185 /* check to see if this master is in the recovery map */
2186 spin_lock(&dlm->spinlock);
2187 if (test_bit(master, dlm->recovery_map)) {
2188 mlog(ML_NOTICE, "%s: node %u has not seen "
2189 "node %u go down yet, and thinks the "
2190 "dead node is mastering the recovery "
2191 "lock. must wait.\n", dlm->name,
2192 nodenum, master);
2193 ret = -EAGAIN;
2194 }
2195 spin_unlock(&dlm->spinlock);
2196 mlog(0, "%s: reco lock master is %u\n", dlm->name,
2197 master);
2198 break;
2199 }
2200 }
2201 return ret;
2202}
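/*
 * Illustrative sketch, for exposition only: the pre-check above returns
 * -EAGAIN while any live node still believes a dead node masters the
 * $RECOVERY lock, so a caller backs off and retries until that node's
 * heartbeat callback has purged the stale master.  The helper name is
 * hypothetical.
 */
static void example_wait_for_reco_master(struct dlm_ctxt *dlm,
					 struct dlm_lock_resource *res)
{
	while (dlm_pre_master_reco_lockres(dlm, res) == -EAGAIN) {
		/* some node has not noticed the death yet, wait briefly */
		msleep(100);
	}
}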
2203
ba2bf218
KH
2204/*
2205 * DLM_DEREF_LOCKRES_MSG
2206 */
2207
2208int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2209{
2210 struct dlm_deref_lockres deref;
2211 int ret = 0, r;
2212 const char *lockname;
2213 unsigned int namelen;
2214
2215 lockname = res->lockname.name;
2216 namelen = res->lockname.len;
2217 BUG_ON(namelen > O2NM_MAX_NAME_LEN);
2218
2219 mlog(0, "%s:%.*s: sending deref to %d\n",
2220 dlm->name, namelen, lockname, res->owner);
2221 memset(&deref, 0, sizeof(deref));
2222 deref.node_idx = dlm->node_num;
2223 deref.namelen = namelen;
2224 memcpy(deref.name, lockname, namelen);
2225
2226 ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key,
2227 &deref, sizeof(deref), res->owner, &r);
2228 if (ret < 0)
2229 mlog_errno(ret);
2230 else if (r < 0) {
2231 /* BAD. other node says I did not have a ref. */
2232 mlog(ML_ERROR,"while dropping ref on %s:%.*s "
2233 "(master=%u) got %d.\n", dlm->name, namelen,
2234 lockname, res->owner, r);
2235 dlm_print_one_lock_resource(res);
2236 BUG();
2237 }
2238 return ret;
2239}
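/*
 * Illustrative sketch, for exposition only: a non-master node sends the
 * deref while purging a lockres it no longer uses, first marking it
 * DROPPING_REF so no new user races in.  This roughly mirrors the purge
 * path in dlmthread.c; the helper name and simplified locking below are
 * hypothetical.
 */
static void example_purge_remote_lockres(struct dlm_ctxt *dlm,
					 struct dlm_lock_resource *res)
{
	spin_lock(&res->spinlock);
	res->state |= DLM_LOCK_RES_DROPPING_REF;
	spin_unlock(&res->spinlock);

	/* tell the master it can clear our bit in the refmap */
	dlm_drop_lockres_ref(dlm, res);

	spin_lock(&res->spinlock);
	res->state &= ~DLM_LOCK_RES_DROPPING_REF;
	spin_unlock(&res->spinlock);
}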
2240
d74c9803
KH
2241int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
2242 void **ret_data)
ba2bf218
KH
2243{
2244 struct dlm_ctxt *dlm = data;
2245 struct dlm_deref_lockres *deref = (struct dlm_deref_lockres *)msg->buf;
2246 struct dlm_lock_resource *res = NULL;
2247 char *name;
2248 unsigned int namelen;
2249 int ret = -EINVAL;
2250 u8 node;
2251 unsigned int hash;
f3f85464
SM
2252 struct dlm_work_item *item;
2253 int cleared = 0;
2254 int dispatch = 0;
ba2bf218
KH
2255
2256 if (!dlm_grab(dlm))
2257 return 0;
2258
2259 name = deref->name;
2260 namelen = deref->namelen;
2261 node = deref->node_idx;
2262
2263 if (namelen > DLM_LOCKID_NAME_MAX) {
2264 mlog(ML_ERROR, "Invalid name length!");
2265 goto done;
2266 }
2267 if (deref->node_idx >= O2NM_MAX_NODES) {
2268 mlog(ML_ERROR, "Invalid node number: %u\n", node);
2269 goto done;
2270 }
2271
2272 hash = dlm_lockid_hash(name, namelen);
2273
2274 spin_lock(&dlm->spinlock);
2275 res = __dlm_lookup_lockres_full(dlm, name, namelen, hash);
2276 if (!res) {
2277 spin_unlock(&dlm->spinlock);
2278 mlog(ML_ERROR, "%s:%.*s: bad lockres name\n",
2279 dlm->name, namelen, name);
2280 goto done;
2281 }
2282 spin_unlock(&dlm->spinlock);
2283
2284 spin_lock(&res->spinlock);
f3f85464
SM
2285 if (res->state & DLM_LOCK_RES_SETREF_INPROG)
2286 dispatch = 1;
2287 else {
2288 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
2289 if (test_bit(node, res->refmap)) {
2290 dlm_lockres_clear_refmap_bit(node, res);
2291 cleared = 1;
2292 }
ba2bf218
KH
2293 }
2294 spin_unlock(&res->spinlock);
2295
f3f85464
SM
2296 if (!dispatch) {
2297 if (cleared)
2298 dlm_lockres_calc_usage(dlm, res);
2299 else {
2300 mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
2301 "but it is already dropped!\n", dlm->name,
2302 res->lockname.len, res->lockname.name, node);
2af37ce8 2303 dlm_print_one_lock_resource(res);
f3f85464
SM
2304 }
2305 ret = 0;
2306 goto done;
2307 }
2308
2309 item = kzalloc(sizeof(*item), GFP_NOFS);
2310 if (!item) {
2311 ret = -ENOMEM;
2312 mlog_errno(ret);
2313 goto done;
2314 }
2315
2316 dlm_init_work_item(dlm, item, dlm_deref_lockres_worker, NULL);
2317 item->u.dl.deref_res = res;
2318 item->u.dl.deref_node = node;
2319
2320 spin_lock(&dlm->work_lock);
2321 list_add_tail(&item->list, &dlm->work_list);
2322 spin_unlock(&dlm->work_lock);
2323
2324 queue_work(dlm->dlm_worker, &dlm->dispatched_work);
2325 return 0;
2326
ba2bf218
KH
2327done:
2328 if (res)
2329 dlm_lockres_put(res);
2330 dlm_put(dlm);
f3f85464 2331
ba2bf218
KH
2332 return ret;
2333}
2334
f3f85464
SM
2335static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data)
2336{
2337 struct dlm_ctxt *dlm;
2338 struct dlm_lock_resource *res;
2339 u8 node;
2340 u8 cleared = 0;
2341
2342 dlm = item->dlm;
2343 res = item->u.dl.deref_res;
2344 node = item->u.dl.deref_node;
2345
2346 spin_lock(&res->spinlock);
2347 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
2348 if (test_bit(node, res->refmap)) {
2349 __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
2350 dlm_lockres_clear_refmap_bit(node, res);
2351 cleared = 1;
2352 }
2353 spin_unlock(&res->spinlock);
2354
2355 if (cleared) {
2356 mlog(0, "%s:%.*s node %u ref dropped in dispatch\n",
2357 dlm->name, res->lockname.len, res->lockname.name, node);
2358 dlm_lockres_calc_usage(dlm, res);
2359 } else {
2360 mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
2361 "but it is already dropped!\n", dlm->name,
2362 res->lockname.len, res->lockname.name, node);
2af37ce8 2363 dlm_print_one_lock_resource(res);
f3f85464
SM
2364 }
2365
2366 dlm_lockres_put(res);
2367}
2368
2f5bf1f2
SM
2369/* Checks whether the lockres can be migrated. Returns 0 if yes, < 0
2370 * if not. If 0, numlocks is set to the number of locks in the lockres.
2371 */
2372static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm,
2373 struct dlm_lock_resource *res,
2374 int *numlocks)
2375{
2376 int ret;
2377 int i;
2378 int count = 0;
800deef3 2379 struct list_head *queue;
2f5bf1f2
SM
2380 struct dlm_lock *lock;
2381
2382 assert_spin_locked(&res->spinlock);
2383
2384 ret = -EINVAL;
2385 if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
2386 mlog(0, "cannot migrate lockres with unknown owner!\n");
2387 goto leave;
2388 }
2389
2390 if (res->owner != dlm->node_num) {
2391 mlog(0, "cannot migrate lockres this node doesn't own!\n");
2392 goto leave;
2393 }
2394
2395 ret = 0;
2396 queue = &res->granted;
2397 for (i = 0; i < 3; i++) {
800deef3 2398 list_for_each_entry(lock, queue, list) {
2f5bf1f2
SM
2399 ++count;
2400 if (lock->ml.node == dlm->node_num) {
2401 mlog(0, "found a lock owned by this node still "
2402 "on the %s queue! will not migrate this "
2403 "lockres\n", (i == 0 ? "granted" :
2404 (i == 1 ? "converting" :
2405 "blocked")));
2406 ret = -ENOTEMPTY;
2407 goto leave;
2408 }
2409 }
2410 queue++;
2411 }
2412
2413 *numlocks = count;
2414 mlog(0, "migrateable lockres having %d locks\n", *numlocks);
2415
2416leave:
2417 return ret;
2418}
6714d8e8
KH
2419
2420/*
2421 * DLM_MIGRATE_LOCKRES
2422 */
2423
2424
faf0ec9f
AB
2425static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
2426 struct dlm_lock_resource *res,
2427 u8 target)
6714d8e8
KH
2428{
2429 struct dlm_master_list_entry *mle = NULL;
2430 struct dlm_master_list_entry *oldmle = NULL;
2431 struct dlm_migratable_lockres *mres = NULL;
2f5bf1f2 2432 int ret = 0;
6714d8e8
KH
2433 const char *name;
2434 unsigned int namelen;
2435 int mle_added = 0;
2f5bf1f2
SM
2436 int numlocks;
2437 int wake = 0;
6714d8e8
KH
2438
2439 if (!dlm_grab(dlm))
2440 return -EINVAL;
2441
2442 name = res->lockname.name;
2443 namelen = res->lockname.len;
2444
2445 mlog(0, "migrating %.*s to %u\n", namelen, name, target);
2446
2447 /*
2448 * ensure this lockres is a proper candidate for migration
2449 */
2450 spin_lock(&res->spinlock);
2f5bf1f2
SM
2451 ret = dlm_is_lockres_migrateable(dlm, res, &numlocks);
2452 if (ret < 0) {
6714d8e8
KH
2453 spin_unlock(&res->spinlock);
2454 goto leave;
2455 }
6714d8e8
KH
2456 spin_unlock(&res->spinlock);
2457
2458 /* no work to do */
2f5bf1f2 2459 if (numlocks == 0) {
6714d8e8 2460 mlog(0, "no locks were found on this lockres! done!\n");
6714d8e8
KH
2461 goto leave;
2462 }
2463
2464 /*
2465 * preallocate up front
2466 * if this fails, abort
2467 */
2468
2469 ret = -ENOMEM;
ad8100e0 2470 mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_NOFS);
6714d8e8
KH
2471 if (!mres) {
2472 mlog_errno(ret);
2473 goto leave;
2474 }
2475
2476 mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
ad8100e0 2477 GFP_NOFS);
6714d8e8
KH
2478 if (!mle) {
2479 mlog_errno(ret);
2480 goto leave;
2481 }
2482 ret = 0;
2483
2484 /*
2485 * find a node to migrate the lockres to
2486 */
2487
2488 mlog(0, "picking a migration node\n");
2489 spin_lock(&dlm->spinlock);
2490 /* pick a new node */
2491 if (target >= O2NM_MAX_NODES ||
2492 !test_bit(target, dlm->domain_map)) {
2493 target = dlm_pick_migration_target(dlm, res);
2494 }
2495 mlog(0, "node %u chosen for migration\n", target);
2496
2497 if (target >= O2NM_MAX_NODES ||
2498 !test_bit(target, dlm->domain_map)) {
2499 /* target chosen is not alive */
2500 ret = -EINVAL;
2501 }
2502
2503 if (ret) {
2504 spin_unlock(&dlm->spinlock);
2505 goto fail;
2506 }
2507
2508 mlog(0, "continuing with target = %u\n", target);
2509
2510 /*
2511 * clear any existing master requests and
2512 * add the migration mle to the list
2513 */
2514 spin_lock(&dlm->master_lock);
2515 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
2516 namelen, target, dlm->node_num);
2517 spin_unlock(&dlm->master_lock);
2518 spin_unlock(&dlm->spinlock);
2519
2520 if (ret == -EEXIST) {
2521 mlog(0, "another process is already migrating it\n");
2522 goto fail;
2523 }
2524 mle_added = 1;
2525
2526 /*
2527 * set the MIGRATING flag and flush asts
2528 * if we fail after this we need to re-dirty the lockres
2529 */
2530 if (dlm_mark_lockres_migrating(dlm, res, target) < 0) {
2531 mlog(ML_ERROR, "tried to migrate %.*s to %u, but "
2532 "the target went down.\n", res->lockname.len,
2533 res->lockname.name, target);
2534 spin_lock(&res->spinlock);
2535 res->state &= ~DLM_LOCK_RES_MIGRATING;
a6fa3640 2536 wake = 1;
6714d8e8
KH
2537 spin_unlock(&res->spinlock);
2538 ret = -EINVAL;
2539 }
2540
2541fail:
2542 if (oldmle) {
2543 /* master is known, detach if not already detached */
2544 dlm_mle_detach_hb_events(dlm, oldmle);
2545 dlm_put_mle(oldmle);
2546 }
2547
2548 if (ret < 0) {
2549 if (mle_added) {
2550 dlm_mle_detach_hb_events(dlm, mle);
2551 dlm_put_mle(mle);
2552 } else if (mle) {
2553 kmem_cache_free(dlm_mle_cache, mle);
2554 }
2555 goto leave;
2556 }
2557
2558 /*
2559 * at this point, we have a migration target, an mle
2560 * in the master list, and the MIGRATING flag set on
2561 * the lockres
2562 */
2563
1cd04dbe
KH
2564 /* now that remote nodes are spinning on the MIGRATING flag,
2565 * ensure that all assert_master work is flushed. */
2566 flush_workqueue(dlm->dlm_worker);
6714d8e8
KH
2567
2568 /* get an extra reference on the mle.
2569 * otherwise the assert_master from the new
2570 * master will destroy this.
2571 * also, make sure that all callers of dlm_get_mle
2572 * take both dlm->spinlock and dlm->master_lock */
2573 spin_lock(&dlm->spinlock);
2574 spin_lock(&dlm->master_lock);
a2bf0477 2575 dlm_get_mle_inuse(mle);
6714d8e8
KH
2576 spin_unlock(&dlm->master_lock);
2577 spin_unlock(&dlm->spinlock);
2578
2579 /* notify new node and send all lock state */
2580 /* call send_one_lockres with migration flag.
2581 * this serves as notice to the target node that a
2582 * migration is starting. */
2583 ret = dlm_send_one_lockres(dlm, res, mres, target,
2584 DLM_MRES_MIGRATION);
2585
2586 if (ret < 0) {
2587 mlog(0, "migration to node %u failed with %d\n",
2588 target, ret);
2589 /* migration failed, detach and clean up mle */
2590 dlm_mle_detach_hb_events(dlm, mle);
2591 dlm_put_mle(mle);
a2bf0477
KH
2592 dlm_put_mle_inuse(mle);
2593 spin_lock(&res->spinlock);
2594 res->state &= ~DLM_LOCK_RES_MIGRATING;
a6fa3640 2595 wake = 1;
a2bf0477 2596 spin_unlock(&res->spinlock);
6714d8e8
KH
2597 goto leave;
2598 }
2599
2600 /* at this point, the target sends a message to all nodes,
2601 * (using dlm_do_migrate_request). this node is skipped since
2602 * we had to put an mle in the list to begin the process. this
2603 * node now waits for target to do an assert master. this node
2604 * will be the last one notified, ensuring that the migration
2605 * is complete everywhere. if the target dies while this is
2606 * going on, some nodes could potentially see the target as the
2607 * master, so it is important that my recovery finds the migration
2608 * mle and sets the master to UNKNOWN. */
2609
2610
2611 /* wait for new node to assert master */
2612 while (1) {
2613 ret = wait_event_interruptible_timeout(mle->wq,
2614 (atomic_read(&mle->woken) == 1),
2615 msecs_to_jiffies(5000));
2616
2617 if (ret >= 0) {
2618 if (atomic_read(&mle->woken) == 1 ||
2619 res->owner == target)
2620 break;
2621
1cd04dbe
KH
2622 mlog(0, "%s:%.*s: timed out during migration\n",
2623 dlm->name, res->lockname.len, res->lockname.name);
e2faea4c
KH
2624 /* avoid hang during shutdown when migrating lockres
2625 * to a node which also goes down */
2626 if (dlm_is_node_dead(dlm, target)) {
aa852354
KH
2627 mlog(0, "%s:%.*s: expected migration "
2628 "target %u is no longer up, restarting\n",
e2faea4c
KH
2629 dlm->name, res->lockname.len,
2630 res->lockname.name, target);
1cd04dbe
KH
2631 ret = -EINVAL;
2632 /* migration failed, detach and clean up mle */
2633 dlm_mle_detach_hb_events(dlm, mle);
2634 dlm_put_mle(mle);
2635 dlm_put_mle_inuse(mle);
2636 spin_lock(&res->spinlock);
2637 res->state &= ~DLM_LOCK_RES_MIGRATING;
a6fa3640 2638 wake = 1;
1cd04dbe
KH
2639 spin_unlock(&res->spinlock);
2640 goto leave;
e2faea4c 2641 }
1cd04dbe
KH
2642 } else
2643 mlog(0, "%s:%.*s: caught signal during migration\n",
2644 dlm->name, res->lockname.len, res->lockname.name);
6714d8e8
KH
2645 }
2646
2647 /* all done, set the owner, clear the flag */
2648 spin_lock(&res->spinlock);
2649 dlm_set_lockres_owner(dlm, res, target);
2650 res->state &= ~DLM_LOCK_RES_MIGRATING;
2651 dlm_remove_nonlocal_locks(dlm, res);
2652 spin_unlock(&res->spinlock);
2653 wake_up(&res->wq);
2654
2655 /* master is known, detach if not already detached */
2656 dlm_mle_detach_hb_events(dlm, mle);
a2bf0477 2657 dlm_put_mle_inuse(mle);
6714d8e8
KH
2658 ret = 0;
2659
2660 dlm_lockres_calc_usage(dlm, res);
2661
2662leave:
2663 /* re-dirty the lockres if we failed */
2664 if (ret < 0)
2665 dlm_kick_thread(dlm, res);
2666
a6fa3640
KH
2667 /* wake up waiters if the MIGRATING flag got set
2668 * but migration failed */
2669 if (wake)
2670 wake_up(&res->wq);
2671
6714d8e8
KH
2672 /* TODO: cleanup */
2673 if (mres)
2674 free_page((unsigned long)mres);
2675
2676 dlm_put(dlm);
2677
2678 mlog(0, "returning %d\n", ret);
2679 return ret;
2680}
6714d8e8 2681
ba2bf218
KH
2682#define DLM_MIGRATION_RETRY_MS 100
2683
2684/* Should be called only after beginning the domain leave process.
2685 * There should not be any remaining locks on nonlocal lock resources,
2686 * and there should be no local locks left on locally mastered resources.
2687 *
2688 * Called with the dlm spinlock held, may drop it to do migration, but
2689 * will re-acquire before exit.
2690 *
2691 * Returns: 1 if dlm->spinlock was dropped/retaken, 0 if never dropped */
2692int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2693{
2694 int ret;
2695 int lock_dropped = 0;
2f5bf1f2 2696 int numlocks;
ba2bf218 2697
b36c3f84 2698 spin_lock(&res->spinlock);
ba2bf218
KH
2699 if (res->owner != dlm->node_num) {
2700 if (!__dlm_lockres_unused(res)) {
2701 mlog(ML_ERROR, "%s:%.*s: this node is not master, "
2702 "trying to free this but locks remain\n",
2703 dlm->name, res->lockname.len, res->lockname.name);
2704 }
b36c3f84 2705 spin_unlock(&res->spinlock);
ba2bf218
KH
2706 goto leave;
2707 }
2f5bf1f2
SM
2708
2709 /* No need to migrate a lockres having no locks */
2710 ret = dlm_is_lockres_migrateable(dlm, res, &numlocks);
2711 if (ret >= 0 && numlocks == 0) {
2712 spin_unlock(&res->spinlock);
2713 goto leave;
2714 }
b36c3f84 2715 spin_unlock(&res->spinlock);
ba2bf218
KH
2716
2717 /* Wheee! Migrate lockres here! Will sleep so drop spinlock. */
2718 spin_unlock(&dlm->spinlock);
2719 lock_dropped = 1;
2720 while (1) {
2721 ret = dlm_migrate_lockres(dlm, res, O2NM_MAX_NODES);
2722 if (ret >= 0)
2723 break;
2724 if (ret == -ENOTEMPTY) {
2725 mlog(ML_ERROR, "lockres %.*s still has local locks!\n",
2726 res->lockname.len, res->lockname.name);
2727 BUG();
2728 }
2729
2730 mlog(0, "lockres %.*s: migrate failed, "
2731 "retrying\n", res->lockname.len,
2732 res->lockname.name);
2733 msleep(DLM_MIGRATION_RETRY_MS);
2734 }
2735 spin_lock(&dlm->spinlock);
2736leave:
2737 return lock_dropped;
2738}
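/*
 * Illustrative sketch, for exposition only: because dlm_empty_lockres()
 * may drop and retake dlm->spinlock, a caller walking the lockres hash
 * must treat a nonzero return as "my scan cursor is stale, restart the
 * bucket".  The wrapper name below is hypothetical.
 */
static int example_empty_one_lockres(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res)
{
	int dropped;

	spin_lock(&dlm->spinlock);
	dropped = dlm_empty_lockres(dlm, res);
	spin_unlock(&dlm->spinlock);

	/* nonzero: the hash walk position is no longer trustworthy */
	return dropped;
}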
2739
6714d8e8
KH
2740int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock)
2741{
2742 int ret;
2743 spin_lock(&dlm->ast_lock);
2744 spin_lock(&lock->spinlock);
2745 ret = (list_empty(&lock->bast_list) && !lock->bast_pending);
2746 spin_unlock(&lock->spinlock);
2747 spin_unlock(&dlm->ast_lock);
2748 return ret;
2749}
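/*
 * Illustrative sketch, for exposition only: a path that must not proceed
 * while a bast for this lock is still queued or pending (for instance
 * before a remote unlock) can sleep on dlm->ast_wq until the check above
 * becomes true.  The helper name is hypothetical.
 */
static void example_wait_basts_flushed(struct dlm_ctxt *dlm,
				       struct dlm_lock *lock)
{
	/* dlm_lock_basts_flushed() takes dlm->ast_lock internally */
	wait_event(dlm->ast_wq, dlm_lock_basts_flushed(dlm, lock));
}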
2750
2751static int dlm_migration_can_proceed(struct dlm_ctxt *dlm,
2752 struct dlm_lock_resource *res,
2753 u8 mig_target)
2754{
2755 int can_proceed;
2756 spin_lock(&res->spinlock);
2757 can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING);
2758 spin_unlock(&res->spinlock);
2759
2760 /* target has died, so make the caller break out of the
2761 * wait_event, but caller must recheck the domain_map */
2762 spin_lock(&dlm->spinlock);
2763 if (!test_bit(mig_target, dlm->domain_map))
2764 can_proceed = 1;
2765 spin_unlock(&dlm->spinlock);
2766 return can_proceed;
2767}
2768
faf0ec9f
AB
2769static int dlm_lockres_is_dirty(struct dlm_ctxt *dlm,
2770 struct dlm_lock_resource *res)
6714d8e8
KH
2771{
2772 int ret;
2773 spin_lock(&res->spinlock);
2774 ret = !!(res->state & DLM_LOCK_RES_DIRTY);
2775 spin_unlock(&res->spinlock);
2776 return ret;
2777}
2778
2779
2780static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
2781 struct dlm_lock_resource *res,
2782 u8 target)
2783{
2784 int ret = 0;
2785
2786 mlog(0, "dlm_mark_lockres_migrating: %.*s, from %u to %u\n",
2787 res->lockname.len, res->lockname.name, dlm->node_num,
2788 target);
2789 /* need to set MIGRATING flag on lockres. this is done by
2790 * ensuring that all asts have been flushed for this lockres. */
2791 spin_lock(&res->spinlock);
2792 BUG_ON(res->migration_pending);
2793 res->migration_pending = 1;
2794 /* strategy is to reserve an extra ast then release
2795 * it below, letting the release do all of the work */
2796 __dlm_lockres_reserve_ast(res);
2797 spin_unlock(&res->spinlock);
2798
ddc09c8d 2799 /* now flush all the pending asts */
6714d8e8 2800 dlm_kick_thread(dlm, res);
ddc09c8d
KH
2801 /* before waiting on DIRTY, block processes which may
2802 * try to dirty the lockres before MIGRATING is set */
2803 spin_lock(&res->spinlock);
2804 BUG_ON(res->state & DLM_LOCK_RES_BLOCK_DIRTY);
2805 res->state |= DLM_LOCK_RES_BLOCK_DIRTY;
2806 spin_unlock(&res->spinlock);
2807 /* now wait on any pending asts and the DIRTY state */
6714d8e8
KH
2808 wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res));
2809 dlm_lockres_release_ast(dlm, res);
2810
2811 mlog(0, "about to wait on migration_wq, dirty=%s\n",
2812 res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
2813 /* if the extra ref we just put was the final one, this
2814 * will pass thru immediately. otherwise, we need to wait
2815 * for the last ast to finish. */
2816again:
2817 ret = wait_event_interruptible_timeout(dlm->migration_wq,
2818 dlm_migration_can_proceed(dlm, res, target),
2819 msecs_to_jiffies(1000));
2820 if (ret < 0) {
2821 mlog(0, "woken again: migrating? %s, dead? %s\n",
2822 res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
2823 test_bit(target, dlm->domain_map) ? "no":"yes");
2824 } else {
2825 mlog(0, "all is well: migrating? %s, dead? %s\n",
2826 res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
2827 test_bit(target, dlm->domain_map) ? "no":"yes");
2828 }
2829 if (!dlm_migration_can_proceed(dlm, res, target)) {
2830 mlog(0, "trying again...\n");
2831 goto again;
2832 }
ddc09c8d
KH
2833 /* now that we are sure the MIGRATING state is there, drop
2834 * the unneeded state which blocked threads trying to DIRTY */
2835 spin_lock(&res->spinlock);
2836 BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY));
2837 BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING));
2838 res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY;
2839 spin_unlock(&res->spinlock);
6714d8e8
KH
2840
2841 /* did the target go down or die? */
2842 spin_lock(&dlm->spinlock);
2843 if (!test_bit(target, dlm->domain_map)) {
2844 mlog(ML_ERROR, "aha. migration target %u just went down\n",
2845 target);
2846 ret = -EHOSTDOWN;
2847 }
2848 spin_unlock(&dlm->spinlock);
2849
2850 /*
2851 * at this point:
2852 *
2853 * o the DLM_LOCK_RES_MIGRATING flag is set
2854 * o there are no pending asts on this lockres
2855 * o all processes trying to reserve an ast on this
2856 * lockres must wait for the MIGRATING flag to clear
2857 */
2858 return ret;
2859}
2860
2861/* last step in the migration process.
2862 * original master calls this to free all of the dlm_lock
2863 * structures that used to be for other nodes. */
2864static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
2865 struct dlm_lock_resource *res)
2866{
6714d8e8 2867 struct list_head *queue = &res->granted;
ba2bf218 2868 int i, bit;
800deef3 2869 struct dlm_lock *lock, *next;
6714d8e8
KH
2870
2871 assert_spin_locked(&res->spinlock);
2872
2873 BUG_ON(res->owner == dlm->node_num);
2874
2875 for (i = 0; i < 3; i++) {
800deef3 2876 list_for_each_entry_safe(lock, next, queue, list) {
6714d8e8
KH
2877 if (lock->ml.node != dlm->node_num) {
2878 mlog(0, "putting lock for node %u\n",
2879 lock->ml.node);
2880 /* be extra careful */
2881 BUG_ON(!list_empty(&lock->ast_list));
2882 BUG_ON(!list_empty(&lock->bast_list));
2883 BUG_ON(lock->ast_pending);
2884 BUG_ON(lock->bast_pending);
ba2bf218 2885 dlm_lockres_clear_refmap_bit(lock->ml.node, res);
6714d8e8
KH
2886 list_del_init(&lock->list);
2887 dlm_lock_put(lock);
2c5c54ac
SM
2888 /* In a normal unlock, we would have added a
2889 * DLM_UNLOCK_FREE_LOCK action. Force it. */
2890 dlm_lock_put(lock);
6714d8e8
KH
2891 }
2892 }
2893 queue++;
2894 }
ba2bf218
KH
2895 bit = 0;
2896 while (1) {
2897 bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit);
2898 if (bit >= O2NM_MAX_NODES)
2899 break;
2900 /* do not clear the local node reference, if there is a
2901 * process holding this, let it drop the ref itself */
2902 if (bit != dlm->node_num) {
2903 mlog(0, "%s:%.*s: node %u had a ref to this "
2904 "migrating lockres, clearing\n", dlm->name,
2905 res->lockname.len, res->lockname.name, bit);
2906 dlm_lockres_clear_refmap_bit(bit, res);
2907 }
2908 bit++;
2909 }
6714d8e8
KH
2910}
2911
2912/* for now this is not too intelligent. we will
2913 * need stats to make this do the right thing.
2914 * this just finds the first lock on one of the
2915 * queues and uses that node as the target. */
2916static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
2917 struct dlm_lock_resource *res)
2918{
2919 int i;
2920 struct list_head *queue = &res->granted;
6714d8e8
KH
2921 struct dlm_lock *lock;
2922 int nodenum;
2923
2924 assert_spin_locked(&dlm->spinlock);
2925
2926 spin_lock(&res->spinlock);
2927 for (i = 0; i < 3; i++) {
800deef3 2928 list_for_each_entry(lock, queue, list) {
6714d8e8
KH
2929 /* up to the caller to make sure this node
2930 * is alive */
6714d8e8
KH
2931 if (lock->ml.node != dlm->node_num) {
2932 spin_unlock(&res->spinlock);
2933 return lock->ml.node;
2934 }
2935 }
2936 queue++;
2937 }
2938 spin_unlock(&res->spinlock);
2939 mlog(0, "have not found a suitable target yet! checking domain map\n");
2940
2941 /* ok now we're getting desperate. pick anyone alive. */
2942 nodenum = -1;
2943 while (1) {
2944 nodenum = find_next_bit(dlm->domain_map,
2945 O2NM_MAX_NODES, nodenum+1);
2946 mlog(0, "found %d in domain map\n", nodenum);
2947 if (nodenum >= O2NM_MAX_NODES)
2948 break;
2949 if (nodenum != dlm->node_num) {
2950 mlog(0, "picking %d\n", nodenum);
2951 return nodenum;
2952 }
2953 }
2954
2955 mlog(0, "giving up. no master to migrate to\n");
2956 return DLM_LOCK_RES_OWNER_UNKNOWN;
2957}
2958
2959
2960
2961/* this is called by the new master once all lockres
2962 * data has been received */
2963static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
2964 struct dlm_lock_resource *res,
2965 u8 master, u8 new_master,
2966 struct dlm_node_iter *iter)
2967{
2968 struct dlm_migrate_request migrate;
2b832564 2969 int ret, skip, status = 0;
6714d8e8
KH
2970 int nodenum;
2971
2972 memset(&migrate, 0, sizeof(migrate));
2973 migrate.namelen = res->lockname.len;
2974 memcpy(migrate.name, res->lockname.name, migrate.namelen);
2975 migrate.new_master = new_master;
2976 migrate.master = master;
2977
2978 ret = 0;
2979
2980 /* send message to all nodes, except the master and myself */
2981 while ((nodenum = dlm_node_iter_next(iter)) >= 0) {
2982 if (nodenum == master ||
2983 nodenum == new_master)
2984 continue;
2985
2b832564
SM
2986 /* We could race exit domain. If exited, skip. */
2987 spin_lock(&dlm->spinlock);
2988 skip = (!test_bit(nodenum, dlm->domain_map));
2989 spin_unlock(&dlm->spinlock);
2990 if (skip) {
2991 clear_bit(nodenum, iter->node_map);
2992 continue;
2993 }
2994
6714d8e8
KH
2995 ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key,
2996 &migrate, sizeof(migrate), nodenum,
2997 &status);
2b832564
SM
2998 if (ret < 0) {
2999 mlog(0, "migrate_request returned %d!\n", ret);
3000 if (!dlm_is_host_down(ret)) {
3001 mlog(ML_ERROR, "unhandled error=%d!\n", ret);
3002 BUG();
3003 }
3004 clear_bit(nodenum, iter->node_map);
3005 ret = 0;
3006 } else if (status < 0) {
6714d8e8
KH
3007 mlog(0, "migrate request (node %u) returned %d!\n",
3008 nodenum, status);
3009 ret = status;
ba2bf218
KH
3010 } else if (status == DLM_MIGRATE_RESPONSE_MASTERY_REF) {
3011 /* during the migration request we short-circuited
3012 * the mastery of the lockres. make sure we have
3013 * a mastery ref for nodenum */
3014 mlog(0, "%s:%.*s: need ref for node %u\n",
3015 dlm->name, res->lockname.len, res->lockname.name,
3016 nodenum);
3017 spin_lock(&res->spinlock);
3018 dlm_lockres_set_refmap_bit(nodenum, res);
3019 spin_unlock(&res->spinlock);
6714d8e8
KH
3020 }
3021 }
3022
3023 if (ret < 0)
3024 mlog_errno(ret);
3025
3026 mlog(0, "returning ret=%d\n", ret);
3027 return ret;
3028}
3029
3030
3031/* if there is an existing mle for this lockres, we now know who the master is.
3032 * (the one who sent us *this* message) we can clear it up right away.
3033 * since the process that put the mle on the list still has a reference to it,
3034 * we can unhash it now, set the master and wake the process. as a result,
3035 * we will have no mle in the list to start with. now we can add an mle for
3036 * the migration and this should be the only one found for those scanning the
3037 * list. */
d74c9803
KH
3038int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
3039 void **ret_data)
6714d8e8
KH
3040{
3041 struct dlm_ctxt *dlm = data;
3042 struct dlm_lock_resource *res = NULL;
3043 struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf;
3044 struct dlm_master_list_entry *mle = NULL, *oldmle = NULL;
3045 const char *name;
a3d33291 3046 unsigned int namelen, hash;
6714d8e8
KH
3047 int ret = 0;
3048
3049 if (!dlm_grab(dlm))
3050 return -EINVAL;
3051
3052 name = migrate->name;
3053 namelen = migrate->namelen;
a3d33291 3054 hash = dlm_lockid_hash(name, namelen);
6714d8e8
KH
3055
3056 /* preallocate.. if this fails, abort */
3057 mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
ad8100e0 3058 GFP_NOFS);
6714d8e8
KH
3059
3060 if (!mle) {
3061 ret = -ENOMEM;
3062 goto leave;
3063 }
3064
3065 /* check for pre-existing lock */
3066 spin_lock(&dlm->spinlock);
a3d33291 3067 res = __dlm_lookup_lockres(dlm, name, namelen, hash);
6714d8e8
KH
3068 spin_lock(&dlm->master_lock);
3069
3070 if (res) {
3071 spin_lock(&res->spinlock);
3072 if (res->state & DLM_LOCK_RES_RECOVERING) {
3073 /* if all is working ok, this can only mean that we got
3074 * a migrate request from a node that we now see as
3075 * dead. what can we do here? drop it to the floor? */
3076 spin_unlock(&res->spinlock);
3077 mlog(ML_ERROR, "Got a migrate request, but the "
3078 "lockres is marked as recovering!");
3079 kmem_cache_free(dlm_mle_cache, mle);
3080 ret = -EINVAL; /* need a better solution */
3081 goto unlock;
3082 }
3083 res->state |= DLM_LOCK_RES_MIGRATING;
3084 spin_unlock(&res->spinlock);
3085 }
3086
3087 /* ignore status. only nonzero status would BUG. */
3088 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
3089 name, namelen,
3090 migrate->new_master,
3091 migrate->master);
3092
3093unlock:
3094 spin_unlock(&dlm->master_lock);
3095 spin_unlock(&dlm->spinlock);
3096
3097 if (oldmle) {
3098 /* master is known, detach if not already detached */
3099 dlm_mle_detach_hb_events(dlm, oldmle);
3100 dlm_put_mle(oldmle);
3101 }
3102
3103 if (res)
3104 dlm_lockres_put(res);
3105leave:
3106 dlm_put(dlm);
3107 return ret;
3108}
3109
3110/* must be holding dlm->spinlock and dlm->master_lock
3111 * when adding a migration mle, we can clear any other mles
3112 * in the master list because we know with certainty that
3113 * the master is "master". so we remove any old mle from
3114 * the list after setting its master field, and then add
3115 * the new migration mle. this way we can hold with the rule
3116 * of having only one mle for a given lock name at all times. */
3117static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
3118 struct dlm_lock_resource *res,
3119 struct dlm_master_list_entry *mle,
3120 struct dlm_master_list_entry **oldmle,
3121 const char *name, unsigned int namelen,
3122 u8 new_master, u8 master)
3123{
3124 int found;
3125 int ret = 0;
3126
3127 *oldmle = NULL;
3128
3129 mlog_entry_void();
3130
3131 assert_spin_locked(&dlm->spinlock);
3132 assert_spin_locked(&dlm->master_lock);
3133
3134 /* caller is responsible for any ref taken here on oldmle */
3135 found = dlm_find_mle(dlm, oldmle, (char *)name, namelen);
3136 if (found) {
3137 struct dlm_master_list_entry *tmp = *oldmle;
3138 spin_lock(&tmp->spinlock);
3139 if (tmp->type == DLM_MLE_MIGRATION) {
3140 if (master == dlm->node_num) {
3141 /* ah another process raced me to it */
3142 mlog(0, "tried to migrate %.*s, but some "
3143 "process beat me to it\n",
3144 namelen, name);
3145 ret = -EEXIST;
3146 } else {
3147 /* bad. 2 NODES are trying to migrate! */
3148 mlog(ML_ERROR, "migration error mle: "
3149 "master=%u new_master=%u // request: "
3150 "master=%u new_master=%u // "
3151 "lockres=%.*s\n",
3152 tmp->master, tmp->new_master,
3153 master, new_master,
3154 namelen, name);
3155 BUG();
3156 }
3157 } else {
3158 /* this is essentially what assert_master does */
3159 tmp->master = master;
3160 atomic_set(&tmp->woken, 1);
3161 wake_up(&tmp->wq);
3162 /* remove it from the list so that only one
3163 * mle will be found */
3164 list_del_init(&tmp->list);
ba2bf218
KH
3165 /* note: 'mle' is not yet initialized at this point, so 'tmp' must be used */
3166 __dlm_mle_detach_hb_events(dlm, tmp);
3167 ret = DLM_MIGRATE_RESPONSE_MASTERY_REF;
3168 mlog(0, "%s:%.*s: master=%u, newmaster=%u, "
3169 "telling master to get ref for cleared out mle "
3170 "during migration\n", dlm->name, namelen, name,
3171 master, new_master);
6714d8e8
KH
3172 }
3173 spin_unlock(&tmp->spinlock);
3174 }
3175
3176 /* now add a migration mle to the tail of the list */
3177 dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen);
3178 mle->new_master = new_master;
ba2bf218
KH
3179 /* the new master will be sending an assert master for this.
3180 * at that point we will get the refmap reference */
6714d8e8
KH
3181 mle->master = master;
3182 /* do this for consistency with other mle types */
3183 set_bit(new_master, mle->maybe_map);
3184 list_add(&mle->list, &dlm->master_list);
3185
3186 return ret;
3187}
3188
3189
3190void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
3191{
800deef3 3192 struct dlm_master_list_entry *mle, *next;
6714d8e8 3193 struct dlm_lock_resource *res;
a3d33291 3194 unsigned int hash;
6714d8e8
KH
3195
3196 mlog_entry("dlm=%s, dead node=%u\n", dlm->name, dead_node);
3197top:
3198 assert_spin_locked(&dlm->spinlock);
3199
3200 /* clean the master list */
3201 spin_lock(&dlm->master_lock);
800deef3 3202 list_for_each_entry_safe(mle, next, &dlm->master_list, list) {
6714d8e8
KH
3203 BUG_ON(mle->type != DLM_MLE_BLOCK &&
3204 mle->type != DLM_MLE_MASTER &&
3205 mle->type != DLM_MLE_MIGRATION);
3206
3207 /* MASTER mles are initiated locally. the waiting
3208 * process will notice the node map change
3209 * shortly. let that happen as normal. */
3210 if (mle->type == DLM_MLE_MASTER)
3211 continue;
3212
3213
3214 /* BLOCK mles are initiated by other nodes.
3215 * need to clean up if the dead node would have
3216 * been the master. */
3217 if (mle->type == DLM_MLE_BLOCK) {
3218 int bit;
3219
3220 spin_lock(&mle->spinlock);
3221 bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
3222 if (bit != dead_node) {
3223 mlog(0, "mle found, but dead node %u would "
3224 "not have been master\n", dead_node);
3225 spin_unlock(&mle->spinlock);
3226 } else {
3227 /* must drop the refcount by one since the
3228 * assert_master will never arrive. this
3229 * may result in the mle being unlinked and
3230 * freed, but there may still be a process
3231 * waiting in the dlmlock path which is fine. */
3b3b84a8 3232 mlog(0, "node %u was expected master\n",
6714d8e8
KH
3233 dead_node);
3234 atomic_set(&mle->woken, 1);
3235 spin_unlock(&mle->spinlock);
3236 wake_up(&mle->wq);
f671c09b
KH
3237 /* do not need events any longer, so detach
3238 * from heartbeat */
3239 __dlm_mle_detach_hb_events(dlm, mle);
6714d8e8
KH
3240 __dlm_put_mle(mle);
3241 }
3242 continue;
3243 }
3244
3245 /* everything else is a MIGRATION mle */
3246
3247 /* the rule for MIGRATION mles is that the master
3248 * becomes UNKNOWN if *either* the original or
3249 * the new master dies. all UNKNOWN lockreses
3250 * are sent to whichever node becomes the recovery
3251 * master. the new master is responsible for
3252 * determining if there is still a master for
3253 * this lockres, or if he needs to take over
3254 * mastery. either way, this node should expect
3255 * another message to resolve this. */
3256 if (mle->master != dead_node &&
3257 mle->new_master != dead_node)
3258 continue;
3259
3260 /* if we have reached this point, this mle needs to
3261 * be removed from the list and freed. */
3262
3263 /* remove from the list early. NOTE: unlinking
3264 * list_head while in list_for_each_safe */
da01ad05 3265 __dlm_mle_detach_hb_events(dlm, mle);
6714d8e8
KH
3266 spin_lock(&mle->spinlock);
3267 list_del_init(&mle->list);
3268 atomic_set(&mle->woken, 1);
3269 spin_unlock(&mle->spinlock);
3270 wake_up(&mle->wq);
3271
aa852354
KH
3272 mlog(0, "%s: node %u died during migration from "
3273 "%u to %u!\n", dlm->name, dead_node,
6714d8e8
KH
3274 mle->master, mle->new_master);
3275 /* if there is a lockres associated with this
3276 * mle, find it and set its owner to UNKNOWN */
a3d33291 3277 hash = dlm_lockid_hash(mle->u.name.name, mle->u.name.len);
6714d8e8 3278 res = __dlm_lookup_lockres(dlm, mle->u.name.name,
a3d33291 3279 mle->u.name.len, hash);
6714d8e8
KH
3280 if (res) {
3281 /* unfortunately if we hit this rare case, our
3282 * lock ordering is messed. we need to drop
3283 * the master lock so that we can take the
3284 * lockres lock, meaning that we will have to
3285 * restart from the head of the list. */
3286 spin_unlock(&dlm->master_lock);
3287
3288 /* move lockres onto recovery list */
3289 spin_lock(&res->spinlock);
3290 dlm_set_lockres_owner(dlm, res,
3291 DLM_LOCK_RES_OWNER_UNKNOWN);
3292 dlm_move_lockres_to_recovery_list(dlm, res);
3293 spin_unlock(&res->spinlock);
3294 dlm_lockres_put(res);
3295
f671c09b
KH
3296 /* about to get rid of mle, detach from heartbeat */
3297 __dlm_mle_detach_hb_events(dlm, mle);
3298
6714d8e8
KH
3299 /* dump the mle */
3300 spin_lock(&dlm->master_lock);
3301 __dlm_put_mle(mle);
3302 spin_unlock(&dlm->master_lock);
3303
3304 /* restart */
3305 goto top;
3306 }
3307
3308 /* this may be the last reference */
3309 __dlm_put_mle(mle);
3310 }
3311 spin_unlock(&dlm->master_lock);
3312}
3313
3314
3315int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
3316 u8 old_master)
3317{
3318 struct dlm_node_iter iter;
3319 int ret = 0;
3320
3321 spin_lock(&dlm->spinlock);
3322 dlm_node_iter_init(dlm->domain_map, &iter);
3323 clear_bit(old_master, iter.node_map);
3324 clear_bit(dlm->node_num, iter.node_map);
3325 spin_unlock(&dlm->spinlock);
3326
ba2bf218
KH
3327 /* ownership of the lockres is changing. account for the
3328 * mastery reference here since old_master will briefly have
3329 * a reference after the migration completes */
3330 spin_lock(&res->spinlock);
3331 dlm_lockres_set_refmap_bit(old_master, res);
3332 spin_unlock(&res->spinlock);
3333
6714d8e8
KH
3334 mlog(0, "now time to do a migrate request to other nodes\n");
3335 ret = dlm_do_migrate_request(dlm, res, old_master,
3336 dlm->node_num, &iter);
3337 if (ret < 0) {
3338 mlog_errno(ret);
3339 goto leave;
3340 }
3341
3342 mlog(0, "doing assert master of %.*s to all except the original node\n",
3343 res->lockname.len, res->lockname.name);
3344 /* this call now finishes out the nodemap
3345 * even if one or more nodes die */
ba2bf218 3346 ret = dlm_do_assert_master(dlm, res, iter.node_map,
6714d8e8
KH
3347 DLM_ASSERT_MASTER_FINISH_MIGRATION);
3348 if (ret < 0) {
3349 /* no longer need to retry. all living nodes contacted. */
3350 mlog_errno(ret);
3351 ret = 0;
3352 }
3353
3354 memset(iter.node_map, 0, sizeof(iter.node_map));
3355 set_bit(old_master, iter.node_map);
3356 mlog(0, "doing assert master of %.*s back to %u\n",
3357 res->lockname.len, res->lockname.name, old_master);
ba2bf218 3358 ret = dlm_do_assert_master(dlm, res, iter.node_map,
6714d8e8
KH
3359 DLM_ASSERT_MASTER_FINISH_MIGRATION);
3360 if (ret < 0) {
3361 mlog(0, "assert master to original master failed "
3362 "with %d.\n", ret);
3363 /* the only nonzero status here would be because of
3364 * a dead original node. we're done. */
3365 ret = 0;
3366 }
3367
3368 /* all done, set the owner, clear the flag */
3369 spin_lock(&res->spinlock);
3370 dlm_set_lockres_owner(dlm, res, dlm->node_num);
3371 res->state &= ~DLM_LOCK_RES_MIGRATING;
3372 spin_unlock(&res->spinlock);
3373 /* re-dirty it on the new master */
3374 dlm_kick_thread(dlm, res);
3375 wake_up(&res->wq);
3376leave:
3377 return ret;
3378}
3379
3380/*
3381 * LOCKRES AST REFCOUNT
3382 * this is integral to migration
3383 */
3384
3385/* for future intent to call an ast, reserve one ahead of time.
3386 * this should be called only after waiting on the lockres
3387 * with dlm_wait_on_lockres, and while still holding the
3388 * spinlock after the call. */
3389void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res)
3390{
3391 assert_spin_locked(&res->spinlock);
3392 if (res->state & DLM_LOCK_RES_MIGRATING) {
3393 __dlm_print_one_lock_resource(res);
3394 }
3395 BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
3396
3397 atomic_inc(&res->asts_reserved);
3398}
3399
3400/*
3401 * used to drop the reserved ast, either because it went unused,
3402 * or because the ast/bast was actually called.
3403 *
3404 * also, if there is a pending migration on this lockres,
3405 * and this was the last pending ast on the lockres,
3406 * atomically set the MIGRATING flag before we drop the lock.
3407 * this is how we ensure that migration can proceed with no
3408 * asts in progress. note that it is ok if the state of the
3409 * queues is such that a lock should be granted in the future
3410 * or that a bast should be fired, because the new master will
3411 * shuffle the lists on this lockres as soon as it is migrated.
3412 */
3413void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
3414 struct dlm_lock_resource *res)
3415{
3416 if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock))
3417 return;
3418
3419 if (!res->migration_pending) {
3420 spin_unlock(&res->spinlock);
3421 return;
3422 }
3423
3424 BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
3425 res->migration_pending = 0;
3426 res->state |= DLM_LOCK_RES_MIGRATING;
3427 spin_unlock(&res->spinlock);
3428 wake_up(&res->wq);
3429 wake_up(&dlm->migration_wq);
3430}
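/*
 * Illustrative sketch, for exposition only: each ast/bast delivery is
 * bracketed by a reserve taken under res->spinlock and a release once the
 * callback has run, so migration can only set MIGRATING after
 * asts_reserved drains to zero.  The helper name is hypothetical and the
 * flow is simplified from the flush path in dlmthread.c.
 */
static void example_deliver_one_ast(struct dlm_ctxt *dlm,
				    struct dlm_lock_resource *res,
				    struct dlm_lock *lock)
{
	spin_lock(&res->spinlock);
	__dlm_lockres_reserve_ast(res);		/* hold off migration */
	spin_unlock(&res->spinlock);

	dlm_do_local_ast(dlm, res, lock);	/* run the callback */

	/* may atomically flip the lockres to MIGRATING if this was last */
	dlm_lockres_release_ast(dlm, res);
}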