Merge remote-tracking branch 'spi/fix/core' into spi-linus
[deliverable/linux.git] / fs / dlm / lockspace.c
CommitLineData
e7fd4179
DT
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
60f98d18 5** Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved.
e7fd4179
DT
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14#include "dlm_internal.h"
15#include "lockspace.h"
16#include "member.h"
17#include "recoverd.h"
e7fd4179
DT
18#include "dir.h"
19#include "lowcomms.h"
20#include "config.h"
21#include "memory.h"
22#include "lock.h"
c56b39cd 23#include "recover.h"
2896ee37 24#include "requestqueue.h"
0f8e0d9a 25#include "user.h"
23e8e1aa 26#include "ast.h"
e7fd4179 27
e7fd4179 28static int ls_count;
90135925 29static struct mutex ls_lock;
e7fd4179
DT
30static struct list_head lslist;
31static spinlock_t lslist_lock;
32static struct task_struct * scand_task;
33
34
35static ssize_t dlm_control_store(struct dlm_ls *ls, const char *buf, size_t len)
36{
37 ssize_t ret = len;
38 int n = simple_strtol(buf, NULL, 0);
39
e2de7f56
PC
40 ls = dlm_find_lockspace_local(ls->ls_local_handle);
41 if (!ls)
42 return -EINVAL;
43
e7fd4179
DT
44 switch (n) {
45 case 0:
46 dlm_ls_stop(ls);
47 break;
48 case 1:
49 dlm_ls_start(ls);
50 break;
51 default:
52 ret = -EINVAL;
53 }
e2de7f56 54 dlm_put_lockspace(ls);
e7fd4179
DT
55 return ret;
56}
57
58static ssize_t dlm_event_store(struct dlm_ls *ls, const char *buf, size_t len)
59{
60 ls->ls_uevent_result = simple_strtol(buf, NULL, 0);
61 set_bit(LSFL_UEVENT_WAIT, &ls->ls_flags);
62 wake_up(&ls->ls_uevent_wait);
63 return len;
64}
65
66static ssize_t dlm_id_show(struct dlm_ls *ls, char *buf)
67{
a1d144c7 68 return snprintf(buf, PAGE_SIZE, "%u\n", ls->ls_global_id);
e7fd4179
DT
69}
70
71static ssize_t dlm_id_store(struct dlm_ls *ls, const char *buf, size_t len)
72{
73 ls->ls_global_id = simple_strtoul(buf, NULL, 0);
74 return len;
75}
76
4875647a
DT
77static ssize_t dlm_nodir_show(struct dlm_ls *ls, char *buf)
78{
79 return snprintf(buf, PAGE_SIZE, "%u\n", dlm_no_directory(ls));
80}
81
82static ssize_t dlm_nodir_store(struct dlm_ls *ls, const char *buf, size_t len)
83{
84 int val = simple_strtoul(buf, NULL, 0);
85 if (val == 1)
86 set_bit(LSFL_NODIR, &ls->ls_flags);
87 return len;
88}
89
c56b39cd
DT
90static ssize_t dlm_recover_status_show(struct dlm_ls *ls, char *buf)
91{
92 uint32_t status = dlm_recover_status(ls);
a1d144c7 93 return snprintf(buf, PAGE_SIZE, "%x\n", status);
c56b39cd
DT
94}
95
faa0f267
DT
96static ssize_t dlm_recover_nodeid_show(struct dlm_ls *ls, char *buf)
97{
a1d144c7 98 return snprintf(buf, PAGE_SIZE, "%d\n", ls->ls_recover_nodeid);
faa0f267
DT
99}
100
e7fd4179
DT
101struct dlm_attr {
102 struct attribute attr;
103 ssize_t (*show)(struct dlm_ls *, char *);
104 ssize_t (*store)(struct dlm_ls *, const char *, size_t);
105};
106
107static struct dlm_attr dlm_attr_control = {
108 .attr = {.name = "control", .mode = S_IWUSR},
109 .store = dlm_control_store
110};
111
112static struct dlm_attr dlm_attr_event = {
113 .attr = {.name = "event_done", .mode = S_IWUSR},
114 .store = dlm_event_store
115};
116
117static struct dlm_attr dlm_attr_id = {
118 .attr = {.name = "id", .mode = S_IRUGO | S_IWUSR},
119 .show = dlm_id_show,
120 .store = dlm_id_store
121};
122
4875647a
DT
123static struct dlm_attr dlm_attr_nodir = {
124 .attr = {.name = "nodir", .mode = S_IRUGO | S_IWUSR},
125 .show = dlm_nodir_show,
126 .store = dlm_nodir_store
127};
128
c56b39cd
DT
129static struct dlm_attr dlm_attr_recover_status = {
130 .attr = {.name = "recover_status", .mode = S_IRUGO},
131 .show = dlm_recover_status_show
132};
133
faa0f267
DT
134static struct dlm_attr dlm_attr_recover_nodeid = {
135 .attr = {.name = "recover_nodeid", .mode = S_IRUGO},
136 .show = dlm_recover_nodeid_show
137};
138
e7fd4179
DT
139static struct attribute *dlm_attrs[] = {
140 &dlm_attr_control.attr,
141 &dlm_attr_event.attr,
142 &dlm_attr_id.attr,
4875647a 143 &dlm_attr_nodir.attr,
c56b39cd 144 &dlm_attr_recover_status.attr,
faa0f267 145 &dlm_attr_recover_nodeid.attr,
e7fd4179
DT
146 NULL,
147};
148
149static ssize_t dlm_attr_show(struct kobject *kobj, struct attribute *attr,
150 char *buf)
151{
152 struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
153 struct dlm_attr *a = container_of(attr, struct dlm_attr, attr);
154 return a->show ? a->show(ls, buf) : 0;
155}
156
157static ssize_t dlm_attr_store(struct kobject *kobj, struct attribute *attr,
158 const char *buf, size_t len)
159{
160 struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
161 struct dlm_attr *a = container_of(attr, struct dlm_attr, attr);
162 return a->store ? a->store(ls, buf, len) : len;
163}
164
ba542e3b
PC
165static void lockspace_kobj_release(struct kobject *k)
166{
167 struct dlm_ls *ls = container_of(k, struct dlm_ls, ls_kobj);
168 kfree(ls);
169}
170
52cf25d0 171static const struct sysfs_ops dlm_attr_ops = {
e7fd4179
DT
172 .show = dlm_attr_show,
173 .store = dlm_attr_store,
174};
175
176static struct kobj_type dlm_ktype = {
177 .default_attrs = dlm_attrs,
178 .sysfs_ops = &dlm_attr_ops,
ba542e3b 179 .release = lockspace_kobj_release,
e7fd4179
DT
180};
181
d405936b 182static struct kset *dlm_kset;
e7fd4179 183
e7fd4179
DT
184static int do_uevent(struct dlm_ls *ls, int in)
185{
186 int error;
187
188 if (in)
189 kobject_uevent(&ls->ls_kobj, KOBJ_ONLINE);
190 else
191 kobject_uevent(&ls->ls_kobj, KOBJ_OFFLINE);
192
8b0e7b2c
DT
193 log_debug(ls, "%s the lockspace group...", in ? "joining" : "leaving");
194
195 /* dlm_controld will see the uevent, do the necessary group management
196 and then write to sysfs to wake us */
197
e7fd4179
DT
198 error = wait_event_interruptible(ls->ls_uevent_wait,
199 test_and_clear_bit(LSFL_UEVENT_WAIT, &ls->ls_flags));
8b0e7b2c
DT
200
201 log_debug(ls, "group event done %d %d", error, ls->ls_uevent_result);
202
e7fd4179
DT
203 if (error)
204 goto out;
205
206 error = ls->ls_uevent_result;
207 out:
8b0e7b2c
DT
208 if (error)
209 log_error(ls, "group %s failed %d %d", in ? "join" : "leave",
210 error, ls->ls_uevent_result);
e7fd4179
DT
211 return error;
212}
213
b4a5d4bc
SW
214static int dlm_uevent(struct kset *kset, struct kobject *kobj,
215 struct kobj_uevent_env *env)
216{
217 struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
218
219 add_uevent_var(env, "LOCKSPACE=%s", ls->ls_name);
220 return 0;
221}
222
223static struct kset_uevent_ops dlm_uevent_ops = {
224 .uevent = dlm_uevent,
225};
e7fd4179 226
30727174 227int __init dlm_lockspace_init(void)
e7fd4179 228{
e7fd4179 229 ls_count = 0;
90135925 230 mutex_init(&ls_lock);
e7fd4179
DT
231 INIT_LIST_HEAD(&lslist);
232 spin_lock_init(&lslist_lock);
233
b4a5d4bc 234 dlm_kset = kset_create_and_add("dlm", &dlm_uevent_ops, kernel_kobj);
d405936b 235 if (!dlm_kset) {
8e24eea7 236 printk(KERN_WARNING "%s: can not create kset\n", __func__);
d405936b
GKH
237 return -ENOMEM;
238 }
239 return 0;
e7fd4179
DT
240}
241
242void dlm_lockspace_exit(void)
243{
d405936b 244 kset_unregister(dlm_kset);
e7fd4179
DT
245}
246
c1dcf65f
DT
247static struct dlm_ls *find_ls_to_scan(void)
248{
249 struct dlm_ls *ls;
250
251 spin_lock(&lslist_lock);
252 list_for_each_entry(ls, &lslist, ls_list) {
253 if (time_after_eq(jiffies, ls->ls_scan_time +
254 dlm_config.ci_scan_secs * HZ)) {
255 spin_unlock(&lslist_lock);
256 return ls;
257 }
258 }
259 spin_unlock(&lslist_lock);
260 return NULL;
261}
262
e7fd4179
DT
263static int dlm_scand(void *data)
264{
265 struct dlm_ls *ls;
266
267 while (!kthread_should_stop()) {
c1dcf65f
DT
268 ls = find_ls_to_scan();
269 if (ls) {
85e86edf 270 if (dlm_lock_recovery_try(ls)) {
c1dcf65f 271 ls->ls_scan_time = jiffies;
85e86edf 272 dlm_scan_rsbs(ls);
3ae1acf9 273 dlm_scan_timeout(ls);
c6ff669b 274 dlm_scan_waiters(ls);
85e86edf 275 dlm_unlock_recovery(ls);
c1dcf65f
DT
276 } else {
277 ls->ls_scan_time += HZ;
85e86edf 278 }
c6ff669b 279 continue;
85e86edf 280 }
c6ff669b 281 schedule_timeout_interruptible(dlm_config.ci_scan_secs * HZ);
e7fd4179
DT
282 }
283 return 0;
284}
285
286static int dlm_scand_start(void)
287{
288 struct task_struct *p;
289 int error = 0;
290
291 p = kthread_run(dlm_scand, NULL, "dlm_scand");
292 if (IS_ERR(p))
293 error = PTR_ERR(p);
294 else
295 scand_task = p;
296 return error;
297}
298
299static void dlm_scand_stop(void)
300{
301 kthread_stop(scand_task);
302}
303
e7fd4179
DT
304struct dlm_ls *dlm_find_lockspace_global(uint32_t id)
305{
306 struct dlm_ls *ls;
307
308 spin_lock(&lslist_lock);
309
310 list_for_each_entry(ls, &lslist, ls_list) {
311 if (ls->ls_global_id == id) {
312 ls->ls_count++;
313 goto out;
314 }
315 }
316 ls = NULL;
317 out:
318 spin_unlock(&lslist_lock);
319 return ls;
320}
321
597d0cae 322struct dlm_ls *dlm_find_lockspace_local(dlm_lockspace_t *lockspace)
e7fd4179 323{
597d0cae 324 struct dlm_ls *ls;
e7fd4179
DT
325
326 spin_lock(&lslist_lock);
597d0cae
DT
327 list_for_each_entry(ls, &lslist, ls_list) {
328 if (ls->ls_local_handle == lockspace) {
329 ls->ls_count++;
330 goto out;
331 }
332 }
333 ls = NULL;
334 out:
335 spin_unlock(&lslist_lock);
336 return ls;
337}
338
339struct dlm_ls *dlm_find_lockspace_device(int minor)
340{
341 struct dlm_ls *ls;
342
343 spin_lock(&lslist_lock);
344 list_for_each_entry(ls, &lslist, ls_list) {
345 if (ls->ls_device.minor == minor) {
346 ls->ls_count++;
347 goto out;
348 }
349 }
350 ls = NULL;
351 out:
e7fd4179
DT
352 spin_unlock(&lslist_lock);
353 return ls;
354}
355
356void dlm_put_lockspace(struct dlm_ls *ls)
357{
358 spin_lock(&lslist_lock);
359 ls->ls_count--;
360 spin_unlock(&lslist_lock);
361}
362
363static void remove_lockspace(struct dlm_ls *ls)
364{
365 for (;;) {
366 spin_lock(&lslist_lock);
367 if (ls->ls_count == 0) {
0f8e0d9a 368 WARN_ON(ls->ls_create_count != 0);
e7fd4179
DT
369 list_del(&ls->ls_list);
370 spin_unlock(&lslist_lock);
371 return;
372 }
373 spin_unlock(&lslist_lock);
374 ssleep(1);
375 }
376}
377
/* Start the global helper threads used by all lockspaces: the scanner
   kthread and the lowcomms messaging layer.  On failure everything
   already started is torn down again. */
static int threads_start(void)
{
	int error;

	error = dlm_scand_start();
	if (error) {
		log_print("cannot start dlm_scand thread %d", error);
		goto fail;
	}

	/* Thread for sending/receiving messages for all lockspace's */
	error = dlm_lowcomms_start();
	if (error) {
		log_print("cannot start dlm lowcomms %d", error);
		goto scand_fail;
	}

	return 0;

 scand_fail:
	dlm_scand_stop();
 fail:
	return error;
}
402
/* Stop the global helper threads (inverse of threads_start). */
static void threads_stop(void)
{
	dlm_scand_stop();
	dlm_lowcomms_stop();
}
408
60f98d18
DT
409static int new_lockspace(const char *name, const char *cluster,
410 uint32_t flags, int lvblen,
411 const struct dlm_lockspace_ops *ops, void *ops_arg,
412 int *ops_result, dlm_lockspace_t **lockspace)
e7fd4179
DT
413{
414 struct dlm_ls *ls;
0f8e0d9a 415 int i, size, error;
79d72b54 416 int do_unreg = 0;
60f98d18 417 int namelen = strlen(name);
e7fd4179
DT
418
419 if (namelen > DLM_LOCKSPACE_LEN)
420 return -EINVAL;
421
422 if (!lvblen || (lvblen % 8))
423 return -EINVAL;
424
425 if (!try_module_get(THIS_MODULE))
426 return -EINVAL;
427
dc68c7ed 428 if (!dlm_user_daemon_available()) {
60f98d18
DT
429 log_print("dlm user daemon not available");
430 error = -EUNATCH;
431 goto out;
432 }
433
434 if (ops && ops_result) {
435 if (!dlm_config.ci_recover_callbacks)
436 *ops_result = -EOPNOTSUPP;
437 else
438 *ops_result = 0;
439 }
440
441 if (dlm_config.ci_recover_callbacks && cluster &&
442 strncmp(cluster, dlm_config.ci_cluster_name, DLM_LOCKSPACE_LEN)) {
443 log_print("dlm cluster name %s mismatch %s",
444 dlm_config.ci_cluster_name, cluster);
445 error = -EBADR;
446 goto out;
dc68c7ed
DT
447 }
448
0f8e0d9a
DT
449 error = 0;
450
451 spin_lock(&lslist_lock);
452 list_for_each_entry(ls, &lslist, ls_list) {
453 WARN_ON(ls->ls_create_count <= 0);
454 if (ls->ls_namelen != namelen)
455 continue;
456 if (memcmp(ls->ls_name, name, namelen))
457 continue;
458 if (flags & DLM_LSFL_NEWEXCL) {
459 error = -EEXIST;
460 break;
461 }
462 ls->ls_create_count++;
8511a272
DT
463 *lockspace = ls;
464 error = 1;
0f8e0d9a 465 break;
e7fd4179 466 }
0f8e0d9a
DT
467 spin_unlock(&lslist_lock);
468
0f8e0d9a 469 if (error)
8511a272 470 goto out;
0f8e0d9a
DT
471
472 error = -ENOMEM;
e7fd4179 473
573c24c4 474 ls = kzalloc(sizeof(struct dlm_ls) + namelen, GFP_NOFS);
e7fd4179
DT
475 if (!ls)
476 goto out;
e7fd4179
DT
477 memcpy(ls->ls_name, name, namelen);
478 ls->ls_namelen = namelen;
e7fd4179
DT
479 ls->ls_lvblen = lvblen;
480 ls->ls_count = 0;
481 ls->ls_flags = 0;
c1dcf65f 482 ls->ls_scan_time = jiffies;
e7fd4179 483
60f98d18
DT
484 if (ops && dlm_config.ci_recover_callbacks) {
485 ls->ls_ops = ops;
486 ls->ls_ops_arg = ops_arg;
487 }
488
3ae1acf9
DT
489 if (flags & DLM_LSFL_TIMEWARN)
490 set_bit(LSFL_TIMEWARN, &ls->ls_flags);
3ae1acf9 491
fad59c13 492 /* ls_exflags are forced to match among nodes, and we don't
0f8e0d9a
DT
493 need to require all nodes to have some flags set */
494 ls->ls_exflags = (flags & ~(DLM_LSFL_TIMEWARN | DLM_LSFL_FS |
495 DLM_LSFL_NEWEXCL));
fad59c13 496
68c817a1 497 size = dlm_config.ci_rsbtbl_size;
e7fd4179
DT
498 ls->ls_rsbtbl_size = size;
499
c282af49 500 ls->ls_rsbtbl = vmalloc(sizeof(struct dlm_rsbtable) * size);
e7fd4179
DT
501 if (!ls->ls_rsbtbl)
502 goto out_lsfree;
503 for (i = 0; i < size; i++) {
9beb3bf5
BP
504 ls->ls_rsbtbl[i].keep.rb_node = NULL;
505 ls->ls_rsbtbl[i].toss.rb_node = NULL;
c7be761a 506 spin_lock_init(&ls->ls_rsbtbl[i].lock);
e7fd4179
DT
507 }
508
05c32f47
DT
509 spin_lock_init(&ls->ls_remove_spin);
510
511 for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++) {
512 ls->ls_remove_names[i] = kzalloc(DLM_RESNAME_MAXLEN+1,
513 GFP_KERNEL);
514 if (!ls->ls_remove_names[i])
515 goto out_rsbtbl;
516 }
517
3d6aa675
DT
518 idr_init(&ls->ls_lkbidr);
519 spin_lock_init(&ls->ls_lkbidr_spin);
e7fd4179 520
e7fd4179 521 INIT_LIST_HEAD(&ls->ls_waiters);
90135925 522 mutex_init(&ls->ls_waiters_mutex);
ef0c2bb0
DT
523 INIT_LIST_HEAD(&ls->ls_orphans);
524 mutex_init(&ls->ls_orphans_mutex);
3ae1acf9
DT
525 INIT_LIST_HEAD(&ls->ls_timeout);
526 mutex_init(&ls->ls_timeout_mutex);
e7fd4179 527
3881ac04
DT
528 INIT_LIST_HEAD(&ls->ls_new_rsb);
529 spin_lock_init(&ls->ls_new_rsb_spin);
530
e7fd4179
DT
531 INIT_LIST_HEAD(&ls->ls_nodes);
532 INIT_LIST_HEAD(&ls->ls_nodes_gone);
533 ls->ls_num_nodes = 0;
534 ls->ls_low_nodeid = 0;
535 ls->ls_total_weight = 0;
536 ls->ls_node_array = NULL;
537
538 memset(&ls->ls_stub_rsb, 0, sizeof(struct dlm_rsb));
539 ls->ls_stub_rsb.res_ls = ls;
540
5de6319b
DT
541 ls->ls_debug_rsb_dentry = NULL;
542 ls->ls_debug_waiters_dentry = NULL;
e7fd4179
DT
543
544 init_waitqueue_head(&ls->ls_uevent_wait);
545 ls->ls_uevent_result = 0;
8b0e7b2c
DT
546 init_completion(&ls->ls_members_done);
547 ls->ls_members_result = -1;
e7fd4179 548
23e8e1aa
DT
549 mutex_init(&ls->ls_cb_mutex);
550 INIT_LIST_HEAD(&ls->ls_cb_delay);
551
e7fd4179 552 ls->ls_recoverd_task = NULL;
90135925 553 mutex_init(&ls->ls_recoverd_active);
e7fd4179 554 spin_lock_init(&ls->ls_recover_lock);
98f176fb
DT
555 spin_lock_init(&ls->ls_rcom_spin);
556 get_random_bytes(&ls->ls_rcom_seq, sizeof(uint64_t));
e7fd4179
DT
557 ls->ls_recover_status = 0;
558 ls->ls_recover_seq = 0;
559 ls->ls_recover_args = NULL;
560 init_rwsem(&ls->ls_in_recovery);
c36258b5 561 init_rwsem(&ls->ls_recv_active);
e7fd4179 562 INIT_LIST_HEAD(&ls->ls_requestqueue);
90135925 563 mutex_init(&ls->ls_requestqueue_mutex);
597d0cae 564 mutex_init(&ls->ls_clear_proc_locks);
e7fd4179 565
573c24c4 566 ls->ls_recover_buf = kmalloc(dlm_config.ci_buffer_size, GFP_NOFS);
e7fd4179 567 if (!ls->ls_recover_buf)
05c32f47 568 goto out_lkbidr;
e7fd4179 569
757a4271
DT
570 ls->ls_slot = 0;
571 ls->ls_num_slots = 0;
572 ls->ls_slots_size = 0;
573 ls->ls_slots = NULL;
574
e7fd4179
DT
575 INIT_LIST_HEAD(&ls->ls_recover_list);
576 spin_lock_init(&ls->ls_recover_list_lock);
1d7c484e
DT
577 idr_init(&ls->ls_recover_idr);
578 spin_lock_init(&ls->ls_recover_idr_lock);
e7fd4179 579 ls->ls_recover_list_count = 0;
597d0cae 580 ls->ls_local_handle = ls;
e7fd4179
DT
581 init_waitqueue_head(&ls->ls_wait_general);
582 INIT_LIST_HEAD(&ls->ls_root_list);
583 init_rwsem(&ls->ls_root_sem);
584
5f88f1ea 585 spin_lock(&lslist_lock);
0f8e0d9a 586 ls->ls_create_count = 1;
5f88f1ea
DT
587 list_add(&ls->ls_list, &lslist);
588 spin_unlock(&lslist_lock);
589
23e8e1aa
DT
590 if (flags & DLM_LSFL_FS) {
591 error = dlm_callback_start(ls);
592 if (error) {
593 log_error(ls, "can't start dlm_callback %d", error);
594 goto out_delist;
595 }
596 }
597
475f230c
DT
598 init_waitqueue_head(&ls->ls_recover_lock_wait);
599
600 /*
601 * Once started, dlm_recoverd first looks for ls in lslist, then
602 * initializes ls_in_recovery as locked in "down" mode. We need
603 * to wait for the wakeup from dlm_recoverd because in_recovery
604 * has to start out in down mode.
605 */
606
e7fd4179
DT
607 error = dlm_recoverd_start(ls);
608 if (error) {
609 log_error(ls, "can't start dlm_recoverd %d", error);
23e8e1aa 610 goto out_callback;
e7fd4179
DT
611 }
612
475f230c
DT
613 wait_event(ls->ls_recover_lock_wait,
614 test_bit(LSFL_RECOVER_LOCK, &ls->ls_flags));
615
901195ed
GKH
616 ls->ls_kobj.kset = dlm_kset;
617 error = kobject_init_and_add(&ls->ls_kobj, &dlm_ktype, NULL,
618 "%s", ls->ls_name);
e7fd4179 619 if (error)
23e8e1aa 620 goto out_recoverd;
901195ed 621 kobject_uevent(&ls->ls_kobj, KOBJ_ADD);
79d72b54
DT
622
623 /* let kobject handle freeing of ls if there's an error */
624 do_unreg = 1;
e7fd4179 625
8b0e7b2c
DT
626 /* This uevent triggers dlm_controld in userspace to add us to the
627 group of nodes that are members of this lockspace (managed by the
628 cluster infrastructure.) Once it's done that, it tells us who the
629 current lockspace members are (via configfs) and then tells the
630 lockspace to start running (via sysfs) in dlm_ls_start(). */
631
e7fd4179
DT
632 error = do_uevent(ls, 1);
633 if (error)
23e8e1aa 634 goto out_recoverd;
79d72b54 635
8b0e7b2c
DT
636 wait_for_completion(&ls->ls_members_done);
637 error = ls->ls_members_result;
638 if (error)
639 goto out_members;
640
79d72b54
DT
641 dlm_create_debug_file(ls);
642
643 log_debug(ls, "join complete");
e7fd4179
DT
644 *lockspace = ls;
645 return 0;
646
8b0e7b2c
DT
647 out_members:
648 do_uevent(ls, 0);
649 dlm_clear_members(ls);
650 kfree(ls->ls_node_array);
23e8e1aa 651 out_recoverd:
5f88f1ea 652 dlm_recoverd_stop(ls);
23e8e1aa
DT
653 out_callback:
654 dlm_callback_stop(ls);
79d72b54 655 out_delist:
e7fd4179
DT
656 spin_lock(&lslist_lock);
657 list_del(&ls->ls_list);
658 spin_unlock(&lslist_lock);
1d7c484e 659 idr_destroy(&ls->ls_recover_idr);
e7fd4179 660 kfree(ls->ls_recover_buf);
05c32f47 661 out_lkbidr:
3d6aa675 662 idr_destroy(&ls->ls_lkbidr);
05c32f47
DT
663 for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++) {
664 if (ls->ls_remove_names[i])
665 kfree(ls->ls_remove_names[i]);
666 }
667 out_rsbtbl:
c282af49 668 vfree(ls->ls_rsbtbl);
e7fd4179 669 out_lsfree:
79d72b54 670 if (do_unreg)
197b12d6 671 kobject_put(&ls->ls_kobj);
79d72b54
DT
672 else
673 kfree(ls);
e7fd4179
DT
674 out:
675 module_put(THIS_MODULE);
676 return error;
677}
678
60f98d18
DT
679int dlm_new_lockspace(const char *name, const char *cluster,
680 uint32_t flags, int lvblen,
681 const struct dlm_lockspace_ops *ops, void *ops_arg,
682 int *ops_result, dlm_lockspace_t **lockspace)
e7fd4179
DT
683{
684 int error = 0;
685
90135925 686 mutex_lock(&ls_lock);
e7fd4179
DT
687 if (!ls_count)
688 error = threads_start();
689 if (error)
690 goto out;
691
60f98d18
DT
692 error = new_lockspace(name, cluster, flags, lvblen, ops, ops_arg,
693 ops_result, lockspace);
e7fd4179
DT
694 if (!error)
695 ls_count++;
8511a272
DT
696 if (error > 0)
697 error = 0;
698 if (!ls_count)
8b0e7b2c 699 threads_stop();
e7fd4179 700 out:
90135925 701 mutex_unlock(&ls_lock);
e7fd4179
DT
702 return error;
703}
704
3d6aa675 705static int lkb_idr_is_local(int id, void *p, void *data)
e7fd4179 706{
3d6aa675
DT
707 struct dlm_lkb *lkb = p;
708
a97f4a66 709 return lkb->lkb_nodeid == 0 && lkb->lkb_grmode != DLM_LOCK_IV;
3d6aa675
DT
710}
711
/* idr_for_each callback: matches every lkb (any lock makes us busy). */
static int lkb_idr_is_any(int id, void *p, void *data)
{
	return 1;
}
716
717static int lkb_idr_free(int id, void *p, void *data)
718{
719 struct dlm_lkb *lkb = p;
720
3d6aa675
DT
721 if (lkb->lkb_lvbptr && lkb->lkb_flags & DLM_IFL_MSTCPY)
722 dlm_free_lvb(lkb->lkb_lvbptr);
723
724 dlm_free_lkb(lkb);
725 return 0;
726}
727
728/* NOTE: We check the lkbidr here rather than the resource table.
729 This is because there may be LKBs queued as ASTs that have been unlinked
730 from their RSBs and are pending deletion once the AST has been delivered */
731
732static int lockspace_busy(struct dlm_ls *ls, int force)
733{
734 int rv;
735
736 spin_lock(&ls->ls_lkbidr_spin);
737 if (force == 0) {
738 rv = idr_for_each(&ls->ls_lkbidr, lkb_idr_is_any, ls);
739 } else if (force == 1) {
740 rv = idr_for_each(&ls->ls_lkbidr, lkb_idr_is_local, ls);
741 } else {
742 rv = 0;
e7fd4179 743 }
3d6aa675
DT
744 spin_unlock(&ls->ls_lkbidr_spin);
745 return rv;
e7fd4179
DT
746}
747
748static int release_lockspace(struct dlm_ls *ls, int force)
749{
e7fd4179 750 struct dlm_rsb *rsb;
9beb3bf5 751 struct rb_node *n;
0f8e0d9a
DT
752 int i, busy, rv;
753
3d6aa675 754 busy = lockspace_busy(ls, force);
0f8e0d9a
DT
755
756 spin_lock(&lslist_lock);
757 if (ls->ls_create_count == 1) {
3d6aa675 758 if (busy) {
0f8e0d9a 759 rv = -EBUSY;
3d6aa675 760 } else {
0f8e0d9a
DT
761 /* remove_lockspace takes ls off lslist */
762 ls->ls_create_count = 0;
763 rv = 0;
764 }
765 } else if (ls->ls_create_count > 1) {
766 rv = --ls->ls_create_count;
767 } else {
768 rv = -EINVAL;
769 }
770 spin_unlock(&lslist_lock);
771
772 if (rv) {
773 log_debug(ls, "release_lockspace no remove %d", rv);
774 return rv;
775 }
e7fd4179 776
0f8e0d9a 777 dlm_device_deregister(ls);
e7fd4179 778
dc68c7ed 779 if (force < 3 && dlm_user_daemon_available())
e7fd4179
DT
780 do_uevent(ls, 0);
781
782 dlm_recoverd_stop(ls);
783
23e8e1aa
DT
784 dlm_callback_stop(ls);
785
e7fd4179
DT
786 remove_lockspace(ls);
787
788 dlm_delete_debug_file(ls);
789
e7fd4179
DT
790 kfree(ls->ls_recover_buf);
791
e7fd4179 792 /*
3d6aa675 793 * Free all lkb's in idr
e7fd4179
DT
794 */
795
3d6aa675 796 idr_for_each(&ls->ls_lkbidr, lkb_idr_free, ls);
3d6aa675 797 idr_destroy(&ls->ls_lkbidr);
e7fd4179 798
e7fd4179
DT
799 /*
800 * Free all rsb's on rsbtbl[] lists
801 */
802
803 for (i = 0; i < ls->ls_rsbtbl_size; i++) {
9beb3bf5
BP
804 while ((n = rb_first(&ls->ls_rsbtbl[i].keep))) {
805 rsb = rb_entry(n, struct dlm_rsb, res_hashnode);
806 rb_erase(n, &ls->ls_rsbtbl[i].keep);
52bda2b5 807 dlm_free_rsb(rsb);
e7fd4179
DT
808 }
809
9beb3bf5
BP
810 while ((n = rb_first(&ls->ls_rsbtbl[i].toss))) {
811 rsb = rb_entry(n, struct dlm_rsb, res_hashnode);
812 rb_erase(n, &ls->ls_rsbtbl[i].toss);
52bda2b5 813 dlm_free_rsb(rsb);
e7fd4179
DT
814 }
815 }
816
c282af49 817 vfree(ls->ls_rsbtbl);
e7fd4179 818
05c32f47
DT
819 for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++)
820 kfree(ls->ls_remove_names[i]);
821
3881ac04
DT
822 while (!list_empty(&ls->ls_new_rsb)) {
823 rsb = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb,
824 res_hashchain);
825 list_del(&rsb->res_hashchain);
826 dlm_free_rsb(rsb);
827 }
828
e7fd4179
DT
829 /*
830 * Free structures on any other lists
831 */
832
2896ee37 833 dlm_purge_requestqueue(ls);
e7fd4179 834 kfree(ls->ls_recover_args);
e7fd4179
DT
835 dlm_clear_members(ls);
836 dlm_clear_members_gone(ls);
837 kfree(ls->ls_node_array);
0f8e0d9a 838 log_debug(ls, "release_lockspace final free");
197b12d6 839 kobject_put(&ls->ls_kobj);
79d72b54 840 /* The ls structure will be freed when the kobject is done with */
e7fd4179 841
e7fd4179
DT
842 module_put(THIS_MODULE);
843 return 0;
844}
845
846/*
847 * Called when a system has released all its locks and is not going to use the
848 * lockspace any longer. We free everything we're managing for this lockspace.
849 * Remaining nodes will go through the recovery process as if we'd died. The
850 * lockspace must continue to function as usual, participating in recoveries,
851 * until this returns.
852 *
853 * Force has 4 possible values:
854 * 0 - don't destroy locksapce if it has any LKBs
855 * 1 - destroy lockspace if it has remote LKBs but not if it has local LKBs
856 * 2 - destroy lockspace regardless of LKBs
857 * 3 - destroy lockspace as part of a forced shutdown
858 */
859
860int dlm_release_lockspace(void *lockspace, int force)
861{
862 struct dlm_ls *ls;
0f8e0d9a 863 int error;
e7fd4179
DT
864
865 ls = dlm_find_lockspace_local(lockspace);
866 if (!ls)
867 return -EINVAL;
868 dlm_put_lockspace(ls);
0f8e0d9a
DT
869
870 mutex_lock(&ls_lock);
871 error = release_lockspace(ls, force);
872 if (!error)
873 ls_count--;
278afcbf 874 if (!ls_count)
0f8e0d9a
DT
875 threads_stop();
876 mutex_unlock(&ls_lock);
877
878 return error;
e7fd4179
DT
879}
880
dc68c7ed
DT
881void dlm_stop_lockspaces(void)
882{
883 struct dlm_ls *ls;
696b3d84 884 int count;
dc68c7ed
DT
885
886 restart:
696b3d84 887 count = 0;
dc68c7ed
DT
888 spin_lock(&lslist_lock);
889 list_for_each_entry(ls, &lslist, ls_list) {
696b3d84
DT
890 if (!test_bit(LSFL_RUNNING, &ls->ls_flags)) {
891 count++;
dc68c7ed 892 continue;
696b3d84 893 }
dc68c7ed
DT
894 spin_unlock(&lslist_lock);
895 log_error(ls, "no userland control daemon, stopping lockspace");
896 dlm_ls_stop(ls);
897 goto restart;
898 }
899 spin_unlock(&lslist_lock);
696b3d84
DT
900
901 if (count)
902 log_print("dlm user daemon left %d lockspaces", count);
dc68c7ed
DT
903}
904
This page took 0.558102 seconds and 5 git commands to generate.