/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmunlock.c
 *
 * underlying calls for unlocking locks
 *
 * Copyright (C) 2004 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 */


#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/spinlock.h>
#include <linux/delay.h>

#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"

#define MLOG_MASK_PREFIX ML_DLM
#include "cluster/masklog.h"

#define DLM_UNLOCK_FREE_LOCK           0x00000001
#define DLM_UNLOCK_CALL_AST            0x00000002
#define DLM_UNLOCK_REMOVE_LOCK         0x00000004
#define DLM_UNLOCK_REGRANT_LOCK        0x00000008
#define DLM_UNLOCK_CLEAR_CONVERT_TYPE  0x00000010
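
/*
 * Summary of the action bits (derived from how dlmunlock_common() below
 * consumes them): FREE_LOCK drops the final lock reference, CALL_AST
 * fires the caller's unlockast, REMOVE_LOCK unlinks the lock from its
 * queue, REGRANT_LOCK moves a cancelled convert back onto the granted
 * queue, and CLEAR_CONVERT_TYPE resets ml.convert_type to LKM_IVMODE.
 */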


static enum dlm_status dlm_get_cancel_actions(struct dlm_ctxt *dlm,
                                              struct dlm_lock_resource *res,
                                              struct dlm_lock *lock,
                                              struct dlm_lockstatus *lksb,
                                              int *actions);
static enum dlm_status dlm_get_unlock_actions(struct dlm_ctxt *dlm,
                                              struct dlm_lock_resource *res,
                                              struct dlm_lock *lock,
                                              struct dlm_lockstatus *lksb,
                                              int *actions);

static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm,
                                                      struct dlm_lock_resource *res,
                                                      struct dlm_lock *lock,
                                                      struct dlm_lockstatus *lksb,
                                                      int flags,
                                                      u8 owner);


/*
 * according to the spec:
 * http://opendlm.sourceforge.net/cvsmirror/opendlm/docs/dlmbook_final.pdf
 *
 * flags & LKM_CANCEL != 0: must be converting or blocked
 * flags & LKM_CANCEL == 0: must be granted
 *
 * So to unlock a converting lock, you must first cancel the
 * convert (passing LKM_CANCEL in flags), then call the unlock
 * again (with no LKM_CANCEL in flags).
 */
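
/*
 * For example (hypothetical caller-side sequence; argument names are
 * placeholders, not taken from this file):
 *
 *	status = dlmunlock(dlm, lksb, LKM_CANCEL, unlockast, data);
 *	(wait for the cancel to complete)
 *	status = dlmunlock(dlm, lksb, 0, unlockast, data);
 */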


/*
 * locking:
 * caller needs: none
 * taken: res->spinlock and lock->spinlock taken and dropped
 * held on exit: none
 * returns: DLM_NORMAL, DLM_NOLOCKMGR, status from network
 * all callers should have taken an extra ref on lock coming in
 */
static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
                                        struct dlm_lock_resource *res,
                                        struct dlm_lock *lock,
                                        struct dlm_lockstatus *lksb,
                                        int flags, int *call_ast,
                                        int master_node)
{
        enum dlm_status status;
        int actions = 0;
        int in_use;
        u8 owner;

        mlog(0, "master_node = %d, valblk = %d\n", master_node,
             flags & LKM_VALBLK);

        if (master_node)
                BUG_ON(res->owner != dlm->node_num);
        else
                BUG_ON(res->owner == dlm->node_num);

        spin_lock(&dlm->spinlock);
        /* We want to be sure that we're not freeing a lock
         * that still has ASTs pending... */
        in_use = !list_empty(&lock->ast_list);
        spin_unlock(&dlm->spinlock);
        if (in_use) {
                mlog(ML_ERROR, "lockres %.*s: Someone is calling dlmunlock "
                     "while waiting for an ast!", res->lockname.len,
                     res->lockname.name);
                return DLM_BADPARAM;
        }

        spin_lock(&res->spinlock);
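        /* If another operation is already in flight on this lockres:
         * the master path may be running from the o2net handler (see
         * dlm_unlock_lock_handler below) and must not sleep, so it
         * hands DLM_FORWARD back for a retry; a non-master caller is
         * in plain process context and can simply wait. */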
        if (res->state & DLM_LOCK_RES_IN_PROGRESS) {
                if (master_node) {
                        mlog(ML_ERROR, "lockres in progress!\n");
                        spin_unlock(&res->spinlock);
                        return DLM_FORWARD;
                }
                /* ok for this to sleep if not in a network handler */
                __dlm_wait_on_lockres(res);
                res->state |= DLM_LOCK_RES_IN_PROGRESS;
        }
        spin_lock(&lock->spinlock);

        if (res->state & DLM_LOCK_RES_RECOVERING) {
                status = DLM_RECOVERING;
                goto leave;
        }


        /* see above for what the spec says about
         * LKM_CANCEL and the lock queue state */
        if (flags & LKM_CANCEL)
                status = dlm_get_cancel_actions(dlm, res, lock, lksb, &actions);
        else
                status = dlm_get_unlock_actions(dlm, res, lock, lksb, &actions);

        if (status != DLM_NORMAL && (status != DLM_CANCELGRANT || !master_node))
                goto leave;

        /* By now this has been masked out of cancel requests. */
        if (flags & LKM_VALBLK) {
                /* make the final update to the lvb */
                if (master_node)
                        memcpy(res->lvb, lksb->lvb, DLM_LVB_LEN);
                else
                        flags |= LKM_PUT_LVB; /* let the send function
                                               * handle it. */
        }

        if (!master_node) {
                owner = res->owner;
                /* drop locks and send message */
                if (flags & LKM_CANCEL)
                        lock->cancel_pending = 1;
                else
                        lock->unlock_pending = 1;
                spin_unlock(&lock->spinlock);
                spin_unlock(&res->spinlock);
                status = dlm_send_remote_unlock_request(dlm, res, lock, lksb,
                                                        flags, owner);
                spin_lock(&res->spinlock);
                spin_lock(&lock->spinlock);
                /* if the master told us the lock was already granted,
                 * let the ast handle all of these actions */
                if (status == DLM_CANCELGRANT) {
                        actions &= ~(DLM_UNLOCK_REMOVE_LOCK|
                                     DLM_UNLOCK_REGRANT_LOCK|
                                     DLM_UNLOCK_CLEAR_CONVERT_TYPE);
                } else if (status == DLM_RECOVERING ||
                           status == DLM_MIGRATING ||
                           status == DLM_FORWARD) {
                        /* must clear the actions because this unlock
                         * is about to be retried. cannot free or do
                         * any list manipulation. */
                        mlog(0, "%s:%.*s: clearing actions, %s\n",
                             dlm->name, res->lockname.len,
                             res->lockname.name,
                             status==DLM_RECOVERING?"recovering":
                             (status==DLM_MIGRATING?"migrating":
                              "forward"));
                        actions = 0;
                }
                if (flags & LKM_CANCEL)
                        lock->cancel_pending = 0;
                else
                        lock->unlock_pending = 0;

        }

        /* get an extra ref on lock. if we are just switching
         * lists here, we don't want the lock to go away. */
        dlm_lock_get(lock);

        if (actions & DLM_UNLOCK_REMOVE_LOCK) {
                list_del_init(&lock->list);
                dlm_lock_put(lock);
        }
        if (actions & DLM_UNLOCK_REGRANT_LOCK) {
                dlm_lock_get(lock);
                list_add_tail(&lock->list, &res->granted);
        }
        if (actions & DLM_UNLOCK_CLEAR_CONVERT_TYPE) {
                mlog(0, "clearing convert_type at %smaster node\n",
                     master_node ? "" : "non-");
                lock->ml.convert_type = LKM_IVMODE;
        }

        /* remove the extra ref on lock */
        dlm_lock_put(lock);

leave:
        res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
        if (!dlm_lock_on_list(&res->converting, lock))
                BUG_ON(lock->ml.convert_type != LKM_IVMODE);
        else
                BUG_ON(lock->ml.convert_type == LKM_IVMODE);
        spin_unlock(&lock->spinlock);
        spin_unlock(&res->spinlock);
        wake_up(&res->wq);

        /* let the caller's final dlm_lock_put handle the actual kfree */
        if (actions & DLM_UNLOCK_FREE_LOCK) {
                /* this should always be coupled with list removal */
                BUG_ON(!(actions & DLM_UNLOCK_REMOVE_LOCK));
                mlog(0, "lock %u:%llu should be gone now! refs=%d\n",
                     dlm_get_lock_cookie_node(lock->ml.cookie),
                     dlm_get_lock_cookie_seq(lock->ml.cookie),
                     atomic_read(&lock->lock_refs.refcount)-1);
                dlm_lock_put(lock);
        }
        if (actions & DLM_UNLOCK_CALL_AST)
                *call_ast = 1;

        /* if cancel or unlock succeeded, lvb work is done */
        if (status == DLM_NORMAL)
                lksb->flags &= ~(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB);

        return status;
}

void dlm_commit_pending_unlock(struct dlm_lock_resource *res,
                               struct dlm_lock *lock)
{
        /* leave DLM_LKSB_PUT_LVB on the lksb so any final
         * update of the lvb will be sent to the new master */
        list_del_init(&lock->list);
}

void dlm_commit_pending_cancel(struct dlm_lock_resource *res,
                               struct dlm_lock *lock)
{
        list_move_tail(&lock->list, &res->granted);
        lock->ml.convert_type = LKM_IVMODE;
}
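/*
 * The two helpers above appear to be the recovery-time counterparts of
 * the unlock_pending/cancel_pending flags set in dlmunlock_common():
 * when the master dies mid-operation, recovery finishes the pending
 * unlock (unlink the lock) or cancel (move it back to granted) locally.
 */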

static inline enum dlm_status dlmunlock_master(struct dlm_ctxt *dlm,
                                               struct dlm_lock_resource *res,
                                               struct dlm_lock *lock,
                                               struct dlm_lockstatus *lksb,
                                               int flags,
                                               int *call_ast)
{
        return dlmunlock_common(dlm, res, lock, lksb, flags, call_ast, 1);
}

static inline enum dlm_status dlmunlock_remote(struct dlm_ctxt *dlm,
                                               struct dlm_lock_resource *res,
                                               struct dlm_lock *lock,
                                               struct dlm_lockstatus *lksb,
                                               int flags, int *call_ast)
{
        return dlmunlock_common(dlm, res, lock, lksb, flags, call_ast, 0);
}

/*
 * locking:
 * caller needs: none
 * taken: none
 * held on exit: none
 * returns: DLM_NORMAL, DLM_NOLOCKMGR, status from network
 */
static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm,
                                                      struct dlm_lock_resource *res,
                                                      struct dlm_lock *lock,
                                                      struct dlm_lockstatus *lksb,
                                                      int flags,
                                                      u8 owner)
{
        struct dlm_unlock_lock unlock;
        int tmpret;
        enum dlm_status ret;
        int status = 0;
        struct kvec vec[2];
        size_t veclen = 1;

        mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);

        if (owner == dlm->node_num) {
                /* ended up trying to contact ourselves. this means
                 * that the lockres had been remote but became local
                 * via a migration. just retry it, now as local */
                mlog(0, "%s:%.*s: this node became the master due to a "
                     "migration, re-evaluate now\n", dlm->name,
                     res->lockname.len, res->lockname.name);
                return DLM_FORWARD;
        }
        memset(&unlock, 0, sizeof(unlock));
        unlock.node_idx = dlm->node_num;
        unlock.flags = cpu_to_be32(flags);
        unlock.cookie = lock->ml.cookie;
        unlock.namelen = res->lockname.len;
        memcpy(unlock.name, res->lockname.name, unlock.namelen);

        vec[0].iov_len = sizeof(struct dlm_unlock_lock);
        vec[0].iov_base = &unlock;

        if (flags & LKM_PUT_LVB) {
                /* extra data to send if we are updating lvb */
                vec[1].iov_len = DLM_LVB_LEN;
                vec[1].iov_base = lock->lksb->lvb;
                veclen++;
        }

        tmpret = o2net_send_message_vec(DLM_UNLOCK_LOCK_MSG, dlm->key,
                                        vec, veclen, owner, &status);
        if (tmpret >= 0) {
                /* successfully sent and received */
                if (status == DLM_FORWARD)
                        mlog(0, "master was in-progress. retry\n");
                ret = status;
        } else {
                mlog_errno(tmpret);
                if (dlm_is_host_down(tmpret)) {
                        /* NOTE: this seems strange, but it is what we want.
                         * when the master goes down during a cancel or
                         * unlock, the recovery code completes the operation
                         * as if the master had not died, then passes the
                         * updated state to the recovery master. this thread
                         * just needs to finish out the operation and call
                         * the unlockast. */
                        ret = DLM_NORMAL;
                } else {
                        /* something bad. this will BUG in ocfs2 */
                        ret = dlm_err_to_dlm_status(tmpret);
                }
        }

        return ret;
}
/*
 * locking:
 * caller needs: none
 * taken: takes and drops res->spinlock
 * held on exit: none
 * returns: DLM_NORMAL, DLM_BADARGS, DLM_IVLOCKID,
 * return value from dlmunlock_master
 */
int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data)
{
        struct dlm_ctxt *dlm = data;
        struct dlm_unlock_lock *unlock = (struct dlm_unlock_lock *)msg->buf;
        struct dlm_lock_resource *res = NULL;
        struct list_head *iter;
        struct dlm_lock *lock = NULL;
        enum dlm_status status = DLM_NORMAL;
        int found = 0, i;
        struct dlm_lockstatus *lksb = NULL;
        int ignore;
        u32 flags;
        struct list_head *queue;

        flags = be32_to_cpu(unlock->flags);

        if (flags & LKM_GET_LVB) {
                mlog(ML_ERROR, "bad args! GET_LVB specified on unlock!\n");
                return DLM_BADARGS;
        }

        if ((flags & (LKM_PUT_LVB|LKM_CANCEL)) == (LKM_PUT_LVB|LKM_CANCEL)) {
                mlog(ML_ERROR, "bad args! cannot modify lvb on a CANCEL "
                     "request!\n");
                return DLM_BADARGS;
        }

        if (unlock->namelen > DLM_LOCKID_NAME_MAX) {
                mlog(ML_ERROR, "Invalid name length in unlock handler!\n");
                return DLM_IVBUFLEN;
        }

        if (!dlm_grab(dlm))
                return DLM_REJECTED;

        mlog_bug_on_msg(!dlm_domain_fully_joined(dlm),
                        "Domain %s not fully joined!\n", dlm->name);

        mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" : "none");

        res = dlm_lookup_lockres(dlm, unlock->name, unlock->namelen);
        if (!res) {
                /* We assume here that a missing lock resource simply
                 * means it was migrated away and destroyed before the
                 * other node could detect it. */
                mlog(0, "returning DLM_FORWARD -- res no longer exists\n");
                status = DLM_FORWARD;
                goto not_found;
        }
        queue = &res->granted;
        found = 0;
        spin_lock(&res->spinlock);
        if (res->state & DLM_LOCK_RES_RECOVERING) {
                spin_unlock(&res->spinlock);
                mlog(0, "returning DLM_RECOVERING\n");
                status = DLM_RECOVERING;
                goto leave;
        }

        if (res->state & DLM_LOCK_RES_MIGRATING) {
                spin_unlock(&res->spinlock);
                mlog(0, "returning DLM_MIGRATING\n");
                status = DLM_MIGRATING;
                goto leave;
        }

        if (res->owner != dlm->node_num) {
                spin_unlock(&res->spinlock);
                mlog(0, "returning DLM_FORWARD -- not master\n");
                status = DLM_FORWARD;
                goto leave;
        }

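        /* Note on the scan below: advancing the queue pointer with
         * "queue++" walks granted -> converting -> blocked, which
         * relies on those three list heads being declared
         * consecutively in struct dlm_lock_resource (an assumption
         * inferred from this code, not verified here). */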
        for (i = 0; i < 3; i++) {
                list_for_each(iter, queue) {
                        lock = list_entry(iter, struct dlm_lock, list);
                        if (lock->ml.cookie == unlock->cookie &&
                            lock->ml.node == unlock->node_idx) {
                                dlm_lock_get(lock);
                                found = 1;
                                break;
                        }
                }
                if (found)
                        break;
                /* scan granted -> converting -> blocked queues */
                queue++;
        }
        spin_unlock(&res->spinlock);
        if (!found) {
                status = DLM_IVLOCKID;
                goto not_found;
        }

        /* lock was found on queue */
        lksb = lock->lksb;
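        /* Presumably only the holder of an EX lock may write the lock
         * value block, so strip the LVB flags for any other lock level
         * before handing off to dlmunlock_master(). */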
        if (flags & (LKM_VALBLK|LKM_PUT_LVB) &&
            lock->ml.type != LKM_EXMODE)
                flags &= ~(LKM_VALBLK|LKM_PUT_LVB);
        /* unlockast only called on originating node */
        if (flags & LKM_PUT_LVB) {
                lksb->flags |= DLM_LKSB_PUT_LVB;
                memcpy(&lksb->lvb[0], &unlock->lvb[0], DLM_LVB_LEN);
        }

        /* if this is in-progress, propagate the DLM_FORWARD
         * all the way back out */
        status = dlmunlock_master(dlm, res, lock, lksb, flags, &ignore);
        if (status == DLM_FORWARD)
                mlog(0, "lockres is in progress\n");

        if (flags & LKM_PUT_LVB)
                lksb->flags &= ~DLM_LKSB_PUT_LVB;

        dlm_lockres_calc_usage(dlm, res);
        dlm_kick_thread(dlm, res);

not_found:
        if (!found)
                mlog(ML_ERROR, "failed to find lock to unlock! "
                     "cookie=%u:%llu\n",
                     dlm_get_lock_cookie_node(unlock->cookie),
                     dlm_get_lock_cookie_seq(unlock->cookie));
        else
                dlm_lock_put(lock);

leave:
        if (res)
                dlm_lockres_put(res);

        dlm_put(dlm);

        return status;
}

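/*
 * Summary of the decision table implemented below: a cancel finds the
 * lock on exactly one queue and maps it to a result --
 *
 *	blocked:    DLM_NORMAL, remove the lock, call the ast
 *	converting: DLM_NORMAL, remove, regrant, clear convert_type
 *	granted:    DLM_CANCELGRANT (too late to cancel), ast only
 *	none:       DLM_IVLOCKID
 */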
static enum dlm_status dlm_get_cancel_actions(struct dlm_ctxt *dlm,
                                              struct dlm_lock_resource *res,
                                              struct dlm_lock *lock,
                                              struct dlm_lockstatus *lksb,
                                              int *actions)
{
        enum dlm_status status;

        if (dlm_lock_on_list(&res->blocked, lock)) {
                /* cancel this outright */
                status = DLM_NORMAL;
                *actions = (DLM_UNLOCK_CALL_AST |
                            DLM_UNLOCK_REMOVE_LOCK);
        } else if (dlm_lock_on_list(&res->converting, lock)) {
                /* cancel the request, put back on granted */
                status = DLM_NORMAL;
                *actions = (DLM_UNLOCK_CALL_AST |
                            DLM_UNLOCK_REMOVE_LOCK |
                            DLM_UNLOCK_REGRANT_LOCK |
                            DLM_UNLOCK_CLEAR_CONVERT_TYPE);
        } else if (dlm_lock_on_list(&res->granted, lock)) {
                /* too late, already granted. */
                status = DLM_CANCELGRANT;
                *actions = DLM_UNLOCK_CALL_AST;
        } else {
                mlog(ML_ERROR, "lock to cancel is not on any list!\n");
                status = DLM_IVLOCKID;
                *actions = 0;
        }
        return status;
}

static enum dlm_status dlm_get_unlock_actions(struct dlm_ctxt *dlm,
                                              struct dlm_lock_resource *res,
                                              struct dlm_lock *lock,
                                              struct dlm_lockstatus *lksb,
                                              int *actions)
{
        enum dlm_status status;

        /* unlock request */
        if (!dlm_lock_on_list(&res->granted, lock)) {
                status = DLM_DENIED;
                dlm_error(status);
                *actions = 0;
        } else {
                /* unlock granted lock */
                status = DLM_NORMAL;
                *actions = (DLM_UNLOCK_FREE_LOCK |
                            DLM_UNLOCK_CALL_AST |
                            DLM_UNLOCK_REMOVE_LOCK);
        }
        return status;
}
/* there seems to be no point in doing this async
 * since (even for the remote case) there is really
 * no work to queue up... so just do it and fire the
 * unlockast by hand when done... */
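/*
 * Sketch of a minimal caller (hypothetical names, for illustration
 * only; my_unlock_ast and done are not part of this file):
 *
 *	static void my_unlock_ast(void *astdata, enum dlm_status st)
 *	{
 *		complete((struct completion *)astdata);
 *	}
 *
 *	status = dlmunlock(dlm, &lksb, LKM_VALBLK, my_unlock_ast, &done);
 */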
enum dlm_status dlmunlock(struct dlm_ctxt *dlm, struct dlm_lockstatus *lksb,
                          int flags, dlm_astunlockfunc_t *unlockast, void *data)
{
        enum dlm_status status;
        struct dlm_lock_resource *res;
        struct dlm_lock *lock = NULL;
        int call_ast, is_master;

        mlog_entry_void();

        if (!lksb) {
                dlm_error(DLM_BADARGS);
                return DLM_BADARGS;
        }

        if (flags & ~(LKM_CANCEL | LKM_VALBLK | LKM_INVVALBLK)) {
                dlm_error(DLM_BADPARAM);
                return DLM_BADPARAM;
        }

        if ((flags & (LKM_VALBLK | LKM_CANCEL)) == (LKM_VALBLK | LKM_CANCEL)) {
                mlog(0, "VALBLK given with CANCEL: ignoring VALBLK\n");
                flags &= ~LKM_VALBLK;
        }

        if (!lksb->lockid || !lksb->lockid->lockres) {
                dlm_error(DLM_BADPARAM);
                return DLM_BADPARAM;
        }

        lock = lksb->lockid;
        BUG_ON(!lock);
        dlm_lock_get(lock);

        res = lock->lockres;
        BUG_ON(!res);
        dlm_lockres_get(res);
retry:
        call_ast = 0;
        /* need to retry up here because owner may have changed */
        mlog(0, "lock=%p res=%p\n", lock, res);

        spin_lock(&res->spinlock);
        is_master = (res->owner == dlm->node_num);
        if (flags & LKM_VALBLK && lock->ml.type != LKM_EXMODE)
                flags &= ~LKM_VALBLK;
        spin_unlock(&res->spinlock);

        if (is_master) {
                status = dlmunlock_master(dlm, res, lock, lksb, flags,
                                          &call_ast);
                mlog(0, "done calling dlmunlock_master: returned %d, "
                     "call_ast is %d\n", status, call_ast);
        } else {
                status = dlmunlock_remote(dlm, res, lock, lksb, flags,
                                          &call_ast);
                mlog(0, "done calling dlmunlock_remote: returned %d, "
                     "call_ast is %d\n", status, call_ast);
        }

        if (status == DLM_RECOVERING ||
            status == DLM_MIGRATING ||
            status == DLM_FORWARD) {
                /* We want to go away for a tiny bit to allow recovery
                 * / migration to complete on this resource. I don't
                 * know of any wait queue we could sleep on as this
                 * may be happening on another node. Perhaps the
                 * proper solution is to queue up requests on the
                 * other end? */

                /* do we want to yield(); ?? */
                msleep(50);

                mlog(0, "retrying unlock due to pending recovery/"
                     "migration/in-progress\n");
                goto retry;
        }

        if (call_ast) {
                mlog(0, "calling unlockast(%p, %d)\n", data, status);
                if (is_master) {
                        /* it is possible that there is one last bast
                         * pending. make sure it is flushed, then
                         * call the unlockast.
                         * not an issue if this lock is mastered remotely,
                         * since the lock has been removed from the
                         * lockres queues and cannot be found. */
                        dlm_kick_thread(dlm, NULL);
                        wait_event(dlm->ast_wq,
                                   dlm_lock_basts_flushed(dlm, lock));
                }
                (*unlockast)(data, status);
        }

        if (status == DLM_CANCELGRANT)
                status = DLM_NORMAL;

        if (status == DLM_NORMAL) {
                mlog(0, "kicking the thread\n");
                dlm_kick_thread(dlm, res);
        } else
                dlm_error(status);

        dlm_lockres_calc_usage(dlm, res);
        dlm_lockres_put(res);
        dlm_lock_put(lock);

        mlog(0, "returning status=%d!\n", status);
        return status;
}
EXPORT_SYMBOL_GPL(dlmunlock);