dlm: validate messages before processing
fs/dlm/lock.c
1 /******************************************************************************
2 *******************************************************************************
3 **
4 ** Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
5 **
6 ** This copyrighted material is made available to anyone wishing to use,
7 ** modify, copy, or redistribute it subject to the terms and conditions
8 ** of the GNU General Public License v.2.
9 **
10 *******************************************************************************
11 ******************************************************************************/
12
13 /* Central locking logic has four stages:
14
15 dlm_lock()
16 dlm_unlock()
17
18 request_lock(ls, lkb)
19 convert_lock(ls, lkb)
20 unlock_lock(ls, lkb)
21 cancel_lock(ls, lkb)
22
23 _request_lock(r, lkb)
24 _convert_lock(r, lkb)
25 _unlock_lock(r, lkb)
26 _cancel_lock(r, lkb)
27
28 do_request(r, lkb)
29 do_convert(r, lkb)
30 do_unlock(r, lkb)
31 do_cancel(r, lkb)
32
33 Stage 1 (lock, unlock) is mainly about checking input args and
34 splitting into one of the four main operations:
35
36 dlm_lock = request_lock
37 dlm_lock+CONVERT = convert_lock
38 dlm_unlock = unlock_lock
39 dlm_unlock+CANCEL = cancel_lock
40
41 Stage 2, xxxx_lock(), just finds and locks the relevant rsb which is
42 provided to the next stage.
43
44 Stage 3, _xxxx_lock(), determines if the operation is local or remote.
45 When remote, it calls send_xxxx(), when local it calls do_xxxx().
46
47 Stage 4, do_xxxx(), is the guts of the operation. It manipulates the
48 given rsb and lkb and queues callbacks.
49
50 For remote operations, send_xxxx() results in the corresponding do_xxxx()
51 function being executed on the remote node. The connecting send/receive
52 calls on local (L) and remote (R) nodes:
53
54 L: send_xxxx() -> R: receive_xxxx()
55 R: do_xxxx()
56 L: receive_xxxx_reply() <- R: send_xxxx_reply()
57 */
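/* Rough caller-side sketch (illustrative, not part of the original
   code): how the dlm_lock()/dlm_unlock() flags select the stage-1
   operation, assuming a lockspace `ls`, a struct dlm_lksb `lksb`, and
   ast/bast callbacks already set up:

	dlm_lock(ls, mode, &lksb, 0, name, len, 0, ast, arg, bast);
						-- request_lock
	dlm_lock(ls, mode, &lksb, DLM_LKF_CONVERT, name, len, 0,
		 ast, arg, bast);		-- convert_lock
	dlm_unlock(ls, lksb.sb_lkid, 0, &lksb, arg);
						-- unlock_lock
	dlm_unlock(ls, lksb.sb_lkid, DLM_LKF_CANCEL, &lksb, arg);
						-- cancel_lock
*/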
58 #include <linux/types.h>
59 #include "dlm_internal.h"
60 #include <linux/dlm_device.h>
61 #include "memory.h"
62 #include "lowcomms.h"
63 #include "requestqueue.h"
64 #include "util.h"
65 #include "dir.h"
66 #include "member.h"
67 #include "lockspace.h"
68 #include "ast.h"
69 #include "lock.h"
70 #include "rcom.h"
71 #include "recover.h"
72 #include "lvb_table.h"
73 #include "user.h"
74 #include "config.h"
75
76 static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb);
77 static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb);
78 static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb);
79 static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb);
80 static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb);
81 static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode);
82 static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb);
83 static int send_remove(struct dlm_rsb *r);
84 static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
85 static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
86 static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
87 struct dlm_message *ms);
88 static int receive_extralen(struct dlm_message *ms);
89 static void do_purge(struct dlm_ls *ls, int nodeid, int pid);
90 static void del_timeout(struct dlm_lkb *lkb);
91
92 /*
93 * Lock compatibility matrix - thanks Steve
94 * UN = Unlocked state. Not really a state, used as a flag
95 * PD = Padding. Used to make the matrix a nice power of two in size
96 * Other states are the same as the VMS DLM.
97 * Usage: matrix[grmode+1][rqmode+1] (although m[rq+1][gr+1] is the same)
98 */
99
100 static const int __dlm_compat_matrix[8][8] = {
101 /* UN NL CR CW PR PW EX PD */
102 {1, 1, 1, 1, 1, 1, 1, 0}, /* UN */
103 {1, 1, 1, 1, 1, 1, 1, 0}, /* NL */
104 {1, 1, 1, 1, 1, 1, 0, 0}, /* CR */
105 {1, 1, 1, 1, 0, 0, 0, 0}, /* CW */
106 {1, 1, 1, 0, 1, 0, 0, 0}, /* PR */
107 {1, 1, 1, 0, 0, 0, 0, 0}, /* PW */
108 {1, 1, 0, 0, 0, 0, 0, 0}, /* EX */
109 {0, 0, 0, 0, 0, 0, 0, 0} /* PD */
110 };
111
112 /*
113 * This defines the direction of transfer of LVB data.
114 * Granted mode is the row; requested mode is the column.
115 * Usage: matrix[grmode+1][rqmode+1]
116 * 1 = LVB is returned to the caller
117 * 0 = LVB is written to the resource
118 * -1 = nothing happens to the LVB
119 */
120
121 const int dlm_lvb_operations[8][8] = {
122 /* UN NL CR CW PR PW EX PD*/
123 { -1, 1, 1, 1, 1, 1, 1, -1 }, /* UN */
124 { -1, 1, 1, 1, 1, 1, 1, 0 }, /* NL */
125 { -1, -1, 1, 1, 1, 1, 1, 0 }, /* CR */
126 { -1, -1, -1, 1, 1, 1, 1, 0 }, /* CW */
127 { -1, -1, -1, -1, 1, 1, 1, 0 }, /* PR */
128 { -1, 0, 0, 0, 0, 0, 1, 0 }, /* PW */
129 { -1, 0, 0, 0, 0, 0, 0, 0 }, /* EX */
130 { -1, 0, 0, 0, 0, 0, 0, 0 } /* PD */
131 };
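/* Worked example of the table above (illustrative): a lock granted in
   PR converting to EX uses row PR, column EX, which is 1, so the
   resource's LVB is copied back to the caller; an EX lock converting
   down to NL uses row EX, column NL, which is 0, so the caller's LVB
   is written into the resource. The UN column is -1 in every row, so
   nothing happens to the LVB in that case. */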
132
133 #define modes_compat(gr, rq) \
134 __dlm_compat_matrix[(gr)->lkb_grmode + 1][(rq)->lkb_rqmode + 1]
135
136 int dlm_modes_compat(int mode1, int mode2)
137 {
138 return __dlm_compat_matrix[mode1 + 1][mode2 + 1];
139 }
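/* Example (illustrative): PR is shared, so
   dlm_modes_compat(DLM_LOCK_PR, DLM_LOCK_PR) returns 1, while
   dlm_modes_compat(DLM_LOCK_EX, DLM_LOCK_PR) returns 0 because an
   existing EX grant blocks any concurrent PR request. */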
140
141 /*
142 * Compatibility matrix for conversions with QUECVT set.
143 * Granted mode is the row; requested mode is the column.
144 * Usage: matrix[grmode+1][rqmode+1]
145 */
146
147 static const int __quecvt_compat_matrix[8][8] = {
148 /* UN NL CR CW PR PW EX PD */
149 {0, 0, 0, 0, 0, 0, 0, 0}, /* UN */
150 {0, 0, 1, 1, 1, 1, 1, 0}, /* NL */
151 {0, 0, 0, 1, 1, 1, 1, 0}, /* CR */
152 {0, 0, 0, 0, 1, 1, 1, 0}, /* CW */
153 {0, 0, 0, 1, 0, 1, 1, 0}, /* PR */
154 {0, 0, 0, 0, 0, 0, 1, 0}, /* PW */
155 {0, 0, 0, 0, 0, 0, 0, 0}, /* EX */
156 {0, 0, 0, 0, 0, 0, 0, 0} /* PD */
157 };
158
159 void dlm_print_lkb(struct dlm_lkb *lkb)
160 {
161 printk(KERN_ERR "lkb: nodeid %d id %x remid %x exflags %x flags %x\n"
162 " status %d rqmode %d grmode %d wait_type %d ast_type %d\n",
163 lkb->lkb_nodeid, lkb->lkb_id, lkb->lkb_remid, lkb->lkb_exflags,
164 lkb->lkb_flags, lkb->lkb_status, lkb->lkb_rqmode,
165 lkb->lkb_grmode, lkb->lkb_wait_type, lkb->lkb_ast_type);
166 }
167
168 void dlm_print_rsb(struct dlm_rsb *r)
169 {
170 printk(KERN_ERR "rsb: nodeid %d flags %lx first %x rlc %d name %s\n",
171 r->res_nodeid, r->res_flags, r->res_first_lkid,
172 r->res_recover_locks_count, r->res_name);
173 }
174
175 void dlm_dump_rsb(struct dlm_rsb *r)
176 {
177 struct dlm_lkb *lkb;
178
179 dlm_print_rsb(r);
180
181 printk(KERN_ERR "rsb: root_list empty %d recover_list empty %d\n",
182 list_empty(&r->res_root_list), list_empty(&r->res_recover_list));
183 printk(KERN_ERR "rsb lookup list\n");
184 list_for_each_entry(lkb, &r->res_lookup, lkb_rsb_lookup)
185 dlm_print_lkb(lkb);
186 printk(KERN_ERR "rsb grant queue:\n");
187 list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue)
188 dlm_print_lkb(lkb);
189 printk(KERN_ERR "rsb convert queue:\n");
190 list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue)
191 dlm_print_lkb(lkb);
192 printk(KERN_ERR "rsb wait queue:\n");
193 list_for_each_entry(lkb, &r->res_waitqueue, lkb_statequeue)
194 dlm_print_lkb(lkb);
195 }
196
197 /* Threads cannot use the lockspace while it's being recovered */
198
199 static inline void dlm_lock_recovery(struct dlm_ls *ls)
200 {
201 down_read(&ls->ls_in_recovery);
202 }
203
204 void dlm_unlock_recovery(struct dlm_ls *ls)
205 {
206 up_read(&ls->ls_in_recovery);
207 }
208
209 int dlm_lock_recovery_try(struct dlm_ls *ls)
210 {
211 return down_read_trylock(&ls->ls_in_recovery);
212 }
213
214 static inline int can_be_queued(struct dlm_lkb *lkb)
215 {
216 return !(lkb->lkb_exflags & DLM_LKF_NOQUEUE);
217 }
218
219 static inline int force_blocking_asts(struct dlm_lkb *lkb)
220 {
221 return (lkb->lkb_exflags & DLM_LKF_NOQUEUEBAST);
222 }
223
224 static inline int is_demoted(struct dlm_lkb *lkb)
225 {
226 return (lkb->lkb_sbflags & DLM_SBF_DEMOTED);
227 }
228
229 static inline int is_altmode(struct dlm_lkb *lkb)
230 {
231 return (lkb->lkb_sbflags & DLM_SBF_ALTMODE);
232 }
233
234 static inline int is_granted(struct dlm_lkb *lkb)
235 {
236 return (lkb->lkb_status == DLM_LKSTS_GRANTED);
237 }
238
239 static inline int is_remote(struct dlm_rsb *r)
240 {
241 DLM_ASSERT(r->res_nodeid >= 0, dlm_print_rsb(r););
242 return !!r->res_nodeid;
243 }
244
245 static inline int is_process_copy(struct dlm_lkb *lkb)
246 {
247 return (lkb->lkb_nodeid && !(lkb->lkb_flags & DLM_IFL_MSTCPY));
248 }
249
250 static inline int is_master_copy(struct dlm_lkb *lkb)
251 {
252 if (lkb->lkb_flags & DLM_IFL_MSTCPY)
253 DLM_ASSERT(lkb->lkb_nodeid, dlm_print_lkb(lkb););
254 return (lkb->lkb_flags & DLM_IFL_MSTCPY) ? 1 : 0;
255 }
256
257 static inline int middle_conversion(struct dlm_lkb *lkb)
258 {
259 if ((lkb->lkb_grmode==DLM_LOCK_PR && lkb->lkb_rqmode==DLM_LOCK_CW) ||
260 (lkb->lkb_rqmode==DLM_LOCK_PR && lkb->lkb_grmode==DLM_LOCK_CW))
261 return 1;
262 return 0;
263 }
264
265 static inline int down_conversion(struct dlm_lkb *lkb)
266 {
267 return (!middle_conversion(lkb) && lkb->lkb_rqmode < lkb->lkb_grmode);
268 }
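/* Example (illustrative): with the mode ordering NL < CR < CW < PR <
   PW < EX, an EX->PR conversion is a down-conversion, while PR<->CW
   conversions are "middle": PR and CW are mutually incompatible, so
   neither direction counts as a simple demotion. */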
269
270 static inline int is_overlap_unlock(struct dlm_lkb *lkb)
271 {
272 return lkb->lkb_flags & DLM_IFL_OVERLAP_UNLOCK;
273 }
274
275 static inline int is_overlap_cancel(struct dlm_lkb *lkb)
276 {
277 return lkb->lkb_flags & DLM_IFL_OVERLAP_CANCEL;
278 }
279
280 static inline int is_overlap(struct dlm_lkb *lkb)
281 {
282 return (lkb->lkb_flags & (DLM_IFL_OVERLAP_UNLOCK |
283 DLM_IFL_OVERLAP_CANCEL));
284 }
285
286 static void queue_cast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
287 {
288 if (is_master_copy(lkb))
289 return;
290
291 del_timeout(lkb);
292
293 DLM_ASSERT(lkb->lkb_lksb, dlm_print_lkb(lkb););
294
295 /* if the operation was a cancel, then return -DLM_ECANCEL; if a
296 timeout caused the cancel, then return -ETIMEDOUT */
297 if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_TIMEOUT_CANCEL)) {
298 lkb->lkb_flags &= ~DLM_IFL_TIMEOUT_CANCEL;
299 rv = -ETIMEDOUT;
300 }
301
302 if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_DEADLOCK_CANCEL)) {
303 lkb->lkb_flags &= ~DLM_IFL_DEADLOCK_CANCEL;
304 rv = -EDEADLK;
305 }
306
307 lkb->lkb_lksb->sb_status = rv;
308 lkb->lkb_lksb->sb_flags = lkb->lkb_sbflags;
309
310 dlm_add_ast(lkb, AST_COMP);
311 }
312
313 static inline void queue_cast_overlap(struct dlm_rsb *r, struct dlm_lkb *lkb)
314 {
315 queue_cast(r, lkb,
316 is_overlap_unlock(lkb) ? -DLM_EUNLOCK : -DLM_ECANCEL);
317 }
318
319 static void queue_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rqmode)
320 {
321 if (is_master_copy(lkb))
322 send_bast(r, lkb, rqmode);
323 else {
324 lkb->lkb_bastmode = rqmode;
325 dlm_add_ast(lkb, AST_BAST);
326 }
327 }
328
329 /*
330 * Basic operations on rsb's and lkb's
331 */
332
333 static struct dlm_rsb *create_rsb(struct dlm_ls *ls, char *name, int len)
334 {
335 struct dlm_rsb *r;
336
337 r = dlm_allocate_rsb(ls, len);
338 if (!r)
339 return NULL;
340
341 r->res_ls = ls;
342 r->res_length = len;
343 memcpy(r->res_name, name, len);
344 mutex_init(&r->res_mutex);
345
346 INIT_LIST_HEAD(&r->res_lookup);
347 INIT_LIST_HEAD(&r->res_grantqueue);
348 INIT_LIST_HEAD(&r->res_convertqueue);
349 INIT_LIST_HEAD(&r->res_waitqueue);
350 INIT_LIST_HEAD(&r->res_root_list);
351 INIT_LIST_HEAD(&r->res_recover_list);
352
353 return r;
354 }
355
356 static int search_rsb_list(struct list_head *head, char *name, int len,
357 unsigned int flags, struct dlm_rsb **r_ret)
358 {
359 struct dlm_rsb *r;
360 int error = 0;
361
362 list_for_each_entry(r, head, res_hashchain) {
363 if (len == r->res_length && !memcmp(name, r->res_name, len))
364 goto found;
365 }
366 return -EBADR;
367
368 found:
369 if (r->res_nodeid && (flags & R_MASTER))
370 error = -ENOTBLK;
371 *r_ret = r;
372 return error;
373 }
374
375 static int _search_rsb(struct dlm_ls *ls, char *name, int len, int b,
376 unsigned int flags, struct dlm_rsb **r_ret)
377 {
378 struct dlm_rsb *r;
379 int error;
380
381 error = search_rsb_list(&ls->ls_rsbtbl[b].list, name, len, flags, &r);
382 if (!error) {
383 kref_get(&r->res_ref);
384 goto out;
385 }
386 error = search_rsb_list(&ls->ls_rsbtbl[b].toss, name, len, flags, &r);
387 if (error)
388 goto out;
389
390 list_move(&r->res_hashchain, &ls->ls_rsbtbl[b].list);
391
392 if (dlm_no_directory(ls))
393 goto out;
394
395 if (r->res_nodeid == -1) {
396 rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
397 r->res_first_lkid = 0;
398 } else if (r->res_nodeid > 0) {
399 rsb_set_flag(r, RSB_MASTER_UNCERTAIN);
400 r->res_first_lkid = 0;
401 } else {
402 DLM_ASSERT(r->res_nodeid == 0, dlm_print_rsb(r););
403 DLM_ASSERT(!rsb_flag(r, RSB_MASTER_UNCERTAIN),);
404 }
405 out:
406 *r_ret = r;
407 return error;
408 }
409
410 static int search_rsb(struct dlm_ls *ls, char *name, int len, int b,
411 unsigned int flags, struct dlm_rsb **r_ret)
412 {
413 int error;
414 write_lock(&ls->ls_rsbtbl[b].lock);
415 error = _search_rsb(ls, name, len, b, flags, r_ret);
416 write_unlock(&ls->ls_rsbtbl[b].lock);
417 return error;
418 }
419
420 /*
421 * Find rsb in rsbtbl and potentially create/add one
422 *
423 * Delaying the release of rsb's has a similar benefit to applications keeping
424 * NL locks on an rsb, but without the guarantee that the cached master value
425 * will still be valid when the rsb is reused. Apps aren't always smart enough
426 * to keep NL locks on an rsb that they may lock again shortly; this can lead
427 * to excessive master lookups and removals if we don't delay the release.
428 *
429 * Searching for an rsb means looking through both the normal list and toss
430 * list. When found on the toss list the rsb is moved to the normal list with
431 * ref count of 1; when found on normal list the ref count is incremented.
432 */
433
434 static int find_rsb(struct dlm_ls *ls, char *name, int namelen,
435 unsigned int flags, struct dlm_rsb **r_ret)
436 {
437 struct dlm_rsb *r, *tmp;
438 uint32_t hash, bucket;
439 int error = 0;
440
441 if (dlm_no_directory(ls))
442 flags |= R_CREATE;
443
444 hash = jhash(name, namelen, 0);
445 bucket = hash & (ls->ls_rsbtbl_size - 1);
446
447 error = search_rsb(ls, name, namelen, bucket, flags, &r);
448 if (!error)
449 goto out;
450
451 if (error == -EBADR && !(flags & R_CREATE))
452 goto out;
453
454 /* the rsb was found but wasn't a master copy */
455 if (error == -ENOTBLK)
456 goto out;
457
458 error = -ENOMEM;
459 r = create_rsb(ls, name, namelen);
460 if (!r)
461 goto out;
462
463 r->res_hash = hash;
464 r->res_bucket = bucket;
465 r->res_nodeid = -1;
466 kref_init(&r->res_ref);
467
468 /* With no directory, the master can be set immediately */
469 if (dlm_no_directory(ls)) {
470 int nodeid = dlm_dir_nodeid(r);
471 if (nodeid == dlm_our_nodeid())
472 nodeid = 0;
473 r->res_nodeid = nodeid;
474 }
475
476 write_lock(&ls->ls_rsbtbl[bucket].lock);
477 error = _search_rsb(ls, name, namelen, bucket, 0, &tmp);
478 if (!error) {
479 write_unlock(&ls->ls_rsbtbl[bucket].lock);
480 dlm_free_rsb(r);
481 r = tmp;
482 goto out;
483 }
484 list_add(&r->res_hashchain, &ls->ls_rsbtbl[bucket].list);
485 write_unlock(&ls->ls_rsbtbl[bucket].lock);
486 error = 0;
487 out:
488 *r_ret = r;
489 return error;
490 }
491
492 int dlm_find_rsb(struct dlm_ls *ls, char *name, int namelen,
493 unsigned int flags, struct dlm_rsb **r_ret)
494 {
495 return find_rsb(ls, name, namelen, flags, r_ret);
496 }
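/* Assumed usage sketch (illustrative): a typical lookup that creates
   the rsb if it does not exist yet and later drops the reference that
   a successful find_rsb() takes:

	struct dlm_rsb *r;
	error = find_rsb(ls, name, namelen, R_CREATE, &r);
	if (error)
		return error;
	... operate on r ...
	put_rsb(r);
*/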
497
498 /* This is only called to add a reference when the code already holds
499 a valid reference to the rsb, so there's no need for locking. */
500
501 static inline void hold_rsb(struct dlm_rsb *r)
502 {
503 kref_get(&r->res_ref);
504 }
505
506 void dlm_hold_rsb(struct dlm_rsb *r)
507 {
508 hold_rsb(r);
509 }
510
511 static void toss_rsb(struct kref *kref)
512 {
513 struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);
514 struct dlm_ls *ls = r->res_ls;
515
516 DLM_ASSERT(list_empty(&r->res_root_list), dlm_print_rsb(r););
517 kref_init(&r->res_ref);
518 list_move(&r->res_hashchain, &ls->ls_rsbtbl[r->res_bucket].toss);
519 r->res_toss_time = jiffies;
520 if (r->res_lvbptr) {
521 dlm_free_lvb(r->res_lvbptr);
522 r->res_lvbptr = NULL;
523 }
524 }
525
526 /* When all references to the rsb are gone it's transferred to
527 the tossed list for later disposal. */
528
529 static void put_rsb(struct dlm_rsb *r)
530 {
531 struct dlm_ls *ls = r->res_ls;
532 uint32_t bucket = r->res_bucket;
533
534 write_lock(&ls->ls_rsbtbl[bucket].lock);
535 kref_put(&r->res_ref, toss_rsb);
536 write_unlock(&ls->ls_rsbtbl[bucket].lock);
537 }
538
539 void dlm_put_rsb(struct dlm_rsb *r)
540 {
541 put_rsb(r);
542 }
543
544 /* See comment for unhold_lkb */
545
546 static void unhold_rsb(struct dlm_rsb *r)
547 {
548 int rv;
549 rv = kref_put(&r->res_ref, toss_rsb);
550 DLM_ASSERT(!rv, dlm_dump_rsb(r););
551 }
552
553 static void kill_rsb(struct kref *kref)
554 {
555 struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);
556
557 /* All work is done after the return from kref_put() so we
558 can release the write_lock before the remove and free. */
559
560 DLM_ASSERT(list_empty(&r->res_lookup), dlm_dump_rsb(r););
561 DLM_ASSERT(list_empty(&r->res_grantqueue), dlm_dump_rsb(r););
562 DLM_ASSERT(list_empty(&r->res_convertqueue), dlm_dump_rsb(r););
563 DLM_ASSERT(list_empty(&r->res_waitqueue), dlm_dump_rsb(r););
564 DLM_ASSERT(list_empty(&r->res_root_list), dlm_dump_rsb(r););
565 DLM_ASSERT(list_empty(&r->res_recover_list), dlm_dump_rsb(r););
566 }
567
568 /* Attaching/detaching lkb's from rsb's is for rsb reference counting.
569 The rsb must exist as long as any lkb's for it do. */
570
571 static void attach_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
572 {
573 hold_rsb(r);
574 lkb->lkb_resource = r;
575 }
576
577 static void detach_lkb(struct dlm_lkb *lkb)
578 {
579 if (lkb->lkb_resource) {
580 put_rsb(lkb->lkb_resource);
581 lkb->lkb_resource = NULL;
582 }
583 }
584
585 static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
586 {
587 struct dlm_lkb *lkb, *tmp;
588 uint32_t lkid = 0;
589 uint16_t bucket;
590
591 lkb = dlm_allocate_lkb(ls);
592 if (!lkb)
593 return -ENOMEM;
594
595 lkb->lkb_nodeid = -1;
596 lkb->lkb_grmode = DLM_LOCK_IV;
597 kref_init(&lkb->lkb_ref);
598 INIT_LIST_HEAD(&lkb->lkb_ownqueue);
599 INIT_LIST_HEAD(&lkb->lkb_rsb_lookup);
600 INIT_LIST_HEAD(&lkb->lkb_time_list);
601
602 get_random_bytes(&bucket, sizeof(bucket));
603 bucket &= (ls->ls_lkbtbl_size - 1);
604
605 write_lock(&ls->ls_lkbtbl[bucket].lock);
606
607 /* counter can roll over so we must verify lkid is not in use */
608
609 while (lkid == 0) {
610 lkid = (bucket << 16) | ls->ls_lkbtbl[bucket].counter++;
611
612 list_for_each_entry(tmp, &ls->ls_lkbtbl[bucket].list,
613 lkb_idtbl_list) {
614 if (tmp->lkb_id != lkid)
615 continue;
616 lkid = 0;
617 break;
618 }
619 }
620
621 lkb->lkb_id = lkid;
622 list_add(&lkb->lkb_idtbl_list, &ls->ls_lkbtbl[bucket].list);
623 write_unlock(&ls->ls_lkbtbl[bucket].lock);
624
625 *lkb_ret = lkb;
626 return 0;
627 }
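/* Example of the lkid layout above (illustrative): the high 16 bits
   hold the bucket and the low 16 bits the per-bucket counter, so
   bucket 0x0007 with counter value 0x0001 yields lkid 0x00070001;
   find_lkb() below recovers the bucket with (lkid >> 16). */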
628
629 static struct dlm_lkb *__find_lkb(struct dlm_ls *ls, uint32_t lkid)
630 {
631 struct dlm_lkb *lkb;
632 uint16_t bucket = (lkid >> 16);
633
634 list_for_each_entry(lkb, &ls->ls_lkbtbl[bucket].list, lkb_idtbl_list) {
635 if (lkb->lkb_id == lkid)
636 return lkb;
637 }
638 return NULL;
639 }
640
641 static int find_lkb(struct dlm_ls *ls, uint32_t lkid, struct dlm_lkb **lkb_ret)
642 {
643 struct dlm_lkb *lkb;
644 uint16_t bucket = (lkid >> 16);
645
646 if (bucket >= ls->ls_lkbtbl_size)
647 return -EBADSLT;
648
649 read_lock(&ls->ls_lkbtbl[bucket].lock);
650 lkb = __find_lkb(ls, lkid);
651 if (lkb)
652 kref_get(&lkb->lkb_ref);
653 read_unlock(&ls->ls_lkbtbl[bucket].lock);
654
655 *lkb_ret = lkb;
656 return lkb ? 0 : -ENOENT;
657 }
658
659 static void kill_lkb(struct kref *kref)
660 {
661 struct dlm_lkb *lkb = container_of(kref, struct dlm_lkb, lkb_ref);
662
663 /* All work is done after the return from kref_put() so we
664 can release the write_lock before the detach_lkb */
665
666 DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););
667 }
668
669 /* __put_lkb() is used when an lkb may not have an rsb attached to
670 it so we need to provide the lockspace explicitly */
671
672 static int __put_lkb(struct dlm_ls *ls, struct dlm_lkb *lkb)
673 {
674 uint16_t bucket = (lkb->lkb_id >> 16);
675
676 write_lock(&ls->ls_lkbtbl[bucket].lock);
677 if (kref_put(&lkb->lkb_ref, kill_lkb)) {
678 list_del(&lkb->lkb_idtbl_list);
679 write_unlock(&ls->ls_lkbtbl[bucket].lock);
680
681 detach_lkb(lkb);
682
683 /* for local/process lkbs, lvbptr points to caller's lksb */
684 if (lkb->lkb_lvbptr && is_master_copy(lkb))
685 dlm_free_lvb(lkb->lkb_lvbptr);
686 dlm_free_lkb(lkb);
687 return 1;
688 } else {
689 write_unlock(&ls->ls_lkbtbl[bucket].lock);
690 return 0;
691 }
692 }
693
694 int dlm_put_lkb(struct dlm_lkb *lkb)
695 {
696 struct dlm_ls *ls;
697
698 DLM_ASSERT(lkb->lkb_resource, dlm_print_lkb(lkb););
699 DLM_ASSERT(lkb->lkb_resource->res_ls, dlm_print_lkb(lkb););
700
701 ls = lkb->lkb_resource->res_ls;
702 return __put_lkb(ls, lkb);
703 }
704
705 /* This is only called to add a reference when the code already holds
706 a valid reference to the lkb, so there's no need for locking. */
707
708 static inline void hold_lkb(struct dlm_lkb *lkb)
709 {
710 kref_get(&lkb->lkb_ref);
711 }
712
713 /* This is called when we need to remove a reference and are certain
714 it's not the last ref. e.g. del_lkb is always called between a
715 find_lkb/put_lkb and is always the inverse of a previous add_lkb.
716 put_lkb would work fine, but would involve unnecessary locking */
717
718 static inline void unhold_lkb(struct dlm_lkb *lkb)
719 {
720 int rv;
721 rv = kref_put(&lkb->lkb_ref, kill_lkb);
722 DLM_ASSERT(!rv, dlm_print_lkb(lkb););
723 }
724
725 static void lkb_add_ordered(struct list_head *new, struct list_head *head,
726 int mode)
727 {
728 struct dlm_lkb *lkb = NULL;
729
730 list_for_each_entry(lkb, head, lkb_statequeue)
731 if (lkb->lkb_rqmode < mode)
732 break;
733
734 if (!lkb)
735 list_add_tail(new, head);
736 else
737 __list_add(new, lkb->lkb_statequeue.prev, &lkb->lkb_statequeue);
738 }
739
740 /* add/remove lkb to rsb's grant/convert/wait queue */
741
742 static void add_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int status)
743 {
744 kref_get(&lkb->lkb_ref);
745
746 DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););
747
748 lkb->lkb_status = status;
749
750 switch (status) {
751 case DLM_LKSTS_WAITING:
752 if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
753 list_add(&lkb->lkb_statequeue, &r->res_waitqueue);
754 else
755 list_add_tail(&lkb->lkb_statequeue, &r->res_waitqueue);
756 break;
757 case DLM_LKSTS_GRANTED:
758 /* convention says granted locks kept in order of grmode */
759 lkb_add_ordered(&lkb->lkb_statequeue, &r->res_grantqueue,
760 lkb->lkb_grmode);
761 break;
762 case DLM_LKSTS_CONVERT:
763 if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
764 list_add(&lkb->lkb_statequeue, &r->res_convertqueue);
765 else
766 list_add_tail(&lkb->lkb_statequeue,
767 &r->res_convertqueue);
768 break;
769 default:
770 DLM_ASSERT(0, dlm_print_lkb(lkb); printk("sts=%d\n", status););
771 }
772 }
773
774 static void del_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
775 {
776 lkb->lkb_status = 0;
777 list_del(&lkb->lkb_statequeue);
778 unhold_lkb(lkb);
779 }
780
781 static void move_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int sts)
782 {
783 hold_lkb(lkb);
784 del_lkb(r, lkb);
785 add_lkb(r, lkb, sts);
786 unhold_lkb(lkb);
787 }
788
789 static int msg_reply_type(int mstype)
790 {
791 switch (mstype) {
792 case DLM_MSG_REQUEST:
793 return DLM_MSG_REQUEST_REPLY;
794 case DLM_MSG_CONVERT:
795 return DLM_MSG_CONVERT_REPLY;
796 case DLM_MSG_UNLOCK:
797 return DLM_MSG_UNLOCK_REPLY;
798 case DLM_MSG_CANCEL:
799 return DLM_MSG_CANCEL_REPLY;
800 case DLM_MSG_LOOKUP:
801 return DLM_MSG_LOOKUP_REPLY;
802 }
803 return -1;
804 }
805
806 /* add/remove lkb from global waiters list of lkb's waiting for
807 a reply from a remote node */
808
809 static int add_to_waiters(struct dlm_lkb *lkb, int mstype)
810 {
811 struct dlm_ls *ls = lkb->lkb_resource->res_ls;
812 int error = 0;
813
814 mutex_lock(&ls->ls_waiters_mutex);
815
816 if (is_overlap_unlock(lkb) ||
817 (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL))) {
818 error = -EINVAL;
819 goto out;
820 }
821
822 if (lkb->lkb_wait_type || is_overlap_cancel(lkb)) {
823 switch (mstype) {
824 case DLM_MSG_UNLOCK:
825 lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
826 break;
827 case DLM_MSG_CANCEL:
828 lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
829 break;
830 default:
831 error = -EBUSY;
832 goto out;
833 }
834 lkb->lkb_wait_count++;
835 hold_lkb(lkb);
836
837 log_debug(ls, "add overlap %x cur %d new %d count %d flags %x",
838 lkb->lkb_id, lkb->lkb_wait_type, mstype,
839 lkb->lkb_wait_count, lkb->lkb_flags);
840 goto out;
841 }
842
843 DLM_ASSERT(!lkb->lkb_wait_count,
844 dlm_print_lkb(lkb);
845 printk("wait_count %d\n", lkb->lkb_wait_count););
846
847 lkb->lkb_wait_count++;
848 lkb->lkb_wait_type = mstype;
849 hold_lkb(lkb);
850 list_add(&lkb->lkb_wait_reply, &ls->ls_waiters);
851 out:
852 if (error)
853 log_error(ls, "add_to_waiters %x error %d flags %x %d %d %s",
854 lkb->lkb_id, error, lkb->lkb_flags, mstype,
855 lkb->lkb_wait_type, lkb->lkb_resource->res_name);
856 mutex_unlock(&ls->ls_waiters_mutex);
857 return error;
858 }
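/* Example of the overlap tracking above (illustrative): if an lkb is
   already on the waiters list for a DLM_MSG_CONVERT reply and the
   application then issues a force-unlock, add_to_waiters() with
   DLM_MSG_UNLOCK sets DLM_IFL_OVERLAP_UNLOCK and bumps lkb_wait_count
   to 2 instead of adding a second list entry; _remove_from_waiters()
   later clears the flag and drops the count as each reply arrives. */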
859
860 /* We clear the RESEND flag because we might be taking an lkb off the waiters
861 list as part of process_requestqueue (e.g. a lookup that has an optimized
862 request reply on the requestqueue) between dlm_recover_waiters_pre() which
863 set RESEND and dlm_recover_waiters_post() */
864
865 static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype)
866 {
867 struct dlm_ls *ls = lkb->lkb_resource->res_ls;
868 int overlap_done = 0;
869
870 if (is_overlap_unlock(lkb) && (mstype == DLM_MSG_UNLOCK_REPLY)) {
871 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
872 overlap_done = 1;
873 goto out_del;
874 }
875
876 if (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL_REPLY)) {
877 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
878 overlap_done = 1;
879 goto out_del;
880 }
881
882 /* N.B. type of reply may not always correspond to type of original
883 msg due to lookup->request optimization, verify others? */
884
885 if (lkb->lkb_wait_type) {
886 lkb->lkb_wait_type = 0;
887 goto out_del;
888 }
889
890 log_error(ls, "remove_from_waiters lkid %x flags %x types %d %d",
891 lkb->lkb_id, lkb->lkb_flags, mstype, lkb->lkb_wait_type);
892 return -1;
893
894 out_del:
895 /* the force-unlock/cancel has completed and we haven't received a reply
896 to the op that was in progress prior to the unlock/cancel; we
897 give up on any reply to the earlier op. FIXME: not sure when/how
898 this would happen */
899
900 if (overlap_done && lkb->lkb_wait_type) {
901 log_error(ls, "remove_from_waiters %x reply %d give up on %d",
902 lkb->lkb_id, mstype, lkb->lkb_wait_type);
903 lkb->lkb_wait_count--;
904 lkb->lkb_wait_type = 0;
905 }
906
907 DLM_ASSERT(lkb->lkb_wait_count, dlm_print_lkb(lkb););
908
909 lkb->lkb_flags &= ~DLM_IFL_RESEND;
910 lkb->lkb_wait_count--;
911 if (!lkb->lkb_wait_count)
912 list_del_init(&lkb->lkb_wait_reply);
913 unhold_lkb(lkb);
914 return 0;
915 }
916
917 static int remove_from_waiters(struct dlm_lkb *lkb, int mstype)
918 {
919 struct dlm_ls *ls = lkb->lkb_resource->res_ls;
920 int error;
921
922 mutex_lock(&ls->ls_waiters_mutex);
923 error = _remove_from_waiters(lkb, mstype);
924 mutex_unlock(&ls->ls_waiters_mutex);
925 return error;
926 }
927
928 /* Handles situations where we might be processing a "fake" or "stub" reply in
929 which we can't try to take waiters_mutex again. */
930
931 static int remove_from_waiters_ms(struct dlm_lkb *lkb, struct dlm_message *ms)
932 {
933 struct dlm_ls *ls = lkb->lkb_resource->res_ls;
934 int error;
935
936 if (ms != &ls->ls_stub_ms)
937 mutex_lock(&ls->ls_waiters_mutex);
938 error = _remove_from_waiters(lkb, ms->m_type);
939 if (ms != &ls->ls_stub_ms)
940 mutex_unlock(&ls->ls_waiters_mutex);
941 return error;
942 }
943
944 static void dir_remove(struct dlm_rsb *r)
945 {
946 int to_nodeid;
947
948 if (dlm_no_directory(r->res_ls))
949 return;
950
951 to_nodeid = dlm_dir_nodeid(r);
952 if (to_nodeid != dlm_our_nodeid())
953 send_remove(r);
954 else
955 dlm_dir_remove_entry(r->res_ls, to_nodeid,
956 r->res_name, r->res_length);
957 }
958
959 /* FIXME: shouldn't this be able to exit as soon as one non-due rsb is
960 found since they are in order of newest to oldest? */
961
962 static int shrink_bucket(struct dlm_ls *ls, int b)
963 {
964 struct dlm_rsb *r;
965 int count = 0, found;
966
967 for (;;) {
968 found = 0;
969 write_lock(&ls->ls_rsbtbl[b].lock);
970 list_for_each_entry_reverse(r, &ls->ls_rsbtbl[b].toss,
971 res_hashchain) {
972 if (!time_after_eq(jiffies, r->res_toss_time +
973 dlm_config.ci_toss_secs * HZ))
974 continue;
975 found = 1;
976 break;
977 }
978
979 if (!found) {
980 write_unlock(&ls->ls_rsbtbl[b].lock);
981 break;
982 }
983
984 if (kref_put(&r->res_ref, kill_rsb)) {
985 list_del(&r->res_hashchain);
986 write_unlock(&ls->ls_rsbtbl[b].lock);
987
988 if (is_master(r))
989 dir_remove(r);
990 dlm_free_rsb(r);
991 count++;
992 } else {
993 write_unlock(&ls->ls_rsbtbl[b].lock);
994 log_error(ls, "tossed rsb in use %s", r->res_name);
995 }
996 }
997
998 return count;
999 }
1000
1001 void dlm_scan_rsbs(struct dlm_ls *ls)
1002 {
1003 int i;
1004
1005 for (i = 0; i < ls->ls_rsbtbl_size; i++) {
1006 shrink_bucket(ls, i);
1007 if (dlm_locking_stopped(ls))
1008 break;
1009 cond_resched();
1010 }
1011 }
1012
1013 static void add_timeout(struct dlm_lkb *lkb)
1014 {
1015 struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1016
1017 if (is_master_copy(lkb)) {
1018 lkb->lkb_timestamp = jiffies;
1019 return;
1020 }
1021
1022 if (test_bit(LSFL_TIMEWARN, &ls->ls_flags) &&
1023 !(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
1024 lkb->lkb_flags |= DLM_IFL_WATCH_TIMEWARN;
1025 goto add_it;
1026 }
1027 if (lkb->lkb_exflags & DLM_LKF_TIMEOUT)
1028 goto add_it;
1029 return;
1030
1031 add_it:
1032 DLM_ASSERT(list_empty(&lkb->lkb_time_list), dlm_print_lkb(lkb););
1033 mutex_lock(&ls->ls_timeout_mutex);
1034 hold_lkb(lkb);
1035 lkb->lkb_timestamp = jiffies;
1036 list_add_tail(&lkb->lkb_time_list, &ls->ls_timeout);
1037 mutex_unlock(&ls->ls_timeout_mutex);
1038 }
1039
1040 static void del_timeout(struct dlm_lkb *lkb)
1041 {
1042 struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1043
1044 mutex_lock(&ls->ls_timeout_mutex);
1045 if (!list_empty(&lkb->lkb_time_list)) {
1046 list_del_init(&lkb->lkb_time_list);
1047 unhold_lkb(lkb);
1048 }
1049 mutex_unlock(&ls->ls_timeout_mutex);
1050 }
1051
1052 /* FIXME: is it safe to look at lkb_exflags, lkb_flags, lkb_timestamp, and
1053 lkb_lksb_timeout without lock_rsb? Note: we can't lock timeout_mutex
1054 and then lock rsb because of lock ordering in add_timeout. We may need
1055 to specify some special timeout-related bits in the lkb that are just to
1056 be accessed under the timeout_mutex. */
1057
1058 void dlm_scan_timeout(struct dlm_ls *ls)
1059 {
1060 struct dlm_rsb *r;
1061 struct dlm_lkb *lkb;
1062 int do_cancel, do_warn;
1063
1064 for (;;) {
1065 if (dlm_locking_stopped(ls))
1066 break;
1067
1068 do_cancel = 0;
1069 do_warn = 0;
1070 mutex_lock(&ls->ls_timeout_mutex);
1071 list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list) {
1072
1073 if ((lkb->lkb_exflags & DLM_LKF_TIMEOUT) &&
1074 time_after_eq(jiffies, lkb->lkb_timestamp +
1075 lkb->lkb_timeout_cs * HZ/100))
1076 do_cancel = 1;
1077
1078 if ((lkb->lkb_flags & DLM_IFL_WATCH_TIMEWARN) &&
1079 time_after_eq(jiffies, lkb->lkb_timestamp +
1080 dlm_config.ci_timewarn_cs * HZ/100))
1081 do_warn = 1;
1082
1083 if (!do_cancel && !do_warn)
1084 continue;
1085 hold_lkb(lkb);
1086 break;
1087 }
1088 mutex_unlock(&ls->ls_timeout_mutex);
1089
1090 if (!do_cancel && !do_warn)
1091 break;
1092
1093 r = lkb->lkb_resource;
1094 hold_rsb(r);
1095 lock_rsb(r);
1096
1097 if (do_warn) {
1098 /* clear flag so we only warn once */
1099 lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN;
1100 if (!(lkb->lkb_exflags & DLM_LKF_TIMEOUT))
1101 del_timeout(lkb);
1102 dlm_timeout_warn(lkb);
1103 }
1104
1105 if (do_cancel) {
1106 log_debug(ls, "timeout cancel %x node %d %s",
1107 lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
1108 lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN;
1109 lkb->lkb_flags |= DLM_IFL_TIMEOUT_CANCEL;
1110 del_timeout(lkb);
1111 _cancel_lock(r, lkb);
1112 }
1113
1114 unlock_rsb(r);
1115 unhold_rsb(r);
1116 dlm_put_lkb(lkb);
1117 }
1118 }
1119
1120 /* This is only called by dlm_recoverd, and we rely on dlm_ls_stop() stopping
1121 dlm_recoverd before checking/setting ls_recover_begin. */
1122
1123 void dlm_adjust_timeouts(struct dlm_ls *ls)
1124 {
1125 struct dlm_lkb *lkb;
1126 long adj = jiffies - ls->ls_recover_begin;
1127
1128 ls->ls_recover_begin = 0;
1129 mutex_lock(&ls->ls_timeout_mutex);
1130 list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list)
1131 lkb->lkb_timestamp += adj;
1132 mutex_unlock(&ls->ls_timeout_mutex);
1133 }
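/* Worked example (illustrative): if recovery began at jiffies j0 and
   ends at j0 + 5*HZ, then adj == 5*HZ and every pending timeout's
   lkb_timestamp moves forward by five seconds, so time spent in
   recovery does not count against DLM_LKF_TIMEOUT or timewarn
   deadlines. */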
1134
1135 /* lkb is master or local copy */
1136
1137 static void set_lvb_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1138 {
1139 int b, len = r->res_ls->ls_lvblen;
1140
1141 /* b=1 lvb returned to caller
1142 b=0 lvb written to rsb or invalidated
1143 b=-1 do nothing */
1144
1145 b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
1146
1147 if (b == 1) {
1148 if (!lkb->lkb_lvbptr)
1149 return;
1150
1151 if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
1152 return;
1153
1154 if (!r->res_lvbptr)
1155 return;
1156
1157 memcpy(lkb->lkb_lvbptr, r->res_lvbptr, len);
1158 lkb->lkb_lvbseq = r->res_lvbseq;
1159
1160 } else if (b == 0) {
1161 if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
1162 rsb_set_flag(r, RSB_VALNOTVALID);
1163 return;
1164 }
1165
1166 if (!lkb->lkb_lvbptr)
1167 return;
1168
1169 if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
1170 return;
1171
1172 if (!r->res_lvbptr)
1173 r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
1174
1175 if (!r->res_lvbptr)
1176 return;
1177
1178 memcpy(r->res_lvbptr, lkb->lkb_lvbptr, len);
1179 r->res_lvbseq++;
1180 lkb->lkb_lvbseq = r->res_lvbseq;
1181 rsb_clear_flag(r, RSB_VALNOTVALID);
1182 }
1183
1184 if (rsb_flag(r, RSB_VALNOTVALID))
1185 lkb->lkb_sbflags |= DLM_SBF_VALNOTVALID;
1186 }
1187
1188 static void set_lvb_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1189 {
1190 if (lkb->lkb_grmode < DLM_LOCK_PW)
1191 return;
1192
1193 if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
1194 rsb_set_flag(r, RSB_VALNOTVALID);
1195 return;
1196 }
1197
1198 if (!lkb->lkb_lvbptr)
1199 return;
1200
1201 if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
1202 return;
1203
1204 if (!r->res_lvbptr)
1205 r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
1206
1207 if (!r->res_lvbptr)
1208 return;
1209
1210 memcpy(r->res_lvbptr, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
1211 r->res_lvbseq++;
1212 rsb_clear_flag(r, RSB_VALNOTVALID);
1213 }
1214
1215 /* lkb is process copy (pc) */
1216
1217 static void set_lvb_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
1218 struct dlm_message *ms)
1219 {
1220 int b;
1221
1222 if (!lkb->lkb_lvbptr)
1223 return;
1224
1225 if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
1226 return;
1227
1228 b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
1229 if (b == 1) {
1230 int len = receive_extralen(ms);
1231 memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
1232 lkb->lkb_lvbseq = ms->m_lvbseq;
1233 }
1234 }
1235
1236 /* Manipulate lkb's on rsb's convert/granted/waiting queues
1237 remove_lock -- used for unlock, removes lkb from granted
1238 revert_lock -- used for cancel, moves lkb from convert to granted
1239 grant_lock -- used for request and convert, adds lkb to granted or
1240 moves lkb from convert or waiting to granted
1241
1242 Each of these is used for master or local copy lkb's. There is
1243 also a _pc() variation used to make the corresponding change on
1244 a process copy (pc) lkb. */
1245
1246 static void _remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1247 {
1248 del_lkb(r, lkb);
1249 lkb->lkb_grmode = DLM_LOCK_IV;
1250 /* this unhold undoes the original ref from create_lkb()
1251 so this leads to the lkb being freed */
1252 unhold_lkb(lkb);
1253 }
1254
1255 static void remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1256 {
1257 set_lvb_unlock(r, lkb);
1258 _remove_lock(r, lkb);
1259 }
1260
1261 static void remove_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
1262 {
1263 _remove_lock(r, lkb);
1264 }
1265
1266 /* returns: 0 did nothing
1267 1 moved lock to granted
1268 -1 removed lock */
1269
1270 static int revert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1271 {
1272 int rv = 0;
1273
1274 lkb->lkb_rqmode = DLM_LOCK_IV;
1275
1276 switch (lkb->lkb_status) {
1277 case DLM_LKSTS_GRANTED:
1278 break;
1279 case DLM_LKSTS_CONVERT:
1280 move_lkb(r, lkb, DLM_LKSTS_GRANTED);
1281 rv = 1;
1282 break;
1283 case DLM_LKSTS_WAITING:
1284 del_lkb(r, lkb);
1285 lkb->lkb_grmode = DLM_LOCK_IV;
1286 /* this unhold undoes the original ref from create_lkb()
1287 so this leads to the lkb being freed */
1288 unhold_lkb(lkb);
1289 rv = -1;
1290 break;
1291 default:
1292 log_print("invalid status for revert %d", lkb->lkb_status);
1293 }
1294 return rv;
1295 }
1296
1297 static int revert_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
1298 {
1299 return revert_lock(r, lkb);
1300 }
1301
1302 static void _grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1303 {
1304 if (lkb->lkb_grmode != lkb->lkb_rqmode) {
1305 lkb->lkb_grmode = lkb->lkb_rqmode;
1306 if (lkb->lkb_status)
1307 move_lkb(r, lkb, DLM_LKSTS_GRANTED);
1308 else
1309 add_lkb(r, lkb, DLM_LKSTS_GRANTED);
1310 }
1311
1312 lkb->lkb_rqmode = DLM_LOCK_IV;
1313 }
1314
1315 static void grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1316 {
1317 set_lvb_lock(r, lkb);
1318 _grant_lock(r, lkb);
1319 lkb->lkb_highbast = 0;
1320 }
1321
1322 static void grant_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
1323 struct dlm_message *ms)
1324 {
1325 set_lvb_lock_pc(r, lkb, ms);
1326 _grant_lock(r, lkb);
1327 }
1328
1329 /* called by grant_pending_locks() which means an async grant message must
1330 be sent to the requesting node in addition to granting the lock if the
1331 lkb belongs to a remote node. */
1332
1333 static void grant_lock_pending(struct dlm_rsb *r, struct dlm_lkb *lkb)
1334 {
1335 grant_lock(r, lkb);
1336 if (is_master_copy(lkb))
1337 send_grant(r, lkb);
1338 else
1339 queue_cast(r, lkb, 0);
1340 }
1341
1342 /* The special CONVDEADLK, ALTPR and ALTCW flags allow the master to
1343 change the granted/requested modes. We're munging things accordingly in
1344 the process copy.
1345 CONVDEADLK: our grmode may have been forced down to NL to resolve a
1346 conversion deadlock
1347 ALTPR/ALTCW: our rqmode may have been changed to PR or CW to become
1348 compatible with other granted locks */
1349
1350 static void munge_demoted(struct dlm_lkb *lkb, struct dlm_message *ms)
1351 {
1352 if (ms->m_type != DLM_MSG_CONVERT_REPLY) {
1353 log_print("munge_demoted %x invalid reply type %d",
1354 lkb->lkb_id, ms->m_type);
1355 return;
1356 }
1357
1358 if (lkb->lkb_rqmode == DLM_LOCK_IV || lkb->lkb_grmode == DLM_LOCK_IV) {
1359 log_print("munge_demoted %x invalid modes gr %d rq %d",
1360 lkb->lkb_id, lkb->lkb_grmode, lkb->lkb_rqmode);
1361 return;
1362 }
1363
1364 lkb->lkb_grmode = DLM_LOCK_NL;
1365 }
1366
1367 static void munge_altmode(struct dlm_lkb *lkb, struct dlm_message *ms)
1368 {
1369 if (ms->m_type != DLM_MSG_REQUEST_REPLY &&
1370 ms->m_type != DLM_MSG_GRANT) {
1371 log_print("munge_altmode %x invalid reply type %d",
1372 lkb->lkb_id, ms->m_type);
1373 return;
1374 }
1375
1376 if (lkb->lkb_exflags & DLM_LKF_ALTPR)
1377 lkb->lkb_rqmode = DLM_LOCK_PR;
1378 else if (lkb->lkb_exflags & DLM_LKF_ALTCW)
1379 lkb->lkb_rqmode = DLM_LOCK_CW;
1380 else {
1381 log_print("munge_altmode invalid exflags %x", lkb->lkb_exflags);
1382 dlm_print_lkb(lkb);
1383 }
1384 }
1385
1386 static inline int first_in_list(struct dlm_lkb *lkb, struct list_head *head)
1387 {
1388 struct dlm_lkb *first = list_entry(head->next, struct dlm_lkb,
1389 lkb_statequeue);
1390 if (lkb->lkb_id == first->lkb_id)
1391 return 1;
1392
1393 return 0;
1394 }
1395
1396 /* Check if the given lkb conflicts with another lkb on the queue. */
1397
1398 static int queue_conflict(struct list_head *head, struct dlm_lkb *lkb)
1399 {
1400 struct dlm_lkb *this;
1401
1402 list_for_each_entry(this, head, lkb_statequeue) {
1403 if (this == lkb)
1404 continue;
1405 if (!modes_compat(this, lkb))
1406 return 1;
1407 }
1408 return 0;
1409 }
1410
1411 /*
1412 * "A conversion deadlock arises with a pair of lock requests in the converting
1413 * queue for one resource. The granted mode of each lock blocks the requested
1414 * mode of the other lock."
1415 *
1416 * Part 2: if the granted mode of lkb is preventing an earlier lkb in the
1417 * convert queue from being granted, then deadlk/demote lkb.
1418 *
1419 * Example:
1420 * Granted Queue: empty
1421 * Convert Queue: NL->EX (first lock)
1422 * PR->EX (second lock)
1423 *
1424 * The first lock can't be granted because of the granted mode of the second
1425 * lock and the second lock can't be granted because it's not first in the
1426 * list. We either cancel lkb's conversion (PR->EX) and return EDEADLK, or we
1427 * demote the granted mode of lkb (from PR to NL) if it has the CONVDEADLK
1428 * flag set and return DEMOTED in the lksb flags.
1429 *
1430 * Originally, this function detected conv-deadlk in a more limited scope:
1431 * - if !modes_compat(lkb1, lkb2) && !modes_compat(lkb2, lkb1), or
1432 * - if lkb1 was the first entry in the queue (not just earlier), and was
1433 * blocked by the granted mode of lkb2, and there was nothing on the
1434 * granted queue preventing lkb1 from being granted immediately, i.e.
1435 * lkb2 was the only thing preventing lkb1 from being granted.
1436 *
1437 * That second condition meant we'd only say there was conv-deadlk if
1438 * resolving it (by demotion) would lead to the first lock on the convert
1439 * queue being granted right away. It allowed conversion deadlocks to exist
1440 * between locks on the convert queue while they couldn't be granted anyway.
1441 *
1442 * Now, we detect and take action on conversion deadlocks immediately when
1443 * they're created, even if they may not be immediately consequential. If
1444 * lkb1 exists anywhere in the convert queue and lkb2 comes in with a granted
1445 * mode that would prevent lkb1's conversion from being granted, we do a
1446 * deadlk/demote on lkb2 right away and don't let it onto the convert queue.
1447 * I think this means that the lkb_is_ahead condition below should always
1448 * be zero, i.e. there will never be conv-deadlk between two locks that are
1449 * both already on the convert queue.
1450 */
1451
1452 static int conversion_deadlock_detect(struct dlm_rsb *r, struct dlm_lkb *lkb2)
1453 {
1454 struct dlm_lkb *lkb1;
1455 int lkb_is_ahead = 0;
1456
1457 list_for_each_entry(lkb1, &r->res_convertqueue, lkb_statequeue) {
1458 if (lkb1 == lkb2) {
1459 lkb_is_ahead = 1;
1460 continue;
1461 }
1462
1463 if (!lkb_is_ahead) {
1464 if (!modes_compat(lkb2, lkb1))
1465 return 1;
1466 } else {
1467 if (!modes_compat(lkb2, lkb1) &&
1468 !modes_compat(lkb1, lkb2))
1469 return 1;
1470 }
1471 }
1472 return 0;
1473 }
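/* Worked example (illustrative): with the convert queue holding
   NL->EX (lkb1) ahead of PR->EX (lkb2), calling this on lkb2 reaches
   lkb1 before lkb_is_ahead is set and tests modes_compat(lkb2, lkb1):
   lkb2's granted PR blocks lkb1's requested EX, so it returns 1 and
   the caller resolves the deadlock by demoting or cancelling lkb2. */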
1474
1475 /*
1476 * Return 1 if the lock can be granted, 0 otherwise.
1477 * Also detect and resolve conversion deadlocks.
1478 *
1479 * lkb is the lock to be granted
1480 *
1481 * now is 1 if the function is being called in the context of the
1482 * immediate request, it is 0 if called later, after the lock has been
1483 * queued.
1484 *
1485 * References are from chapter 6 of "VAXcluster Principles" by Roy Davis
1486 */
1487
1488 static int _can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now)
1489 {
1490 int8_t conv = (lkb->lkb_grmode != DLM_LOCK_IV);
1491
1492 /*
1493 * 6-10: Version 5.4 introduced an option to address the phenomenon of
1494 * a new request for a NL mode lock being blocked.
1495 *
1496 * 6-11: If the optional EXPEDITE flag is used with the new NL mode
1497 * request, then it would be granted. In essence, the use of this flag
1498 * tells the Lock Manager to expedite this request by not considering
1499 * what may be in the CONVERTING or WAITING queues... As of this
1500 * writing, the EXPEDITE flag can be used only with new requests for NL
1501 * mode locks. This flag is not valid for conversion requests.
1502 *
1503 * A shortcut. Earlier checks return an error if EXPEDITE is used in a
1504 * conversion or used with a non-NL requested mode. We also know an
1505 * EXPEDITE request is always granted immediately, so now must always
1506 * be 1. The full condition to grant an expedite request: (now &&
1507 * !conv && lkb->rqmode == DLM_LOCK_NL && (flags & EXPEDITE)) can
1508 * therefore be shortened to just checking the flag.
1509 */
1510
1511 if (lkb->lkb_exflags & DLM_LKF_EXPEDITE)
1512 return 1;
1513
1514 /*
1515 * A shortcut. Without this, !queue_conflict(grantqueue, lkb) would be
1516 * added to the remaining conditions.
1517 */
1518
1519 if (queue_conflict(&r->res_grantqueue, lkb))
1520 goto out;
1521
1522 /*
1523 * 6-3: By default, a conversion request is immediately granted if the
1524 * requested mode is compatible with the modes of all other granted
1525 * locks
1526 */
1527
1528 if (queue_conflict(&r->res_convertqueue, lkb))
1529 goto out;
1530
1531 /*
1532 * 6-5: But the default algorithm for deciding whether to grant or
1533 * queue conversion requests does not by itself guarantee that such
1534 * requests are serviced on a "first come first serve" basis. This, in
1535 * turn, can lead to a phenomenon known as "indefinite postponement".
1536 *
1537 * 6-7: This issue is dealt with by using the optional QUECVT flag with
1538 * the system service employed to request a lock conversion. This flag
1539 * forces certain conversion requests to be queued, even if they are
1540 * compatible with the granted modes of other locks on the same
1541 * resource. Thus, the use of this flag results in conversion requests
1542 * being ordered on a "first come first serve" basis.
1543 *
1544 * DCT: This condition is all about new conversions being able to occur
1545 * "in place" while the lock remains on the granted queue (assuming
1546 * nothing else conflicts.) IOW if QUECVT isn't set, a conversion
1547 * doesn't _have_ to go onto the convert queue where it's processed in
1548 * order. The "now" variable is necessary to distinguish converts
1549 * being received and processed for the first time now, because once a
1550 * convert is moved to the conversion queue the condition below applies
1551 * requiring fifo granting.
1552 */
1553
1554 if (now && conv && !(lkb->lkb_exflags & DLM_LKF_QUECVT))
1555 return 1;
1556
1557 /*
1558 * The NOORDER flag is set to avoid the standard vms rules on grant
1559 * order.
1560 */
1561
1562 if (lkb->lkb_exflags & DLM_LKF_NOORDER)
1563 return 1;
1564
1565 /*
1566 * 6-3: Once in that queue [CONVERTING], a conversion request cannot be
1567 * granted until all other conversion requests ahead of it are granted
1568 * and/or canceled.
1569 */
1570
1571 if (!now && conv && first_in_list(lkb, &r->res_convertqueue))
1572 return 1;
1573
1574 /*
1575 * 6-4: By default, a new request is immediately granted only if all
1576 * three of the following conditions are satisfied when the request is
1577 * issued:
1578 * - The queue of ungranted conversion requests for the resource is
1579 * empty.
1580 * - The queue of ungranted new requests for the resource is empty.
1581 * - The mode of the new request is compatible with the most
1582 * restrictive mode of all granted locks on the resource.
1583 */
1584
1585 if (now && !conv && list_empty(&r->res_convertqueue) &&
1586 list_empty(&r->res_waitqueue))
1587 return 1;
1588
1589 /*
1590 * 6-4: Once a lock request is in the queue of ungranted new requests,
1591 * it cannot be granted until the queue of ungranted conversion
1592 * requests is empty, all ungranted new requests ahead of it are
1593 * granted and/or canceled, and it is compatible with the granted mode
1594 * of the most restrictive lock granted on the resource.
1595 */
1596
1597 if (!now && !conv && list_empty(&r->res_convertqueue) &&
1598 first_in_list(lkb, &r->res_waitqueue))
1599 return 1;
1600 out:
1601 return 0;
1602 }
1603
1604 static int can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
1605 int *err)
1606 {
1607 int rv;
1608 int8_t alt = 0, rqmode = lkb->lkb_rqmode;
1609 int8_t is_convert = (lkb->lkb_grmode != DLM_LOCK_IV);
1610
1611 if (err)
1612 *err = 0;
1613
1614 rv = _can_be_granted(r, lkb, now);
1615 if (rv)
1616 goto out;
1617
1618 /*
1619 * The CONVDEADLK flag is non-standard and tells the dlm to resolve
1620 * conversion deadlocks by demoting grmode to NL, otherwise the dlm
1621 * cancels one of the locks.
1622 */
1623
1624 if (is_convert && can_be_queued(lkb) &&
1625 conversion_deadlock_detect(r, lkb)) {
1626 if (lkb->lkb_exflags & DLM_LKF_CONVDEADLK) {
1627 lkb->lkb_grmode = DLM_LOCK_NL;
1628 lkb->lkb_sbflags |= DLM_SBF_DEMOTED;
1629 } else if (!(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
1630 if (err)
1631 *err = -EDEADLK;
1632 else {
1633 log_print("can_be_granted deadlock %x now %d",
1634 lkb->lkb_id, now);
1635 dlm_dump_rsb(r);
1636 }
1637 }
1638 goto out;
1639 }
1640
1641 /*
1642 * The ALTPR and ALTCW flags are non-standard and tell the dlm to try
1643 * to grant a request in a mode other than the normal rqmode. It's a
1644 * simple way to provide a big optimization to applications that can
1645 * use them.
1646 */
1647
1648 if (rqmode != DLM_LOCK_PR && (lkb->lkb_exflags & DLM_LKF_ALTPR))
1649 alt = DLM_LOCK_PR;
1650 else if (rqmode != DLM_LOCK_CW && (lkb->lkb_exflags & DLM_LKF_ALTCW))
1651 alt = DLM_LOCK_CW;
1652
1653 if (alt) {
1654 lkb->lkb_rqmode = alt;
1655 rv = _can_be_granted(r, lkb, now);
1656 if (rv)
1657 lkb->lkb_sbflags |= DLM_SBF_ALTMODE;
1658 else
1659 lkb->lkb_rqmode = rqmode;
1660 }
1661 out:
1662 return rv;
1663 }
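/* Example of the ALT fallback above (illustrative): a DLM_LOCK_PW
   request carrying DLM_LKF_ALTPR that conflicts with an existing PR
   grant is retried internally with rqmode DLM_LOCK_PR; assuming
   nothing else blocks it, PR is compatible with PR, so the lock is
   granted in the alternate mode and DLM_SBF_ALTMODE is set for the
   caller to check. */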
1664
1665 /* FIXME: I don't think that can_be_granted() can/will demote or find deadlock
1666 for locks pending on the convert list. Once verified (watch for these
1667 log_prints), we should be able to just call _can_be_granted() and not
1668 bother with the demote/deadlk cases here (and there's no easy way to deal
1669 with a deadlk here, we'd have to generate something like grant_lock with
1670 the deadlk error.) */
1671
1672 /* Returns the highest requested mode of all blocked conversions; sets
1673 cw if there's a blocked conversion to DLM_LOCK_CW. */
1674
1675 static int grant_pending_convert(struct dlm_rsb *r, int high, int *cw)
1676 {
1677 struct dlm_lkb *lkb, *s;
1678 int hi, demoted, quit, grant_restart, demote_restart;
1679 int deadlk;
1680
1681 quit = 0;
1682 restart:
1683 grant_restart = 0;
1684 demote_restart = 0;
1685 hi = DLM_LOCK_IV;
1686
1687 list_for_each_entry_safe(lkb, s, &r->res_convertqueue, lkb_statequeue) {
1688 demoted = is_demoted(lkb);
1689 deadlk = 0;
1690
1691 if (can_be_granted(r, lkb, 0, &deadlk)) {
1692 grant_lock_pending(r, lkb);
1693 grant_restart = 1;
1694 continue;
1695 }
1696
1697 if (!demoted && is_demoted(lkb)) {
1698 log_print("WARN: pending demoted %x node %d %s",
1699 lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
1700 demote_restart = 1;
1701 continue;
1702 }
1703
1704 if (deadlk) {
1705 log_print("WARN: pending deadlock %x node %d %s",
1706 lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
1707 dlm_dump_rsb(r);
1708 continue;
1709 }
1710
1711 hi = max_t(int, lkb->lkb_rqmode, hi);
1712
1713 if (cw && lkb->lkb_rqmode == DLM_LOCK_CW)
1714 *cw = 1;
1715 }
1716
1717 if (grant_restart)
1718 goto restart;
1719 if (demote_restart && !quit) {
1720 quit = 1;
1721 goto restart;
1722 }
1723
1724 return max_t(int, high, hi);
1725 }
1726
1727 static int grant_pending_wait(struct dlm_rsb *r, int high, int *cw)
1728 {
1729 struct dlm_lkb *lkb, *s;
1730
1731 list_for_each_entry_safe(lkb, s, &r->res_waitqueue, lkb_statequeue) {
1732 if (can_be_granted(r, lkb, 0, NULL))
1733 grant_lock_pending(r, lkb);
1734 else {
1735 high = max_t(int, lkb->lkb_rqmode, high);
1736 if (lkb->lkb_rqmode == DLM_LOCK_CW)
1737 *cw = 1;
1738 }
1739 }
1740
1741 return high;
1742 }
1743
1744 /* cw of 1 means there's a lock with a rqmode of DLM_LOCK_CW that's blocked
1745 on either the convert or waiting queue.
1746 high is the largest rqmode of all locks blocked on the convert or
1747 waiting queue. */
1748
1749 static int lock_requires_bast(struct dlm_lkb *gr, int high, int cw)
1750 {
1751 if (gr->lkb_grmode == DLM_LOCK_PR && cw) {
1752 if (gr->lkb_highbast < DLM_LOCK_EX)
1753 return 1;
1754 return 0;
1755 }
1756
1757 if (gr->lkb_highbast < high &&
1758 !__dlm_compat_matrix[gr->lkb_grmode+1][high+1])
1759 return 1;
1760 return 0;
1761 }
1762
1763 static void grant_pending_locks(struct dlm_rsb *r)
1764 {
1765 struct dlm_lkb *lkb, *s;
1766 int high = DLM_LOCK_IV;
1767 int cw = 0;
1768
1769 DLM_ASSERT(is_master(r), dlm_dump_rsb(r););
1770
1771 high = grant_pending_convert(r, high, &cw);
1772 high = grant_pending_wait(r, high, &cw);
1773
1774 if (high == DLM_LOCK_IV)
1775 return;
1776
1777 /*
1778 * If there are locks left on the wait/convert queue then send blocking
1779 * ASTs to granted locks based on the largest requested mode (high)
1780 * found above.
1781 */
1782
1783 list_for_each_entry_safe(lkb, s, &r->res_grantqueue, lkb_statequeue) {
1784 if (lkb->lkb_bastaddr && lock_requires_bast(lkb, high, cw)) {
1785 if (cw && high == DLM_LOCK_PR)
1786 queue_bast(r, lkb, DLM_LOCK_CW);
1787 else
1788 queue_bast(r, lkb, high);
1789 lkb->lkb_highbast = high;
1790 }
1791 }
1792 }
1793
1794 static int modes_require_bast(struct dlm_lkb *gr, struct dlm_lkb *rq)
1795 {
1796 if ((gr->lkb_grmode == DLM_LOCK_PR && rq->lkb_rqmode == DLM_LOCK_CW) ||
1797 (gr->lkb_grmode == DLM_LOCK_CW && rq->lkb_rqmode == DLM_LOCK_PR)) {
1798 if (gr->lkb_highbast < DLM_LOCK_EX)
1799 return 1;
1800 return 0;
1801 }
1802
1803 if (gr->lkb_highbast < rq->lkb_rqmode && !modes_compat(gr, rq))
1804 return 1;
1805 return 0;
1806 }
1807
1808 static void send_bast_queue(struct dlm_rsb *r, struct list_head *head,
1809 struct dlm_lkb *lkb)
1810 {
1811 struct dlm_lkb *gr;
1812
1813 list_for_each_entry(gr, head, lkb_statequeue) {
1814 if (gr->lkb_bastaddr && modes_require_bast(gr, lkb)) {
1815 queue_bast(r, gr, lkb->lkb_rqmode);
1816 gr->lkb_highbast = lkb->lkb_rqmode;
1817 }
1818 }
1819 }
1820
1821 static void send_blocking_asts(struct dlm_rsb *r, struct dlm_lkb *lkb)
1822 {
1823 send_bast_queue(r, &r->res_grantqueue, lkb);
1824 }
1825
1826 static void send_blocking_asts_all(struct dlm_rsb *r, struct dlm_lkb *lkb)
1827 {
1828 send_bast_queue(r, &r->res_grantqueue, lkb);
1829 send_bast_queue(r, &r->res_convertqueue, lkb);
1830 }
1831
1832 /* set_master(r, lkb) -- set the master nodeid of a resource
1833
1834 The purpose of this function is to set the nodeid field in the given
1835 lkb using the nodeid field in the given rsb. If the rsb's nodeid is
1836 known, it can just be copied to the lkb and the function will return
1837 0. If the rsb's nodeid is _not_ known, it needs to be looked up
1838 before it can be copied to the lkb.
1839
1840 When the rsb nodeid is being looked up remotely, the initial lkb
1841 causing the lookup is kept on the ls_waiters list waiting for the
1842 lookup reply. Other lkb's waiting for the same rsb lookup are kept
1843 on the rsb's res_lookup list until the master is verified.
1844
1845 Return values:
1846 0: nodeid is set in rsb/lkb and the caller should go ahead and use it
1847 1: the rsb master is not available and the lkb has been placed on
1848 a wait queue
1849 */
1850
1851 static int set_master(struct dlm_rsb *r, struct dlm_lkb *lkb)
1852 {
1853 struct dlm_ls *ls = r->res_ls;
1854 int error, dir_nodeid, ret_nodeid, our_nodeid = dlm_our_nodeid();
1855
1856 if (rsb_flag(r, RSB_MASTER_UNCERTAIN)) {
1857 rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
1858 r->res_first_lkid = lkb->lkb_id;
1859 lkb->lkb_nodeid = r->res_nodeid;
1860 return 0;
1861 }
1862
1863 if (r->res_first_lkid && r->res_first_lkid != lkb->lkb_id) {
1864 list_add_tail(&lkb->lkb_rsb_lookup, &r->res_lookup);
1865 return 1;
1866 }
1867
1868 if (r->res_nodeid == 0) {
1869 lkb->lkb_nodeid = 0;
1870 return 0;
1871 }
1872
1873 if (r->res_nodeid > 0) {
1874 lkb->lkb_nodeid = r->res_nodeid;
1875 return 0;
1876 }
1877
1878 DLM_ASSERT(r->res_nodeid == -1, dlm_dump_rsb(r););
1879
1880 dir_nodeid = dlm_dir_nodeid(r);
1881
1882 if (dir_nodeid != our_nodeid) {
1883 r->res_first_lkid = lkb->lkb_id;
1884 send_lookup(r, lkb);
1885 return 1;
1886 }
1887
1888 for (;;) {
1889 /* It's possible for dlm_scand to remove an old rsb for
1890 this same resource from the toss list, us to create
1891 a new one, look up the master locally, and find it
1892 already exists just before dlm_scand does the
1893 dir_remove() on the previous rsb. */
1894
1895 error = dlm_dir_lookup(ls, our_nodeid, r->res_name,
1896 r->res_length, &ret_nodeid);
1897 if (!error)
1898 break;
1899 log_debug(ls, "dir_lookup error %d %s", error, r->res_name);
1900 schedule();
1901 }
1902
1903 if (ret_nodeid == our_nodeid) {
1904 r->res_first_lkid = 0;
1905 r->res_nodeid = 0;
1906 lkb->lkb_nodeid = 0;
1907 } else {
1908 r->res_first_lkid = lkb->lkb_id;
1909 r->res_nodeid = ret_nodeid;
1910 lkb->lkb_nodeid = ret_nodeid;
1911 }
1912 return 0;
1913 }
1914
1915 static void process_lookup_list(struct dlm_rsb *r)
1916 {
1917 struct dlm_lkb *lkb, *safe;
1918
1919 list_for_each_entry_safe(lkb, safe, &r->res_lookup, lkb_rsb_lookup) {
1920 list_del_init(&lkb->lkb_rsb_lookup);
1921 _request_lock(r, lkb);
1922 schedule();
1923 }
1924 }
1925
1926 /* confirm_master -- confirm (or deny) an rsb's master nodeid */
1927
1928 static void confirm_master(struct dlm_rsb *r, int error)
1929 {
1930 struct dlm_lkb *lkb;
1931
1932 if (!r->res_first_lkid)
1933 return;
1934
1935 switch (error) {
1936 case 0:
1937 case -EINPROGRESS:
1938 r->res_first_lkid = 0;
1939 process_lookup_list(r);
1940 break;
1941
1942 case -EAGAIN:
1943 case -EBADR:
1944 case -ENOTBLK:
1945 /* the remote request failed and won't be retried (it was
1946 a NOQUEUE, or has been canceled/unlocked); make a waiting
1947 lkb the first_lkid */
1948
1949 r->res_first_lkid = 0;
1950
1951 if (!list_empty(&r->res_lookup)) {
1952 lkb = list_entry(r->res_lookup.next, struct dlm_lkb,
1953 lkb_rsb_lookup);
1954 list_del_init(&lkb->lkb_rsb_lookup);
1955 r->res_first_lkid = lkb->lkb_id;
1956 _request_lock(r, lkb);
1957 } else
1958 r->res_nodeid = -1;
1959 break;
1960
1961 default:
1962 log_error(r->res_ls, "confirm_master unknown error %d", error);
1963 }
1964 }
1965
1966 static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags,
1967 int namelen, unsigned long timeout_cs, void *ast,
1968 void *astarg, void *bast, struct dlm_args *args)
1969 {
1970 int rv = -EINVAL;
1971
1972 /* check for invalid arg usage */
1973
1974 if (mode < 0 || mode > DLM_LOCK_EX)
1975 goto out;
1976
1977 if (!(flags & DLM_LKF_CONVERT) && (namelen > DLM_RESNAME_MAXLEN))
1978 goto out;
1979
1980 if (flags & DLM_LKF_CANCEL)
1981 goto out;
1982
1983 if (flags & DLM_LKF_QUECVT && !(flags & DLM_LKF_CONVERT))
1984 goto out;
1985
1986 if (flags & DLM_LKF_CONVDEADLK && !(flags & DLM_LKF_CONVERT))
1987 goto out;
1988
1989 if (flags & DLM_LKF_CONVDEADLK && flags & DLM_LKF_NOQUEUE)
1990 goto out;
1991
1992 if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_CONVERT)
1993 goto out;
1994
1995 if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_QUECVT)
1996 goto out;
1997
1998 if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_NOQUEUE)
1999 goto out;
2000
2001 if (flags & DLM_LKF_EXPEDITE && mode != DLM_LOCK_NL)
2002 goto out;
2003
2004 if (!ast || !lksb)
2005 goto out;
2006
2007 if (flags & DLM_LKF_VALBLK && !lksb->sb_lvbptr)
2008 goto out;
2009
2010 if (flags & DLM_LKF_CONVERT && !lksb->sb_lkid)
2011 goto out;
2012
2013 /* these args will be copied to the lkb in validate_lock_args;
2014 this cannot be done now because, when converting locks, fields
2015 in an active lkb cannot be modified before locking the rsb */
2016
2017 args->flags = flags;
2018 args->astaddr = ast;
2019 args->astparam = (long) astarg;
2020 args->bastaddr = bast;
2021 args->timeout = timeout_cs;
2022 args->mode = mode;
2023 args->lksb = lksb;
2024 rv = 0;
2025 out:
2026 return rv;
2027 }
2028
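/* Examples of the checks above (hypothetical calls):

	mode DLM_LOCK_NL with DLM_LKF_EXPEDITE             -> valid
	DLM_LKF_EXPEDITE | DLM_LKF_CONVERT                 -> -EINVAL
	DLM_LKF_QUECVT without DLM_LKF_CONVERT             -> -EINVAL
	DLM_LKF_VALBLK with lksb->sb_lvbptr == NULL        -> -EINVAL
	DLM_LKF_CONVERT with lksb->sb_lkid == 0            -> -EINVAL
*/
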
2029 static int set_unlock_args(uint32_t flags, void *astarg, struct dlm_args *args)
2030 {
2031 if (flags & ~(DLM_LKF_CANCEL | DLM_LKF_VALBLK | DLM_LKF_IVVALBLK |
2032 DLM_LKF_FORCEUNLOCK))
2033 return -EINVAL;
2034
2035 if (flags & DLM_LKF_CANCEL && flags & DLM_LKF_FORCEUNLOCK)
2036 return -EINVAL;
2037
2038 args->flags = flags;
2039 args->astparam = (long) astarg;
2040 return 0;
2041 }
2042
2043 static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
2044 struct dlm_args *args)
2045 {
2046 int rv = -EINVAL;
2047
2048 if (args->flags & DLM_LKF_CONVERT) {
2049 if (lkb->lkb_flags & DLM_IFL_MSTCPY)
2050 goto out;
2051
2052 if (args->flags & DLM_LKF_QUECVT &&
2053 !__quecvt_compat_matrix[lkb->lkb_grmode+1][args->mode+1])
2054 goto out;
2055
2056 rv = -EBUSY;
2057 if (lkb->lkb_status != DLM_LKSTS_GRANTED)
2058 goto out;
2059
2060 if (lkb->lkb_wait_type)
2061 goto out;
2062
2063 if (is_overlap(lkb))
2064 goto out;
2065 }
2066
2067 lkb->lkb_exflags = args->flags;
2068 lkb->lkb_sbflags = 0;
2069 lkb->lkb_astaddr = args->astaddr;
2070 lkb->lkb_astparam = args->astparam;
2071 lkb->lkb_bastaddr = args->bastaddr;
2072 lkb->lkb_rqmode = args->mode;
2073 lkb->lkb_lksb = args->lksb;
2074 lkb->lkb_lvbptr = args->lksb->sb_lvbptr;
2075 lkb->lkb_ownpid = (int) current->pid;
2076 lkb->lkb_timeout_cs = args->timeout;
2077 rv = 0;
2078 out:
2079 return rv;
2080 }
2081
2082 /* when dlm_unlock() sees -EBUSY with CANCEL/FORCEUNLOCK it returns 0
2083 for success */
2084
2085 /* note: it's valid for lkb_nodeid/res_nodeid to be -1 when we get here
2086 because there may be a lookup in progress, and it's valid to do a
2087 cancel or forced unlock on such an lkb */
2088
2089 static int validate_unlock_args(struct dlm_lkb *lkb, struct dlm_args *args)
2090 {
2091 struct dlm_ls *ls = lkb->lkb_resource->res_ls;
2092 int rv = -EINVAL;
2093
2094 if (lkb->lkb_flags & DLM_IFL_MSTCPY) {
2095 log_error(ls, "unlock on MSTCPY %x", lkb->lkb_id);
2096 dlm_print_lkb(lkb);
2097 goto out;
2098 }
2099
2100 /* an lkb may still exist even though the lock is EOL'ed due to a
2101 cancel, unlock or failed noqueue request; an app can't use these
2102 locks; return the same error as if the lkid had not been found at all */
2103
2104 if (lkb->lkb_flags & DLM_IFL_ENDOFLIFE) {
2105 log_debug(ls, "unlock on ENDOFLIFE %x", lkb->lkb_id);
2106 rv = -ENOENT;
2107 goto out;
2108 }
2109
2110 /* an lkb may be waiting for an rsb lookup to complete where the
2111 lookup was initiated by another lock */
2112
2113 if (args->flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)) {
2114 if (!list_empty(&lkb->lkb_rsb_lookup)) {
2115 log_debug(ls, "unlock on rsb_lookup %x", lkb->lkb_id);
2116 list_del_init(&lkb->lkb_rsb_lookup);
2117 queue_cast(lkb->lkb_resource, lkb,
2118 args->flags & DLM_LKF_CANCEL ?
2119 -DLM_ECANCEL : -DLM_EUNLOCK);
2120 unhold_lkb(lkb); /* undoes create_lkb() */
2121 rv = -EBUSY;
2122 goto out;
2123 }
2124 }
2125
2126 /* cancel not allowed with another cancel/unlock in progress */
2127
2128 if (args->flags & DLM_LKF_CANCEL) {
2129 if (lkb->lkb_exflags & DLM_LKF_CANCEL)
2130 goto out;
2131
2132 if (is_overlap(lkb))
2133 goto out;
2134
2135 /* don't let scand try to do a cancel */
2136 del_timeout(lkb);
2137
2138 if (lkb->lkb_flags & DLM_IFL_RESEND) {
2139 lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
2140 rv = -EBUSY;
2141 goto out;
2142 }
2143
2144 switch (lkb->lkb_wait_type) {
2145 case DLM_MSG_LOOKUP:
2146 case DLM_MSG_REQUEST:
2147 lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
2148 rv = -EBUSY;
2149 goto out;
2150 case DLM_MSG_UNLOCK:
2151 case DLM_MSG_CANCEL:
2152 goto out;
2153 }
2154 /* add_to_waiters() will set OVERLAP_CANCEL */
2155 goto out_ok;
2156 }
2157
2158 /* do we need to allow a force-unlock if there's a normal unlock
2159 already in progress? in what conditions could the normal unlock
2160 fail such that we'd want to send a force-unlock to be sure? */
2161
2162 if (args->flags & DLM_LKF_FORCEUNLOCK) {
2163 if (lkb->lkb_exflags & DLM_LKF_FORCEUNLOCK)
2164 goto out;
2165
2166 if (is_overlap_unlock(lkb))
2167 goto out;
2168
2169 /* don't let scand try to do a cancel */
2170 del_timeout(lkb);
2171
2172 if (lkb->lkb_flags & DLM_IFL_RESEND) {
2173 lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
2174 rv = -EBUSY;
2175 goto out;
2176 }
2177
2178 switch (lkb->lkb_wait_type) {
2179 case DLM_MSG_LOOKUP:
2180 case DLM_MSG_REQUEST:
2181 lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
2182 rv = -EBUSY;
2183 goto out;
2184 case DLM_MSG_UNLOCK:
2185 goto out;
2186 }
2187 /* add_to_waiters() will set OVERLAP_UNLOCK */
2188 goto out_ok;
2189 }
2190
2191 /* normal unlock not allowed if there's any op in progress */
2192 rv = -EBUSY;
2193 if (lkb->lkb_wait_type || lkb->lkb_wait_count)
2194 goto out;
2195
2196 out_ok:
2197 /* an overlapping op shouldn't blow away exflags from other op */
2198 lkb->lkb_exflags |= args->flags;
2199 lkb->lkb_sbflags = 0;
2200 lkb->lkb_astparam = args->astparam;
2201 rv = 0;
2202 out:
2203 if (rv)
2204 log_debug(ls, "validate_unlock_args %d %x %x %x %x %d %s", rv,
2205 lkb->lkb_id, lkb->lkb_flags, lkb->lkb_exflags,
2206 args->flags, lkb->lkb_wait_type,
2207 lkb->lkb_resource->res_name);
2208 return rv;
2209 }
2210
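/* A rough summary of the outcomes above (illustrative, not exhaustive):

	args->flags		lkb state			rv
	CANCEL			cancel already requested	-EINVAL
	CANCEL			waiting for request reply	-EBUSY (overlap)
	FORCEUNLOCK		waiting for unlock reply	-EINVAL
	(plain unlock)		any op still in progress	-EBUSY
	(plain unlock)		idle				0
*/
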
2211 /*
2212 * Four stage 4 varieties:
2213 * do_request(), do_convert(), do_unlock(), do_cancel()
2214 * These are called on the master node for the given lock and
2215 * from the central locking logic.
2216 */
2217
2218 static int do_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
2219 {
2220 int error = 0;
2221
2222 if (can_be_granted(r, lkb, 1, NULL)) {
2223 grant_lock(r, lkb);
2224 queue_cast(r, lkb, 0);
2225 goto out;
2226 }
2227
2228 if (can_be_queued(lkb)) {
2229 error = -EINPROGRESS;
2230 add_lkb(r, lkb, DLM_LKSTS_WAITING);
2231 send_blocking_asts(r, lkb);
2232 add_timeout(lkb);
2233 goto out;
2234 }
2235
2236 error = -EAGAIN;
2237 if (force_blocking_asts(lkb))
2238 send_blocking_asts_all(r, lkb);
2239 queue_cast(r, lkb, -EAGAIN);
2240
2241 out:
2242 return error;
2243 }
2244
2245 static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
2246 {
2247 int error = 0;
2248 int deadlk = 0;
2249
2250 /* changing an existing lock may allow others to be granted */
2251
2252 if (can_be_granted(r, lkb, 1, &deadlk)) {
2253 grant_lock(r, lkb);
2254 queue_cast(r, lkb, 0);
2255 grant_pending_locks(r);
2256 goto out;
2257 }
2258
2259 /* can_be_granted() detected that this lock would block in a conversion
2260 deadlock, so we leave it on the granted queue and return EDEADLK in
2261 the ast for the convert. */
2262
2263 if (deadlk) {
2264 /* it's left on the granted queue */
2265 log_debug(r->res_ls, "deadlock %x node %d sts%d g%d r%d %s",
2266 lkb->lkb_id, lkb->lkb_nodeid, lkb->lkb_status,
2267 lkb->lkb_grmode, lkb->lkb_rqmode, r->res_name);
2268 revert_lock(r, lkb);
2269 queue_cast(r, lkb, -EDEADLK);
2270 error = -EDEADLK;
2271 goto out;
2272 }
2273
2274 /* is_demoted() means the can_be_granted() above set the grmode
2275 to NL, and left us on the granted queue. This auto-demotion
2276 (due to CONVDEADLK) might mean other locks, and/or this lock, are
2277 now grantable. We have to try to grant other converting locks
2278 before we try again to grant this one. */
2279
2280 if (is_demoted(lkb)) {
2281 grant_pending_convert(r, DLM_LOCK_IV, NULL);
2282 if (_can_be_granted(r, lkb, 1)) {
2283 grant_lock(r, lkb);
2284 queue_cast(r, lkb, 0);
2285 grant_pending_locks(r);
2286 goto out;
2287 }
2288 /* else fall through and move to convert queue */
2289 }
2290
2291 if (can_be_queued(lkb)) {
2292 error = -EINPROGRESS;
2293 del_lkb(r, lkb);
2294 add_lkb(r, lkb, DLM_LKSTS_CONVERT);
2295 send_blocking_asts(r, lkb);
2296 add_timeout(lkb);
2297 goto out;
2298 }
2299
2300 error = -EAGAIN;
2301 if (force_blocking_asts(lkb))
2302 send_blocking_asts_all(r, lkb);
2303 queue_cast(r, lkb, -EAGAIN);
2304
2305 out:
2306 return error;
2307 }
2308
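/* Conversion deadlock, illustrated (hypothetical locks):

	lkb A: granted PR, converting to EX, blocked by B's PR
	lkb B: granted PR, converting to EX, blocked by A's PR

   Neither convert can ever be granted while the other holds PR.  With
   DLM_LKF_CONVDEADLK set, can_be_granted() demotes one lock's grmode
   to NL (the is_demoted() path above) so the other can proceed;
   otherwise the convert is reverted and its ast carries -EDEADLK. */
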
2309 static int do_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2310 {
2311 remove_lock(r, lkb);
2312 queue_cast(r, lkb, -DLM_EUNLOCK);
2313 grant_pending_locks(r);
2314 return -DLM_EUNLOCK;
2315 }
2316
2317 /* returns: 0 did nothing, -DLM_ECANCEL canceled lock */
2318
2319 static int do_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
2320 {
2321 int error;
2322
2323 error = revert_lock(r, lkb);
2324 if (error) {
2325 queue_cast(r, lkb, -DLM_ECANCEL);
2326 grant_pending_locks(r);
2327 return -DLM_ECANCEL;
2328 }
2329 return 0;
2330 }
2331
2332 /*
2333 * Four stage 3 varieties:
2334 * _request_lock(), _convert_lock(), _unlock_lock(), _cancel_lock()
2335 */
2336
2337 /* add a new lkb to a possibly new rsb, called by requesting process */
2338
2339 static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2340 {
2341 int error;
2342
2343 /* set_master: sets lkb nodeid from r */
2344
2345 error = set_master(r, lkb);
2346 if (error < 0)
2347 goto out;
2348 if (error) {
2349 error = 0;
2350 goto out;
2351 }
2352
2353 if (is_remote(r))
2354 /* receive_request() calls do_request() on remote node */
2355 error = send_request(r, lkb);
2356 else
2357 error = do_request(r, lkb);
2358 out:
2359 return error;
2360 }
2361
2362 /* change some property of an existing lkb, e.g. mode */
2363
2364 static int _convert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2365 {
2366 int error;
2367
2368 if (is_remote(r))
2369 /* receive_convert() calls do_convert() on remote node */
2370 error = send_convert(r, lkb);
2371 else
2372 error = do_convert(r, lkb);
2373
2374 return error;
2375 }
2376
2377 /* remove an existing lkb from the granted queue */
2378
2379 static int _unlock_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2380 {
2381 int error;
2382
2383 if (is_remote(r))
2384 /* receive_unlock() calls do_unlock() on remote node */
2385 error = send_unlock(r, lkb);
2386 else
2387 error = do_unlock(r, lkb);
2388
2389 return error;
2390 }
2391
2392 /* remove an existing lkb from the convert or wait queue */
2393
2394 static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2395 {
2396 int error;
2397
2398 if (is_remote(r))
2399 /* receive_cancel() calls do_cancel() on remote node */
2400 error = send_cancel(r, lkb);
2401 else
2402 error = do_cancel(r, lkb);
2403
2404 return error;
2405 }
2406
2407 /*
2408 * Four stage 2 varieties:
2409 * request_lock(), convert_lock(), unlock_lock(), cancel_lock()
2410 */
2411
2412 static int request_lock(struct dlm_ls *ls, struct dlm_lkb *lkb, char *name,
2413 int len, struct dlm_args *args)
2414 {
2415 struct dlm_rsb *r;
2416 int error;
2417
2418 error = validate_lock_args(ls, lkb, args);
2419 if (error)
2420 goto out;
2421
2422 error = find_rsb(ls, name, len, R_CREATE, &r);
2423 if (error)
2424 goto out;
2425
2426 lock_rsb(r);
2427
2428 attach_lkb(r, lkb);
2429 lkb->lkb_lksb->sb_lkid = lkb->lkb_id;
2430
2431 error = _request_lock(r, lkb);
2432
2433 unlock_rsb(r);
2434 put_rsb(r);
2435
2436 out:
2437 return error;
2438 }
2439
2440 static int convert_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
2441 struct dlm_args *args)
2442 {
2443 struct dlm_rsb *r;
2444 int error;
2445
2446 r = lkb->lkb_resource;
2447
2448 hold_rsb(r);
2449 lock_rsb(r);
2450
2451 error = validate_lock_args(ls, lkb, args);
2452 if (error)
2453 goto out;
2454
2455 error = _convert_lock(r, lkb);
2456 out:
2457 unlock_rsb(r);
2458 put_rsb(r);
2459 return error;
2460 }
2461
2462 static int unlock_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
2463 struct dlm_args *args)
2464 {
2465 struct dlm_rsb *r;
2466 int error;
2467
2468 r = lkb->lkb_resource;
2469
2470 hold_rsb(r);
2471 lock_rsb(r);
2472
2473 error = validate_unlock_args(lkb, args);
2474 if (error)
2475 goto out;
2476
2477 error = _unlock_lock(r, lkb);
2478 out:
2479 unlock_rsb(r);
2480 put_rsb(r);
2481 return error;
2482 }
2483
2484 static int cancel_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
2485 struct dlm_args *args)
2486 {
2487 struct dlm_rsb *r;
2488 int error;
2489
2490 r = lkb->lkb_resource;
2491
2492 hold_rsb(r);
2493 lock_rsb(r);
2494
2495 error = validate_unlock_args(lkb, args);
2496 if (error)
2497 goto out;
2498
2499 error = _cancel_lock(r, lkb);
2500 out:
2501 unlock_rsb(r);
2502 put_rsb(r);
2503 return error;
2504 }
2505
2506 /*
2507 * Two stage 1 varieties: dlm_lock() and dlm_unlock()
2508 */
2509
2510 int dlm_lock(dlm_lockspace_t *lockspace,
2511 int mode,
2512 struct dlm_lksb *lksb,
2513 uint32_t flags,
2514 void *name,
2515 unsigned int namelen,
2516 uint32_t parent_lkid,
2517 void (*ast) (void *astarg),
2518 void *astarg,
2519 void (*bast) (void *astarg, int mode))
2520 {
2521 struct dlm_ls *ls;
2522 struct dlm_lkb *lkb;
2523 struct dlm_args args;
2524 int error, convert = flags & DLM_LKF_CONVERT;
2525
2526 ls = dlm_find_lockspace_local(lockspace);
2527 if (!ls)
2528 return -EINVAL;
2529
2530 dlm_lock_recovery(ls);
2531
2532 if (convert)
2533 error = find_lkb(ls, lksb->sb_lkid, &lkb);
2534 else
2535 error = create_lkb(ls, &lkb);
2536
2537 if (error)
2538 goto out;
2539
2540 error = set_lock_args(mode, lksb, flags, namelen, 0, ast,
2541 astarg, bast, &args);
2542 if (error)
2543 goto out_put;
2544
2545 if (convert)
2546 error = convert_lock(ls, lkb, &args);
2547 else
2548 error = request_lock(ls, lkb, name, namelen, &args);
2549
2550 if (error == -EINPROGRESS)
2551 error = 0;
2552 out_put:
2553 if (convert || error)
2554 __put_lkb(ls, lkb);
2555 if (error == -EAGAIN || error == -EDEADLK)
2556 error = 0;
2557 out:
2558 dlm_unlock_recovery(ls);
2559 dlm_put_lockspace(ls);
2560 return error;
2561 }
2562
2563 int dlm_unlock(dlm_lockspace_t *lockspace,
2564 uint32_t lkid,
2565 uint32_t flags,
2566 struct dlm_lksb *lksb,
2567 void *astarg)
2568 {
2569 struct dlm_ls *ls;
2570 struct dlm_lkb *lkb;
2571 struct dlm_args args;
2572 int error;
2573
2574 ls = dlm_find_lockspace_local(lockspace);
2575 if (!ls)
2576 return -EINVAL;
2577
2578 dlm_lock_recovery(ls);
2579
2580 error = find_lkb(ls, lkid, &lkb);
2581 if (error)
2582 goto out;
2583
2584 error = set_unlock_args(flags, astarg, &args);
2585 if (error)
2586 goto out_put;
2587
2588 if (flags & DLM_LKF_CANCEL)
2589 error = cancel_lock(ls, lkb, &args);
2590 else
2591 error = unlock_lock(ls, lkb, &args);
2592
2593 if (error == -DLM_EUNLOCK || error == -DLM_ECANCEL)
2594 error = 0;
2595 if (error == -EBUSY && (flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)))
2596 error = 0;
2597 out_put:
2598 dlm_put_lkb(lkb);
2599 out:
2600 dlm_unlock_recovery(ls);
2601 dlm_put_lockspace(ls);
2602 return error;
2603 }
2604
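/* Usage sketch for the two entry points (illustrative only; my_ast,
   my_bast and the surrounding setup are hypothetical):

	static void my_ast(void *arg)
	{
		struct dlm_lksb *lksb = arg;
		completion: lksb->sb_status holds 0 or an error
	}

	static void my_bast(void *arg, int mode)
	{
		another node's request conflicts with ours at "mode"
	}

	error = dlm_lock(ls, DLM_LOCK_EX, &lksb, 0, "myres", 5, 0,
			 my_ast, &lksb, my_bast);
	...
	error = dlm_unlock(ls, lksb.sb_lkid, 0, &lksb, &lksb);

   Both return 0 once the operation is underway; the final result
   (including -EAGAIN, -EDEADLK, -DLM_EUNLOCK) is delivered through the
   ast and lksb->sb_status, per the mappings above. */
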
2605 /*
2606 * send/receive routines for remote operations and replies
2607 *
2608 * send_args
2609 * send_common
2610 * send_request receive_request
2611 * send_convert receive_convert
2612 * send_unlock receive_unlock
2613 * send_cancel receive_cancel
2614 * send_grant receive_grant
2615 * send_bast receive_bast
2616 * send_lookup receive_lookup
2617 * send_remove receive_remove
2618 *
2619 * send_common_reply
2620 * receive_request_reply send_request_reply
2621 * receive_convert_reply send_convert_reply
2622 * receive_unlock_reply send_unlock_reply
2623 * receive_cancel_reply send_cancel_reply
2624 * receive_lookup_reply send_lookup_reply
2625 */
2626
2627 static int _create_message(struct dlm_ls *ls, int mb_len,
2628 int to_nodeid, int mstype,
2629 struct dlm_message **ms_ret,
2630 struct dlm_mhandle **mh_ret)
2631 {
2632 struct dlm_message *ms;
2633 struct dlm_mhandle *mh;
2634 char *mb;
2635
2636 /* get_buffer gives us a message handle (mh) that we need to
2637 pass into lowcomms_commit and a message buffer (mb) that we
2638 write our data into */
2639
2640 mh = dlm_lowcomms_get_buffer(to_nodeid, mb_len, ls->ls_allocation, &mb);
2641 if (!mh)
2642 return -ENOBUFS;
2643
2644 memset(mb, 0, mb_len);
2645
2646 ms = (struct dlm_message *) mb;
2647
2648 ms->m_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
2649 ms->m_header.h_lockspace = ls->ls_global_id;
2650 ms->m_header.h_nodeid = dlm_our_nodeid();
2651 ms->m_header.h_length = mb_len;
2652 ms->m_header.h_cmd = DLM_MSG;
2653
2654 ms->m_type = mstype;
2655
2656 *mh_ret = mh;
2657 *ms_ret = ms;
2658 return 0;
2659 }
2660
2661 static int create_message(struct dlm_rsb *r, struct dlm_lkb *lkb,
2662 int to_nodeid, int mstype,
2663 struct dlm_message **ms_ret,
2664 struct dlm_mhandle **mh_ret)
2665 {
2666 int mb_len = sizeof(struct dlm_message);
2667
2668 switch (mstype) {
2669 case DLM_MSG_REQUEST:
2670 case DLM_MSG_LOOKUP:
2671 case DLM_MSG_REMOVE:
2672 mb_len += r->res_length;
2673 break;
2674 case DLM_MSG_CONVERT:
2675 case DLM_MSG_UNLOCK:
2676 case DLM_MSG_REQUEST_REPLY:
2677 case DLM_MSG_CONVERT_REPLY:
2678 case DLM_MSG_GRANT:
2679 if (lkb && lkb->lkb_lvbptr)
2680 mb_len += r->res_ls->ls_lvblen;
2681 break;
2682 }
2683
2684 return _create_message(r->res_ls, mb_len, to_nodeid, mstype,
2685 ms_ret, mh_ret);
2686 }
2687
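/* For example, a DLM_MSG_REQUEST for a resource with an 8-byte name is
   allocated as sizeof(struct dlm_message) + 8; send_args() copies the
   name into ms->m_extra, and the receiving side recovers the 8 with
   receive_extralen() (h_length minus the fixed struct size). */
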
2688 /* further lowcomms enhancements or alternate implementations may make
2689 the return value from this function useful at some point */
2690
2691 static int send_message(struct dlm_mhandle *mh, struct dlm_message *ms)
2692 {
2693 dlm_message_out(ms);
2694 dlm_lowcomms_commit_buffer(mh);
2695 return 0;
2696 }
2697
2698 static void send_args(struct dlm_rsb *r, struct dlm_lkb *lkb,
2699 struct dlm_message *ms)
2700 {
2701 ms->m_nodeid = lkb->lkb_nodeid;
2702 ms->m_pid = lkb->lkb_ownpid;
2703 ms->m_lkid = lkb->lkb_id;
2704 ms->m_remid = lkb->lkb_remid;
2705 ms->m_exflags = lkb->lkb_exflags;
2706 ms->m_sbflags = lkb->lkb_sbflags;
2707 ms->m_flags = lkb->lkb_flags;
2708 ms->m_lvbseq = lkb->lkb_lvbseq;
2709 ms->m_status = lkb->lkb_status;
2710 ms->m_grmode = lkb->lkb_grmode;
2711 ms->m_rqmode = lkb->lkb_rqmode;
2712 ms->m_hash = r->res_hash;
2713
2714 /* m_result and m_bastmode are set from function args,
2715 not from lkb fields */
2716
2717 if (lkb->lkb_bastaddr)
2718 ms->m_asts |= AST_BAST;
2719 if (lkb->lkb_astaddr)
2720 ms->m_asts |= AST_COMP;
2721
2722 /* compare with switch in create_message; send_remove() doesn't
2723 use send_args() */
2724
2725 switch (ms->m_type) {
2726 case DLM_MSG_REQUEST:
2727 case DLM_MSG_LOOKUP:
2728 memcpy(ms->m_extra, r->res_name, r->res_length);
2729 break;
2730 case DLM_MSG_CONVERT:
2731 case DLM_MSG_UNLOCK:
2732 case DLM_MSG_REQUEST_REPLY:
2733 case DLM_MSG_CONVERT_REPLY:
2734 case DLM_MSG_GRANT:
2735 if (!lkb->lkb_lvbptr)
2736 break;
2737 memcpy(ms->m_extra, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
2738 break;
2739 }
2740 }
2741
2742 static int send_common(struct dlm_rsb *r, struct dlm_lkb *lkb, int mstype)
2743 {
2744 struct dlm_message *ms;
2745 struct dlm_mhandle *mh;
2746 int to_nodeid, error;
2747
2748 error = add_to_waiters(lkb, mstype);
2749 if (error)
2750 return error;
2751
2752 to_nodeid = r->res_nodeid;
2753
2754 error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
2755 if (error)
2756 goto fail;
2757
2758 send_args(r, lkb, ms);
2759
2760 error = send_message(mh, ms);
2761 if (error)
2762 goto fail;
2763 return 0;
2764
2765 fail:
2766 remove_from_waiters(lkb, msg_reply_type(mstype));
2767 return error;
2768 }
2769
2770 static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
2771 {
2772 return send_common(r, lkb, DLM_MSG_REQUEST);
2773 }
2774
2775 static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
2776 {
2777 int error;
2778
2779 error = send_common(r, lkb, DLM_MSG_CONVERT);
2780
2781 /* down conversions go without a reply from the master */
2782 if (!error && down_conversion(lkb)) {
2783 remove_from_waiters(lkb, DLM_MSG_CONVERT_REPLY);
2784 r->res_ls->ls_stub_ms.m_type = DLM_MSG_CONVERT_REPLY;
2785 r->res_ls->ls_stub_ms.m_result = 0;
2786 r->res_ls->ls_stub_ms.m_flags = lkb->lkb_flags;
2787 __receive_convert_reply(r, lkb, &r->res_ls->ls_stub_ms);
2788 }
2789
2790 return error;
2791 }
2792
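/* A down-conversion (e.g. PR to NL) can always be granted immediately,
   so the master sends no reply; the stub DLM_MSG_CONVERT_REPLY built
   above completes the convert locally with result 0. */
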
2793 /* FIXME: if this lkb is the only lock we hold on the rsb, then set
2794 MASTER_UNCERTAIN to force the next request on the rsb to confirm
2795 that the master is still correct. */
2796
2797 static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2798 {
2799 return send_common(r, lkb, DLM_MSG_UNLOCK);
2800 }
2801
2802 static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
2803 {
2804 return send_common(r, lkb, DLM_MSG_CANCEL);
2805 }
2806
2807 static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb)
2808 {
2809 struct dlm_message *ms;
2810 struct dlm_mhandle *mh;
2811 int to_nodeid, error;
2812
2813 to_nodeid = lkb->lkb_nodeid;
2814
2815 error = create_message(r, lkb, to_nodeid, DLM_MSG_GRANT, &ms, &mh);
2816 if (error)
2817 goto out;
2818
2819 send_args(r, lkb, ms);
2820
2821 ms->m_result = 0;
2822
2823 error = send_message(mh, ms);
2824 out:
2825 return error;
2826 }
2827
2828 static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode)
2829 {
2830 struct dlm_message *ms;
2831 struct dlm_mhandle *mh;
2832 int to_nodeid, error;
2833
2834 to_nodeid = lkb->lkb_nodeid;
2835
2836 error = create_message(r, NULL, to_nodeid, DLM_MSG_BAST, &ms, &mh);
2837 if (error)
2838 goto out;
2839
2840 send_args(r, lkb, ms);
2841
2842 ms->m_bastmode = mode;
2843
2844 error = send_message(mh, ms);
2845 out:
2846 return error;
2847 }
2848
2849 static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb)
2850 {
2851 struct dlm_message *ms;
2852 struct dlm_mhandle *mh;
2853 int to_nodeid, error;
2854
2855 error = add_to_waiters(lkb, DLM_MSG_LOOKUP);
2856 if (error)
2857 return error;
2858
2859 to_nodeid = dlm_dir_nodeid(r);
2860
2861 error = create_message(r, NULL, to_nodeid, DLM_MSG_LOOKUP, &ms, &mh);
2862 if (error)
2863 goto fail;
2864
2865 send_args(r, lkb, ms);
2866
2867 error = send_message(mh, ms);
2868 if (error)
2869 goto fail;
2870 return 0;
2871
2872 fail:
2873 remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
2874 return error;
2875 }
2876
2877 static int send_remove(struct dlm_rsb *r)
2878 {
2879 struct dlm_message *ms;
2880 struct dlm_mhandle *mh;
2881 int to_nodeid, error;
2882
2883 to_nodeid = dlm_dir_nodeid(r);
2884
2885 error = create_message(r, NULL, to_nodeid, DLM_MSG_REMOVE, &ms, &mh);
2886 if (error)
2887 goto out;
2888
2889 memcpy(ms->m_extra, r->res_name, r->res_length);
2890 ms->m_hash = r->res_hash;
2891
2892 error = send_message(mh, ms);
2893 out:
2894 return error;
2895 }
2896
2897 static int send_common_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
2898 int mstype, int rv)
2899 {
2900 struct dlm_message *ms;
2901 struct dlm_mhandle *mh;
2902 int to_nodeid, error;
2903
2904 to_nodeid = lkb->lkb_nodeid;
2905
2906 error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
2907 if (error)
2908 goto out;
2909
2910 send_args(r, lkb, ms);
2911
2912 ms->m_result = rv;
2913
2914 error = send_message(mh, ms);
2915 out:
2916 return error;
2917 }
2918
2919 static int send_request_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
2920 {
2921 return send_common_reply(r, lkb, DLM_MSG_REQUEST_REPLY, rv);
2922 }
2923
2924 static int send_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
2925 {
2926 return send_common_reply(r, lkb, DLM_MSG_CONVERT_REPLY, rv);
2927 }
2928
2929 static int send_unlock_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
2930 {
2931 return send_common_reply(r, lkb, DLM_MSG_UNLOCK_REPLY, rv);
2932 }
2933
2934 static int send_cancel_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
2935 {
2936 return send_common_reply(r, lkb, DLM_MSG_CANCEL_REPLY, rv);
2937 }
2938
2939 static int send_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms_in,
2940 int ret_nodeid, int rv)
2941 {
2942 struct dlm_rsb *r = &ls->ls_stub_rsb;
2943 struct dlm_message *ms;
2944 struct dlm_mhandle *mh;
2945 int error, nodeid = ms_in->m_header.h_nodeid;
2946
2947 error = create_message(r, NULL, nodeid, DLM_MSG_LOOKUP_REPLY, &ms, &mh);
2948 if (error)
2949 goto out;
2950
2951 ms->m_lkid = ms_in->m_lkid;
2952 ms->m_result = rv;
2953 ms->m_nodeid = ret_nodeid;
2954
2955 error = send_message(mh, ms);
2956 out:
2957 return error;
2958 }
2959
2960 /* which args we save from a received message depends heavily on the type
2961 of message, unlike the send side where we can safely send everything about
2962 the lkb for any type of message */
2963
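/* Only the low 16 bits of lkb_flags travel in messages; the high 16
   bits hold node-local state (e.g. DLM_IFL_MSTCPY, DLM_IFL_RESEND)
   that must survive an incoming message, hence the masking below. */
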
2964 static void receive_flags(struct dlm_lkb *lkb, struct dlm_message *ms)
2965 {
2966 lkb->lkb_exflags = ms->m_exflags;
2967 lkb->lkb_sbflags = ms->m_sbflags;
2968 lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
2969 (ms->m_flags & 0x0000FFFF);
2970 }
2971
2972 static void receive_flags_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
2973 {
2974 lkb->lkb_sbflags = ms->m_sbflags;
2975 lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
2976 (ms->m_flags & 0x0000FFFF);
2977 }
2978
2979 static int receive_extralen(struct dlm_message *ms)
2980 {
2981 return (ms->m_header.h_length - sizeof(struct dlm_message));
2982 }
2983
2984 static int receive_lvb(struct dlm_ls *ls, struct dlm_lkb *lkb,
2985 struct dlm_message *ms)
2986 {
2987 int len;
2988
2989 if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
2990 if (!lkb->lkb_lvbptr)
2991 lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
2992 if (!lkb->lkb_lvbptr)
2993 return -ENOMEM;
2994 len = receive_extralen(ms);
2995 memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
2996 }
2997 return 0;
2998 }
2999
3000 static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3001 struct dlm_message *ms)
3002 {
3003 lkb->lkb_nodeid = ms->m_header.h_nodeid;
3004 lkb->lkb_ownpid = ms->m_pid;
3005 lkb->lkb_remid = ms->m_lkid;
3006 lkb->lkb_grmode = DLM_LOCK_IV;
3007 lkb->lkb_rqmode = ms->m_rqmode;
3008 lkb->lkb_bastaddr = (void *) (long) (ms->m_asts & AST_BAST);
3009 lkb->lkb_astaddr = (void *) (long) (ms->m_asts & AST_COMP);
3010
3011 if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
3012 /* lkb was just created so there won't be an lvb yet */
3013 lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
3014 if (!lkb->lkb_lvbptr)
3015 return -ENOMEM;
3016 }
3017
3018 return 0;
3019 }
3020
3021 static int receive_convert_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3022 struct dlm_message *ms)
3023 {
3024 if (lkb->lkb_status != DLM_LKSTS_GRANTED)
3025 return -EBUSY;
3026
3027 if (receive_lvb(ls, lkb, ms))
3028 return -ENOMEM;
3029
3030 lkb->lkb_rqmode = ms->m_rqmode;
3031 lkb->lkb_lvbseq = ms->m_lvbseq;
3032
3033 return 0;
3034 }
3035
3036 static int receive_unlock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3037 struct dlm_message *ms)
3038 {
3039 if (receive_lvb(ls, lkb, ms))
3040 return -ENOMEM;
3041 return 0;
3042 }
3043
3044 /* We fill in the stub-lkb fields with the info that send_xxxx_reply()
3045 uses to send a reply and that the remote end uses to process the reply. */
3046
3047 static void setup_stub_lkb(struct dlm_ls *ls, struct dlm_message *ms)
3048 {
3049 struct dlm_lkb *lkb = &ls->ls_stub_lkb;
3050 lkb->lkb_nodeid = ms->m_header.h_nodeid;
3051 lkb->lkb_remid = ms->m_lkid;
3052 }
3053
3054 /* This is called after the rsb is locked so that we can safely inspect
3055 fields in the lkb. */
3056
3057 static int validate_message(struct dlm_lkb *lkb, struct dlm_message *ms)
3058 {
3059 int from = ms->m_header.h_nodeid;
3060 int error = 0;
3061
3062 switch (ms->m_type) {
3063 case DLM_MSG_CONVERT:
3064 case DLM_MSG_UNLOCK:
3065 case DLM_MSG_CANCEL:
3066 if (!is_master_copy(lkb) || lkb->lkb_nodeid != from)
3067 error = -EINVAL;
3068 break;
3069
3070 case DLM_MSG_CONVERT_REPLY:
3071 case DLM_MSG_UNLOCK_REPLY:
3072 case DLM_MSG_CANCEL_REPLY:
3073 case DLM_MSG_GRANT:
3074 case DLM_MSG_BAST:
3075 if (!is_process_copy(lkb) || lkb->lkb_nodeid != from)
3076 error = -EINVAL;
3077 break;
3078
3079 case DLM_MSG_REQUEST_REPLY:
3080 if (!is_process_copy(lkb))
3081 error = -EINVAL;
3082 else if (lkb->lkb_nodeid != -1 && lkb->lkb_nodeid != from)
3083 error = -EINVAL;
3084 break;
3085
3086 default:
3087 error = -EINVAL;
3088 }
3089
3090 if (error)
3091 log_error(lkb->lkb_resource->res_ls,
3092 "ignore invalid message %d from %d %x %x %x %d",
3093 ms->m_type, from, lkb->lkb_id, lkb->lkb_remid,
3094 lkb->lkb_flags, lkb->lkb_nodeid);
3095 return error;
3096 }
3097
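/* An illustrative case of what the checks above catch: if mastership of
   a resource moved from node 2 to node 3 during recovery, a stale
   DLM_MSG_GRANT from node 2 can still arrive for a process-copy lkb
   whose lkb_nodeid is now 3; the lkb_nodeid != from test drops it with
   the "ignore invalid message" log instead of mangling lock state. */
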
3098 static void receive_request(struct dlm_ls *ls, struct dlm_message *ms)
3099 {
3100 struct dlm_lkb *lkb;
3101 struct dlm_rsb *r;
3102 int error, namelen;
3103
3104 error = create_lkb(ls, &lkb);
3105 if (error)
3106 goto fail;
3107
3108 receive_flags(lkb, ms);
3109 lkb->lkb_flags |= DLM_IFL_MSTCPY;
3110 error = receive_request_args(ls, lkb, ms);
3111 if (error) {
3112 __put_lkb(ls, lkb);
3113 goto fail;
3114 }
3115
3116 namelen = receive_extralen(ms);
3117
3118 error = find_rsb(ls, ms->m_extra, namelen, R_MASTER, &r);
3119 if (error) {
3120 __put_lkb(ls, lkb);
3121 goto fail;
3122 }
3123
3124 lock_rsb(r);
3125
3126 attach_lkb(r, lkb);
3127 error = do_request(r, lkb);
3128 send_request_reply(r, lkb, error);
3129
3130 unlock_rsb(r);
3131 put_rsb(r);
3132
3133 if (error == -EINPROGRESS)
3134 error = 0;
3135 if (error)
3136 dlm_put_lkb(lkb);
3137 return;
3138
3139 fail:
3140 setup_stub_lkb(ls, ms);
3141 send_request_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
3142 }
3143
3144 static void receive_convert(struct dlm_ls *ls, struct dlm_message *ms)
3145 {
3146 struct dlm_lkb *lkb;
3147 struct dlm_rsb *r;
3148 int error, reply = 1;
3149
3150 error = find_lkb(ls, ms->m_remid, &lkb);
3151 if (error)
3152 goto fail;
3153
3154 r = lkb->lkb_resource;
3155
3156 hold_rsb(r);
3157 lock_rsb(r);
3158
3159 error = validate_message(lkb, ms);
3160 if (error)
3161 goto out;
3162
3163 receive_flags(lkb, ms);
3164 error = receive_convert_args(ls, lkb, ms);
3165 if (error)
3166 goto out_reply;
3167 reply = !down_conversion(lkb);
3168
3169 error = do_convert(r, lkb);
3170 out_reply:
3171 if (reply)
3172 send_convert_reply(r, lkb, error);
3173 out:
3174 unlock_rsb(r);
3175 put_rsb(r);
3176 dlm_put_lkb(lkb);
3177 return;
3178
3179 fail:
3180 setup_stub_lkb(ls, ms);
3181 send_convert_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
3182 }
3183
3184 static void receive_unlock(struct dlm_ls *ls, struct dlm_message *ms)
3185 {
3186 struct dlm_lkb *lkb;
3187 struct dlm_rsb *r;
3188 int error;
3189
3190 error = find_lkb(ls, ms->m_remid, &lkb);
3191 if (error)
3192 goto fail;
3193
3194 r = lkb->lkb_resource;
3195
3196 hold_rsb(r);
3197 lock_rsb(r);
3198
3199 error = validate_message(lkb, ms);
3200 if (error)
3201 goto out;
3202
3203 receive_flags(lkb, ms);
3204 error = receive_unlock_args(ls, lkb, ms);
3205 if (error)
3206 goto out_reply;
3207
3208 error = do_unlock(r, lkb);
3209 out_reply:
3210 send_unlock_reply(r, lkb, error);
3211 out:
3212 unlock_rsb(r);
3213 put_rsb(r);
3214 dlm_put_lkb(lkb);
3215 return;
3216
3217 fail:
3218 setup_stub_lkb(ls, ms);
3219 send_unlock_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
3220 }
3221
3222 static void receive_cancel(struct dlm_ls *ls, struct dlm_message *ms)
3223 {
3224 struct dlm_lkb *lkb;
3225 struct dlm_rsb *r;
3226 int error;
3227
3228 error = find_lkb(ls, ms->m_remid, &lkb);
3229 if (error)
3230 goto fail;
3231
3232 receive_flags(lkb, ms);
3233
3234 r = lkb->lkb_resource;
3235
3236 hold_rsb(r);
3237 lock_rsb(r);
3238
3239 error = validate_message(lkb, ms);
3240 if (error)
3241 goto out;
3242
3243 error = do_cancel(r, lkb);
3244 send_cancel_reply(r, lkb, error);
3245 out:
3246 unlock_rsb(r);
3247 put_rsb(r);
3248 dlm_put_lkb(lkb);
3249 return;
3250
3251 fail:
3252 setup_stub_lkb(ls, ms);
3253 send_cancel_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
3254 }
3255
3256 static void receive_grant(struct dlm_ls *ls, struct dlm_message *ms)
3257 {
3258 struct dlm_lkb *lkb;
3259 struct dlm_rsb *r;
3260 int error;
3261
3262 error = find_lkb(ls, ms->m_remid, &lkb);
3263 if (error) {
3264 log_debug(ls, "receive_grant from %d no lkb %x",
3265 ms->m_header.h_nodeid, ms->m_remid);
3266 return;
3267 }
3268
3269 r = lkb->lkb_resource;
3270
3271 hold_rsb(r);
3272 lock_rsb(r);
3273
3274 error = validate_message(lkb, ms);
3275 if (error)
3276 goto out;
3277
3278 receive_flags_reply(lkb, ms);
3279 if (is_altmode(lkb))
3280 munge_altmode(lkb, ms);
3281 grant_lock_pc(r, lkb, ms);
3282 queue_cast(r, lkb, 0);
3283 out:
3284 unlock_rsb(r);
3285 put_rsb(r);
3286 dlm_put_lkb(lkb);
3287 }
3288
3289 static void receive_bast(struct dlm_ls *ls, struct dlm_message *ms)
3290 {
3291 struct dlm_lkb *lkb;
3292 struct dlm_rsb *r;
3293 int error;
3294
3295 error = find_lkb(ls, ms->m_remid, &lkb);
3296 if (error) {
3297 log_debug(ls, "receive_bast from %d no lkb %x",
3298 ms->m_header.h_nodeid, ms->m_remid);
3299 return;
3300 }
3301
3302 r = lkb->lkb_resource;
3303
3304 hold_rsb(r);
3305 lock_rsb(r);
3306
3307 error = validate_message(lkb, ms);
3308 if (error)
3309 goto out;
3310
3311 queue_bast(r, lkb, ms->m_bastmode);
3312 out:
3313 unlock_rsb(r);
3314 put_rsb(r);
3315 dlm_put_lkb(lkb);
3316 }
3317
3318 static void receive_lookup(struct dlm_ls *ls, struct dlm_message *ms)
3319 {
3320 int len, error, ret_nodeid, dir_nodeid, from_nodeid, our_nodeid;
3321
3322 from_nodeid = ms->m_header.h_nodeid;
3323 our_nodeid = dlm_our_nodeid();
3324
3325 len = receive_extralen(ms);
3326
3327 dir_nodeid = dlm_hash2nodeid(ls, ms->m_hash);
3328 if (dir_nodeid != our_nodeid) {
3329 log_error(ls, "lookup dir_nodeid %d from %d",
3330 dir_nodeid, from_nodeid);
3331 error = -EINVAL;
3332 ret_nodeid = -1;
3333 goto out;
3334 }
3335
3336 error = dlm_dir_lookup(ls, from_nodeid, ms->m_extra, len, &ret_nodeid);
3337
3338 	/* Optimization: we're the master, so treat the lookup as a request */
3339 if (!error && ret_nodeid == our_nodeid) {
3340 receive_request(ls, ms);
3341 return;
3342 }
3343 out:
3344 send_lookup_reply(ls, ms, ret_nodeid, error);
3345 }
3346
3347 static void receive_remove(struct dlm_ls *ls, struct dlm_message *ms)
3348 {
3349 int len, dir_nodeid, from_nodeid;
3350
3351 from_nodeid = ms->m_header.h_nodeid;
3352
3353 len = receive_extralen(ms);
3354
3355 dir_nodeid = dlm_hash2nodeid(ls, ms->m_hash);
3356 if (dir_nodeid != dlm_our_nodeid()) {
3357 log_error(ls, "remove dir entry dir_nodeid %d from %d",
3358 dir_nodeid, from_nodeid);
3359 return;
3360 }
3361
3362 dlm_dir_remove_entry(ls, from_nodeid, ms->m_extra, len);
3363 }
3364
3365 static void receive_purge(struct dlm_ls *ls, struct dlm_message *ms)
3366 {
3367 do_purge(ls, ms->m_nodeid, ms->m_pid);
3368 }
3369
3370 static void receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms)
3371 {
3372 struct dlm_lkb *lkb;
3373 struct dlm_rsb *r;
3374 int error, mstype, result;
3375
3376 error = find_lkb(ls, ms->m_remid, &lkb);
3377 if (error) {
3378 log_debug(ls, "receive_request_reply from %d no lkb %x",
3379 ms->m_header.h_nodeid, ms->m_remid);
3380 return;
3381 }
3382
3383 r = lkb->lkb_resource;
3384 hold_rsb(r);
3385 lock_rsb(r);
3386
3387 error = validate_message(lkb, ms);
3388 if (error)
3389 goto out;
3390
3391 mstype = lkb->lkb_wait_type;
3392 error = remove_from_waiters(lkb, DLM_MSG_REQUEST_REPLY);
3393 if (error)
3394 goto out;
3395
3396 	/* Optimization: the dir node was also the master, so it took our
3397 	   lookup as a request and sent a request reply instead of a lookup reply */
3398 if (mstype == DLM_MSG_LOOKUP) {
3399 r->res_nodeid = ms->m_header.h_nodeid;
3400 lkb->lkb_nodeid = r->res_nodeid;
3401 }
3402
3403 /* this is the value returned from do_request() on the master */
3404 result = ms->m_result;
3405
3406 switch (result) {
3407 case -EAGAIN:
3408 /* request would block (be queued) on remote master */
3409 queue_cast(r, lkb, -EAGAIN);
3410 confirm_master(r, -EAGAIN);
3411 unhold_lkb(lkb); /* undoes create_lkb() */
3412 break;
3413
3414 case -EINPROGRESS:
3415 case 0:
3416 /* request was queued or granted on remote master */
3417 receive_flags_reply(lkb, ms);
3418 lkb->lkb_remid = ms->m_lkid;
3419 if (is_altmode(lkb))
3420 munge_altmode(lkb, ms);
3421 if (result) {
3422 add_lkb(r, lkb, DLM_LKSTS_WAITING);
3423 add_timeout(lkb);
3424 } else {
3425 grant_lock_pc(r, lkb, ms);
3426 queue_cast(r, lkb, 0);
3427 }
3428 confirm_master(r, result);
3429 break;
3430
3431 case -EBADR:
3432 case -ENOTBLK:
3433 /* find_rsb failed to find rsb or rsb wasn't master */
3434 log_debug(ls, "receive_request_reply %x %x master diff %d %d",
3435 lkb->lkb_id, lkb->lkb_flags, r->res_nodeid, result);
3436 r->res_nodeid = -1;
3437 lkb->lkb_nodeid = -1;
3438
3439 if (is_overlap(lkb)) {
3440 /* we'll ignore error in cancel/unlock reply */
3441 queue_cast_overlap(r, lkb);
3442 confirm_master(r, result);
3443 unhold_lkb(lkb); /* undoes create_lkb() */
3444 } else
3445 _request_lock(r, lkb);
3446 break;
3447
3448 default:
3449 log_error(ls, "receive_request_reply %x error %d",
3450 lkb->lkb_id, result);
3451 }
3452
3453 if (is_overlap_unlock(lkb) && (result == 0 || result == -EINPROGRESS)) {
3454 log_debug(ls, "receive_request_reply %x result %d unlock",
3455 lkb->lkb_id, result);
3456 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
3457 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
3458 send_unlock(r, lkb);
3459 } else if (is_overlap_cancel(lkb) && (result == -EINPROGRESS)) {
3460 log_debug(ls, "receive_request_reply %x cancel", lkb->lkb_id);
3461 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
3462 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
3463 send_cancel(r, lkb);
3464 } else {
3465 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
3466 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
3467 }
3468 out:
3469 unlock_rsb(r);
3470 put_rsb(r);
3471 dlm_put_lkb(lkb);
3472 }
3473
3474 static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
3475 struct dlm_message *ms)
3476 {
3477 /* this is the value returned from do_convert() on the master */
3478 switch (ms->m_result) {
3479 case -EAGAIN:
3480 /* convert would block (be queued) on remote master */
3481 queue_cast(r, lkb, -EAGAIN);
3482 break;
3483
3484 case -EDEADLK:
3485 receive_flags_reply(lkb, ms);
3486 revert_lock_pc(r, lkb);
3487 queue_cast(r, lkb, -EDEADLK);
3488 break;
3489
3490 case -EINPROGRESS:
3491 /* convert was queued on remote master */
3492 receive_flags_reply(lkb, ms);
3493 if (is_demoted(lkb))
3494 munge_demoted(lkb, ms);
3495 del_lkb(r, lkb);
3496 add_lkb(r, lkb, DLM_LKSTS_CONVERT);
3497 add_timeout(lkb);
3498 break;
3499
3500 case 0:
3501 /* convert was granted on remote master */
3502 receive_flags_reply(lkb, ms);
3503 if (is_demoted(lkb))
3504 munge_demoted(lkb, ms);
3505 grant_lock_pc(r, lkb, ms);
3506 queue_cast(r, lkb, 0);
3507 break;
3508
3509 default:
3510 log_error(r->res_ls, "receive_convert_reply %x error %d",
3511 lkb->lkb_id, ms->m_result);
3512 }
3513 }
3514
3515 static void _receive_convert_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
3516 {
3517 struct dlm_rsb *r = lkb->lkb_resource;
3518 int error;
3519
3520 hold_rsb(r);
3521 lock_rsb(r);
3522
3523 error = validate_message(lkb, ms);
3524 if (error)
3525 goto out;
3526
3527 /* stub reply can happen with waiters_mutex held */
3528 error = remove_from_waiters_ms(lkb, ms);
3529 if (error)
3530 goto out;
3531
3532 __receive_convert_reply(r, lkb, ms);
3533 out:
3534 unlock_rsb(r);
3535 put_rsb(r);
3536 }
3537
3538 static void receive_convert_reply(struct dlm_ls *ls, struct dlm_message *ms)
3539 {
3540 struct dlm_lkb *lkb;
3541 int error;
3542
3543 error = find_lkb(ls, ms->m_remid, &lkb);
3544 if (error) {
3545 log_debug(ls, "receive_convert_reply from %d no lkb %x",
3546 ms->m_header.h_nodeid, ms->m_remid);
3547 return;
3548 }
3549
3550 _receive_convert_reply(lkb, ms);
3551 dlm_put_lkb(lkb);
3552 }
3553
3554 static void _receive_unlock_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
3555 {
3556 struct dlm_rsb *r = lkb->lkb_resource;
3557 int error;
3558
3559 hold_rsb(r);
3560 lock_rsb(r);
3561
3562 error = validate_message(lkb, ms);
3563 if (error)
3564 goto out;
3565
3566 /* stub reply can happen with waiters_mutex held */
3567 error = remove_from_waiters_ms(lkb, ms);
3568 if (error)
3569 goto out;
3570
3571 /* this is the value returned from do_unlock() on the master */
3572
3573 switch (ms->m_result) {
3574 case -DLM_EUNLOCK:
3575 receive_flags_reply(lkb, ms);
3576 remove_lock_pc(r, lkb);
3577 queue_cast(r, lkb, -DLM_EUNLOCK);
3578 break;
3579 case -ENOENT:
3580 break;
3581 default:
3582 log_error(r->res_ls, "receive_unlock_reply %x error %d",
3583 lkb->lkb_id, ms->m_result);
3584 }
3585 out:
3586 unlock_rsb(r);
3587 put_rsb(r);
3588 }
3589
3590 static void receive_unlock_reply(struct dlm_ls *ls, struct dlm_message *ms)
3591 {
3592 struct dlm_lkb *lkb;
3593 int error;
3594
3595 error = find_lkb(ls, ms->m_remid, &lkb);
3596 if (error) {
3597 log_debug(ls, "receive_unlock_reply from %d no lkb %x",
3598 ms->m_header.h_nodeid, ms->m_remid);
3599 return;
3600 }
3601
3602 _receive_unlock_reply(lkb, ms);
3603 dlm_put_lkb(lkb);
3604 }
3605
3606 static void _receive_cancel_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
3607 {
3608 struct dlm_rsb *r = lkb->lkb_resource;
3609 int error;
3610
3611 hold_rsb(r);
3612 lock_rsb(r);
3613
3614 error = validate_message(lkb, ms);
3615 if (error)
3616 goto out;
3617
3618 /* stub reply can happen with waiters_mutex held */
3619 error = remove_from_waiters_ms(lkb, ms);
3620 if (error)
3621 goto out;
3622
3623 /* this is the value returned from do_cancel() on the master */
3624
3625 switch (ms->m_result) {
3626 case -DLM_ECANCEL:
3627 receive_flags_reply(lkb, ms);
3628 revert_lock_pc(r, lkb);
3629 queue_cast(r, lkb, -DLM_ECANCEL);
3630 break;
3631 case 0:
3632 break;
3633 default:
3634 log_error(r->res_ls, "receive_cancel_reply %x error %d",
3635 lkb->lkb_id, ms->m_result);
3636 }
3637 out:
3638 unlock_rsb(r);
3639 put_rsb(r);
3640 }
3641
3642 static void receive_cancel_reply(struct dlm_ls *ls, struct dlm_message *ms)
3643 {
3644 struct dlm_lkb *lkb;
3645 int error;
3646
3647 error = find_lkb(ls, ms->m_remid, &lkb);
3648 if (error) {
3649 log_debug(ls, "receive_cancel_reply from %d no lkb %x",
3650 ms->m_header.h_nodeid, ms->m_remid);
3651 return;
3652 }
3653
3654 _receive_cancel_reply(lkb, ms);
3655 dlm_put_lkb(lkb);
3656 }
3657
3658 static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms)
3659 {
3660 struct dlm_lkb *lkb;
3661 struct dlm_rsb *r;
3662 int error, ret_nodeid;
3663
3664 error = find_lkb(ls, ms->m_lkid, &lkb);
3665 if (error) {
3666 log_error(ls, "receive_lookup_reply no lkb");
3667 return;
3668 }
3669
3670 	/* ms->m_result is the value returned by dlm_dir_lookup on the dir node.
3671 	   FIXME: will a non-zero error ever be returned? */
3672
3673 r = lkb->lkb_resource;
3674 hold_rsb(r);
3675 lock_rsb(r);
3676
3677 error = remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
3678 if (error)
3679 goto out;
3680
3681 ret_nodeid = ms->m_nodeid;
3682 if (ret_nodeid == dlm_our_nodeid()) {
3683 r->res_nodeid = 0;
3684 ret_nodeid = 0;
3685 r->res_first_lkid = 0;
3686 } else {
3687 /* set_master() will copy res_nodeid to lkb_nodeid */
3688 r->res_nodeid = ret_nodeid;
3689 }
3690
3691 if (is_overlap(lkb)) {
3692 log_debug(ls, "receive_lookup_reply %x unlock %x",
3693 lkb->lkb_id, lkb->lkb_flags);
3694 queue_cast_overlap(r, lkb);
3695 unhold_lkb(lkb); /* undoes create_lkb() */
3696 goto out_list;
3697 }
3698
3699 _request_lock(r, lkb);
3700
3701 out_list:
3702 if (!ret_nodeid)
3703 process_lookup_list(r);
3704 out:
3705 unlock_rsb(r);
3706 put_rsb(r);
3707 dlm_put_lkb(lkb);
3708 }
3709
3710 static void _receive_message(struct dlm_ls *ls, struct dlm_message *ms)
3711 {
3712 if (!dlm_is_member(ls, ms->m_header.h_nodeid)) {
3713 log_debug(ls, "ignore non-member message %d from %d %x %x %d",
3714 ms->m_type, ms->m_header.h_nodeid, ms->m_lkid,
3715 ms->m_remid, ms->m_result);
3716 return;
3717 }
3718
3719 switch (ms->m_type) {
3720
3721 /* messages sent to a master node */
3722
3723 case DLM_MSG_REQUEST:
3724 receive_request(ls, ms);
3725 break;
3726
3727 case DLM_MSG_CONVERT:
3728 receive_convert(ls, ms);
3729 break;
3730
3731 case DLM_MSG_UNLOCK:
3732 receive_unlock(ls, ms);
3733 break;
3734
3735 case DLM_MSG_CANCEL:
3736 receive_cancel(ls, ms);
3737 break;
3738
3739 /* messages sent from a master node (replies to above) */
3740
3741 case DLM_MSG_REQUEST_REPLY:
3742 receive_request_reply(ls, ms);
3743 break;
3744
3745 case DLM_MSG_CONVERT_REPLY:
3746 receive_convert_reply(ls, ms);
3747 break;
3748
3749 case DLM_MSG_UNLOCK_REPLY:
3750 receive_unlock_reply(ls, ms);
3751 break;
3752
3753 case DLM_MSG_CANCEL_REPLY:
3754 receive_cancel_reply(ls, ms);
3755 break;
3756
3757 /* messages sent from a master node (only two types of async msg) */
3758
3759 case DLM_MSG_GRANT:
3760 receive_grant(ls, ms);
3761 break;
3762
3763 case DLM_MSG_BAST:
3764 receive_bast(ls, ms);
3765 break;
3766
3767 /* messages sent to a dir node */
3768
3769 case DLM_MSG_LOOKUP:
3770 receive_lookup(ls, ms);
3771 break;
3772
3773 case DLM_MSG_REMOVE:
3774 receive_remove(ls, ms);
3775 break;
3776
3777 /* messages sent from a dir node (remove has no reply) */
3778
3779 case DLM_MSG_LOOKUP_REPLY:
3780 receive_lookup_reply(ls, ms);
3781 break;
3782
3783 /* other messages */
3784
3785 case DLM_MSG_PURGE:
3786 receive_purge(ls, ms);
3787 break;
3788
3789 default:
3790 log_error(ls, "unknown message type %d", ms->m_type);
3791 }
3792
3793 dlm_astd_wake();
3794 }
3795
3796 /* If the lockspace is in recovery mode (locking stopped), then normal
3797 messages are saved on the requestqueue for processing after recovery is
3798 done. When not in recovery mode, we wait for dlm_recoverd to drain saved
3799 messages off the requestqueue before we process new ones. This occurs right
3800 after recovery completes when we transition from saving all messages on
3801 requestqueue, to processing all the saved messages, to processing new
3802 messages as they arrive. */
3803
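/* Timeline sketch (illustrative):

	locking stopped  -> messages parked via dlm_add_requestqueue()
	recovery done    -> dlm_recoverd drains them through
	                    dlm_receive_message_saved()
	new arrivals     -> dlm_wait_requestqueue() until the queue is
	                    empty, then _receive_message() directly
*/
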
3804 static void dlm_receive_message(struct dlm_ls *ls, struct dlm_message *ms,
3805 int nodeid)
3806 {
3807 if (dlm_locking_stopped(ls)) {
3808 dlm_add_requestqueue(ls, nodeid, (struct dlm_header *) ms);
3809 } else {
3810 dlm_wait_requestqueue(ls);
3811 _receive_message(ls, ms);
3812 }
3813 }
3814
3815 /* This is called by dlm_recoverd to process messages that were saved on
3816 the requestqueue. */
3817
3818 void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms)
3819 {
3820 _receive_message(ls, ms);
3821 }
3822
3823 /* This is called by the midcomms layer when something is received for
3824 the lockspace. It could be either a MSG (normal message sent as part of
3825 standard locking activity) or an RCOM (recovery message sent as part of
3826 lockspace recovery). */
3827
3828 void dlm_receive_buffer(struct dlm_header *hd, int nodeid)
3829 {
3830 struct dlm_message *ms = (struct dlm_message *) hd;
3831 struct dlm_rcom *rc = (struct dlm_rcom *) hd;
3832 struct dlm_ls *ls;
3833 int type = 0;
3834
3835 switch (hd->h_cmd) {
3836 case DLM_MSG:
3837 dlm_message_in(ms);
3838 type = ms->m_type;
3839 break;
3840 case DLM_RCOM:
3841 dlm_rcom_in(rc);
3842 type = rc->rc_type;
3843 break;
3844 default:
3845 log_print("invalid h_cmd %d from %u", hd->h_cmd, nodeid);
3846 return;
3847 }
3848
3849 if (hd->h_nodeid != nodeid) {
3850 log_print("invalid h_nodeid %d from %d lockspace %x",
3851 hd->h_nodeid, nodeid, hd->h_lockspace);
3852 return;
3853 }
3854
3855 ls = dlm_find_lockspace_global(hd->h_lockspace);
3856 if (!ls) {
3857 log_print("invalid h_lockspace %x from %d cmd %d type %d",
3858 hd->h_lockspace, nodeid, hd->h_cmd, type);
3859
3860 if (hd->h_cmd == DLM_RCOM && type == DLM_RCOM_STATUS)
3861 dlm_send_ls_not_ready(nodeid, rc);
3862 return;
3863 }
3864
3865 /* this rwsem allows dlm_ls_stop() to wait for all dlm_recv threads to
3866 be inactive (in this ls) before transitioning to recovery mode */
3867
3868 down_read(&ls->ls_recv_active);
3869 if (hd->h_cmd == DLM_MSG)
3870 dlm_receive_message(ls, ms, nodeid);
3871 else
3872 dlm_receive_rcom(ls, rc, nodeid);
3873 up_read(&ls->ls_recv_active);
3874
3875 dlm_put_lockspace(ls);
3876 }
3877
3878 static void recover_convert_waiter(struct dlm_ls *ls, struct dlm_lkb *lkb)
3879 {
3880 if (middle_conversion(lkb)) {
3881 hold_lkb(lkb);
3882 ls->ls_stub_ms.m_type = DLM_MSG_CONVERT_REPLY;
3883 ls->ls_stub_ms.m_result = -EINPROGRESS;
3884 ls->ls_stub_ms.m_flags = lkb->lkb_flags;
3885 ls->ls_stub_ms.m_header.h_nodeid = lkb->lkb_nodeid;
3886 _receive_convert_reply(lkb, &ls->ls_stub_ms);
3887
3888 /* Same special case as in receive_rcom_lock_args() */
3889 lkb->lkb_grmode = DLM_LOCK_IV;
3890 rsb_set_flag(lkb->lkb_resource, RSB_RECOVER_CONVERT);
3891 unhold_lkb(lkb);
3892
3893 } else if (lkb->lkb_rqmode >= lkb->lkb_grmode) {
3894 lkb->lkb_flags |= DLM_IFL_RESEND;
3895 }
3896
3897 /* lkb->lkb_rqmode < lkb->lkb_grmode shouldn't happen since down
3898 conversions are async; there's no reply from the remote master */
3899 }
3900
3901 /* A waiting lkb needs recovery if the master node has failed, or
3902 the master node is changing (only when no directory is used) */
3903
3904 static int waiter_needs_recovery(struct dlm_ls *ls, struct dlm_lkb *lkb)
3905 {
3906 if (dlm_is_removed(ls, lkb->lkb_nodeid))
3907 return 1;
3908
3909 if (!dlm_no_directory(ls))
3910 return 0;
3911
3912 if (dlm_dir_nodeid(lkb->lkb_resource) != lkb->lkb_nodeid)
3913 return 1;
3914
3915 return 0;
3916 }
3917
3918 /* Recovery for locks that are waiting for replies from nodes that are now
3919 gone. We can just complete unlocks and cancels by faking a reply from the
3920 dead node. Requests and up-conversions we flag to be resent after
3921 recovery. Down-conversions can just be completed with a fake reply like
3922 unlocks. Conversions between PR and CW need special attention. */
3923
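/* Faked-reply sketch (illustrative): for an lkb waiting on a
   DLM_MSG_UNLOCK sent to a node that died, ls_stub_ms is filled in as
   if the master had replied -DLM_EUNLOCK and handed to
   _receive_unlock_reply(), so the normal completion path (remove the
   lock, queue the ast) runs unchanged. */
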
3924 void dlm_recover_waiters_pre(struct dlm_ls *ls)
3925 {
3926 struct dlm_lkb *lkb, *safe;
3927 int wait_type, stub_unlock_result, stub_cancel_result;
3928
3929 mutex_lock(&ls->ls_waiters_mutex);
3930
3931 list_for_each_entry_safe(lkb, safe, &ls->ls_waiters, lkb_wait_reply) {
3932 log_debug(ls, "pre recover waiter lkid %x type %d flags %x",
3933 lkb->lkb_id, lkb->lkb_wait_type, lkb->lkb_flags);
3934
3935 /* all outstanding lookups, regardless of destination will be
3936 resent after recovery is done */
3937
3938 if (lkb->lkb_wait_type == DLM_MSG_LOOKUP) {
3939 lkb->lkb_flags |= DLM_IFL_RESEND;
3940 continue;
3941 }
3942
3943 if (!waiter_needs_recovery(ls, lkb))
3944 continue;
3945
3946 wait_type = lkb->lkb_wait_type;
3947 stub_unlock_result = -DLM_EUNLOCK;
3948 stub_cancel_result = -DLM_ECANCEL;
3949
3950 /* Main reply may have been received leaving a zero wait_type,
3951 but a reply for the overlapping op may not have been
3952 received. In that case we need to fake the appropriate
3953 reply for the overlap op. */
3954
3955 if (!wait_type) {
3956 if (is_overlap_cancel(lkb)) {
3957 wait_type = DLM_MSG_CANCEL;
3958 if (lkb->lkb_grmode == DLM_LOCK_IV)
3959 stub_cancel_result = 0;
3960 }
3961 if (is_overlap_unlock(lkb)) {
3962 wait_type = DLM_MSG_UNLOCK;
3963 if (lkb->lkb_grmode == DLM_LOCK_IV)
3964 stub_unlock_result = -ENOENT;
3965 }
3966
3967 log_debug(ls, "rwpre overlap %x %x %d %d %d",
3968 lkb->lkb_id, lkb->lkb_flags, wait_type,
3969 stub_cancel_result, stub_unlock_result);
3970 }
3971
3972 switch (wait_type) {
3973
3974 case DLM_MSG_REQUEST:
3975 lkb->lkb_flags |= DLM_IFL_RESEND;
3976 break;
3977
3978 case DLM_MSG_CONVERT:
3979 recover_convert_waiter(ls, lkb);
3980 break;
3981
3982 case DLM_MSG_UNLOCK:
3983 hold_lkb(lkb);
3984 ls->ls_stub_ms.m_type = DLM_MSG_UNLOCK_REPLY;
3985 ls->ls_stub_ms.m_result = stub_unlock_result;
3986 ls->ls_stub_ms.m_flags = lkb->lkb_flags;
3987 ls->ls_stub_ms.m_header.h_nodeid = lkb->lkb_nodeid;
3988 _receive_unlock_reply(lkb, &ls->ls_stub_ms);
3989 dlm_put_lkb(lkb);
3990 break;
3991
3992 case DLM_MSG_CANCEL:
3993 hold_lkb(lkb);
3994 ls->ls_stub_ms.m_type = DLM_MSG_CANCEL_REPLY;
3995 ls->ls_stub_ms.m_result = stub_cancel_result;
3996 ls->ls_stub_ms.m_flags = lkb->lkb_flags;
3997 ls->ls_stub_ms.m_header.h_nodeid = lkb->lkb_nodeid;
3998 _receive_cancel_reply(lkb, &ls->ls_stub_ms);
3999 dlm_put_lkb(lkb);
4000 break;
4001
4002 default:
4003 log_error(ls, "invalid lkb wait_type %d %d",
4004 lkb->lkb_wait_type, wait_type);
4005 }
4006 schedule();
4007 }
4008 mutex_unlock(&ls->ls_waiters_mutex);
4009 }
4010
4011 static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls)
4012 {
4013 struct dlm_lkb *lkb;
4014 int found = 0;
4015
4016 mutex_lock(&ls->ls_waiters_mutex);
4017 list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
4018 if (lkb->lkb_flags & DLM_IFL_RESEND) {
4019 hold_lkb(lkb);
4020 found = 1;
4021 break;
4022 }
4023 }
4024 mutex_unlock(&ls->ls_waiters_mutex);
4025
4026 if (!found)
4027 lkb = NULL;
4028 return lkb;
4029 }
4030
4031 /* Deal with lookups and lkb's marked RESEND from _pre. We may now be the
4032 master or dir-node for r. Processing the lkb may result in it being placed
4033 back on waiters. */
4034
4035 /* We do this after normal locking has been enabled and any saved messages
4036 (in requestqueue) have been processed. We should be confident that at
4037 this point we won't get or process a reply to any of these waiting
4038 operations. But, new ops may be coming in on the rsbs/locks here from
4039 userspace or remotely. */
4040
4041 /* There may have been an overlap unlock/cancel prior to recovery or after
4042 recovery. If before, the lkb may still have a positive wait_count; if after,
4043 the overlap flag would just have been set and nothing new sent. We can be
4044 confident here that any replies to either the initial op or overlap ops
4045 prior to recovery have been received. */
4046
4047 int dlm_recover_waiters_post(struct dlm_ls *ls)
4048 {
4049 struct dlm_lkb *lkb;
4050 struct dlm_rsb *r;
4051 int error = 0, mstype, err, oc, ou;
4052
4053 while (1) {
4054 if (dlm_locking_stopped(ls)) {
4055 log_debug(ls, "recover_waiters_post aborted");
4056 error = -EINTR;
4057 break;
4058 }
4059
4060 lkb = find_resend_waiter(ls);
4061 if (!lkb)
4062 break;
4063
4064 r = lkb->lkb_resource;
4065 hold_rsb(r);
4066 lock_rsb(r);
4067
4068 mstype = lkb->lkb_wait_type;
4069 oc = is_overlap_cancel(lkb);
4070 ou = is_overlap_unlock(lkb);
4071 err = 0;
4072
4073 log_debug(ls, "recover_waiters_post %x type %d flags %x %s",
4074 lkb->lkb_id, mstype, lkb->lkb_flags, r->res_name);
4075
4076 /* At this point we assume that we won't get a reply to any
4077 previous op or overlap op on this lock. First, do the equivalent of a
4078 big remove_from_waiters() for all previous ops. */
4079
4080 lkb->lkb_flags &= ~DLM_IFL_RESEND;
4081 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4082 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4083 lkb->lkb_wait_type = 0;
4084 lkb->lkb_wait_count = 0;
4085 mutex_lock(&ls->ls_waiters_mutex);
4086 list_del_init(&lkb->lkb_wait_reply);
4087 mutex_unlock(&ls->ls_waiters_mutex);
4088 unhold_lkb(lkb); /* for waiters list */
4089
4090 if (oc || ou) {
4091 /* do an unlock or cancel instead of resending */
4092 switch (mstype) {
4093 case DLM_MSG_LOOKUP:
4094 case DLM_MSG_REQUEST:
4095 queue_cast(r, lkb, ou ? -DLM_EUNLOCK :
4096 -DLM_ECANCEL);
4097 unhold_lkb(lkb); /* undoes create_lkb() */
4098 break;
4099 case DLM_MSG_CONVERT:
4100 if (oc) {
4101 queue_cast(r, lkb, -DLM_ECANCEL);
4102 } else {
4103 lkb->lkb_exflags |= DLM_LKF_FORCEUNLOCK;
4104 _unlock_lock(r, lkb);
4105 }
4106 break;
4107 default:
4108 err = 1;
4109 }
4110 } else {
4111 switch (mstype) {
4112 case DLM_MSG_LOOKUP:
4113 case DLM_MSG_REQUEST:
4114 _request_lock(r, lkb);
4115 if (is_master(r))
4116 confirm_master(r, 0);
4117 break;
4118 case DLM_MSG_CONVERT:
4119 _convert_lock(r, lkb);
4120 break;
4121 default:
4122 err = 1;
4123 }
4124 }
4125
4126 if (err)
4127 log_error(ls, "recover_waiters_post %x %d %x %d %d",
4128 lkb->lkb_id, mstype, lkb->lkb_flags, oc, ou);
4129 unlock_rsb(r);
4130 put_rsb(r);
4131 dlm_put_lkb(lkb);
4132 }
4133
4134 return error;
4135 }
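
/* Not part of lock.c: a self-contained, user-space model of the decision
 * table implemented by dlm_recover_waiters_post() above -- what happens to
 * a resent waiter given its message type and any overlapping unlock/cancel.
 * The enum and the action strings are illustrative stand-ins. */
#include <stdio.h>

enum { MSG_LOOKUP, MSG_REQUEST, MSG_CONVERT, MSG_OTHER };

static const char *resend_action(int mstype, int oc, int ou)
{
	if (oc || ou) {
		switch (mstype) {
		case MSG_LOOKUP:
		case MSG_REQUEST:
			/* original op never completed; complete the
			   overlapping unlock/cancel instead */
			return ou ? "cast -DLM_EUNLOCK" : "cast -DLM_ECANCEL";
		case MSG_CONVERT:
			/* cancel the convert, or force-unlock the lock */
			return oc ? "cast -DLM_ECANCEL" : "force unlock";
		default:
			return "error";
		}
	}
	switch (mstype) {
	case MSG_LOOKUP:
	case MSG_REQUEST:
		return "resend request";
	case MSG_CONVERT:
		return "resend convert";
	default:
		return "error";
	}
}

int main(void)
{
	printf("%s\n", resend_action(MSG_REQUEST, 0, 1)); /* cast -DLM_EUNLOCK */
	printf("%s\n", resend_action(MSG_CONVERT, 0, 0)); /* resend convert */
	return 0;
}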
4136
4137 static void purge_queue(struct dlm_rsb *r, struct list_head *queue,
4138 int (*test)(struct dlm_ls *ls, struct dlm_lkb *lkb))
4139 {
4140 struct dlm_ls *ls = r->res_ls;
4141 struct dlm_lkb *lkb, *safe;
4142
4143 list_for_each_entry_safe(lkb, safe, queue, lkb_statequeue) {
4144 if (test(ls, lkb)) {
4145 rsb_set_flag(r, RSB_LOCKS_PURGED);
4146 del_lkb(r, lkb);
4147 /* this put should free the lkb */
4148 if (!dlm_put_lkb(lkb))
4149 log_error(ls, "purged lkb not released");
4150 }
4151 }
4152 }
4153
4154 static int purge_dead_test(struct dlm_ls *ls, struct dlm_lkb *lkb)
4155 {
4156 return (is_master_copy(lkb) && dlm_is_removed(ls, lkb->lkb_nodeid));
4157 }
4158
4159 static int purge_mstcpy_test(struct dlm_ls *ls, struct dlm_lkb *lkb)
4160 {
4161 return is_master_copy(lkb);
4162 }
4163
4164 static void purge_dead_locks(struct dlm_rsb *r)
4165 {
4166 purge_queue(r, &r->res_grantqueue, &purge_dead_test);
4167 purge_queue(r, &r->res_convertqueue, &purge_dead_test);
4168 purge_queue(r, &r->res_waitqueue, &purge_dead_test);
4169 }
4170
4171 void dlm_purge_mstcpy_locks(struct dlm_rsb *r)
4172 {
4173 purge_queue(r, &r->res_grantqueue, &purge_mstcpy_test);
4174 purge_queue(r, &r->res_convertqueue, &purge_mstcpy_test);
4175 purge_queue(r, &r->res_waitqueue, &purge_mstcpy_test);
4176 }
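
/* Not part of lock.c: a standalone model of the purge_queue() pattern above.
 * Passing the test as a callback lets one traversal serve both policies
 * (locks from removed nodes vs. all master copies). The types, the hardcoded
 * nodeid, and the array-based queue are stand-ins for illustration. */
#include <stdio.h>

struct mlkb { int nodeid; int mstcpy; };

/* ~purge_dead_test(): pretend node 2 has left the cluster */
static int dead_node_test(const struct mlkb *l)
{
	return l->mstcpy && l->nodeid == 2;
}

/* ~purge_mstcpy_test() */
static int mstcpy_test(const struct mlkb *l)
{
	return l->mstcpy;
}

static int purge(struct mlkb *q, int n, int (*test)(const struct mlkb *))
{
	int kept = 0;

	for (int i = 0; i < n; i++)
		if (!test(&q[i]))
			q[kept++] = q[i];	/* compact the survivors */
	return kept;				/* new queue length */
}

int main(void)
{
	struct mlkb q[] = { {1, 1}, {2, 1}, {3, 0} };

	printf("%d\n", purge(q, 3, dead_node_test));	/* 2 */
	printf("%d\n", purge(q, 2, mstcpy_test));	/* 1 */
	return 0;
}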
4177
4178 /* Get rid of locks held by nodes that are gone. */
4179
4180 int dlm_purge_locks(struct dlm_ls *ls)
4181 {
4182 struct dlm_rsb *r;
4183
4184 log_debug(ls, "dlm_purge_locks");
4185
4186 down_write(&ls->ls_root_sem);
4187 list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
4188 hold_rsb(r);
4189 lock_rsb(r);
4190 if (is_master(r))
4191 purge_dead_locks(r);
4192 unlock_rsb(r);
4193 unhold_rsb(r);
4194
4195 schedule();
4196 }
4197 up_write(&ls->ls_root_sem);
4198
4199 return 0;
4200 }
4201
4202 static struct dlm_rsb *find_purged_rsb(struct dlm_ls *ls, int bucket)
4203 {
4204 struct dlm_rsb *r, *r_ret = NULL;
4205
4206 read_lock(&ls->ls_rsbtbl[bucket].lock);
4207 list_for_each_entry(r, &ls->ls_rsbtbl[bucket].list, res_hashchain) {
4208 if (!rsb_flag(r, RSB_LOCKS_PURGED))
4209 continue;
4210 hold_rsb(r);
4211 rsb_clear_flag(r, RSB_LOCKS_PURGED);
4212 r_ret = r;
4213 break;
4214 }
4215 read_unlock(&ls->ls_rsbtbl[bucket].lock);
4216 return r_ret;
4217 }
4218
4219 void dlm_grant_after_purge(struct dlm_ls *ls)
4220 {
4221 struct dlm_rsb *r;
4222 int bucket = 0;
4223
4224 while (1) {
4225 r = find_purged_rsb(ls, bucket);
4226 if (!r) {
4227 if (bucket == ls->ls_rsbtbl_size - 1)
4228 break;
4229 bucket++;
4230 continue;
4231 }
4232 lock_rsb(r);
4233 if (is_master(r)) {
4234 grant_pending_locks(r);
4235 confirm_master(r, 0);
4236 }
4237 unlock_rsb(r);
4238 put_rsb(r);
4239 schedule();
4240 }
4241 }
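
/* Not part of lock.c: a standalone model of the scan in
 * dlm_grant_after_purge() above -- walk each hash bucket, repeatedly pulling
 * entries whose PURGED flag is set and clearing the flag as each is claimed,
 * so every flagged rsb is processed exactly once. Sizes are stand-ins. */
#include <stdio.h>

#define NBUCKETS 2
#define NPER     3

static int purged[NBUCKETS][NPER] = { {1, 0, 1}, {0, 1, 0} };

static int find_purged(int bucket)		/* ~find_purged_rsb() */
{
	for (int i = 0; i < NPER; i++) {
		if (purged[bucket][i]) {
			purged[bucket][i] = 0;	/* clear flag: claimed */
			return i;
		}
	}
	return -1;
}

int main(void)
{
	for (int bucket = 0; bucket < NBUCKETS; ) {
		int i = find_purged(bucket);

		if (i < 0) {		/* bucket drained, move on */
			bucket++;
			continue;
		}
		printf("grant bucket %d entry %d\n", bucket, i);
	}
	return 0;
}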
4242
4243 static struct dlm_lkb *search_remid_list(struct list_head *head, int nodeid,
4244 uint32_t remid)
4245 {
4246 struct dlm_lkb *lkb;
4247
4248 list_for_each_entry(lkb, head, lkb_statequeue) {
4249 if (lkb->lkb_nodeid == nodeid && lkb->lkb_remid == remid)
4250 return lkb;
4251 }
4252 return NULL;
4253 }
4254
4255 static struct dlm_lkb *search_remid(struct dlm_rsb *r, int nodeid,
4256 uint32_t remid)
4257 {
4258 struct dlm_lkb *lkb;
4259
4260 lkb = search_remid_list(&r->res_grantqueue, nodeid, remid);
4261 if (lkb)
4262 return lkb;
4263 lkb = search_remid_list(&r->res_convertqueue, nodeid, remid);
4264 if (lkb)
4265 return lkb;
4266 lkb = search_remid_list(&r->res_waitqueue, nodeid, remid);
4267 if (lkb)
4268 return lkb;
4269 return NULL;
4270 }
4271
4272 static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
4273 struct dlm_rsb *r, struct dlm_rcom *rc)
4274 {
4275 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
4276 int lvblen;
4277
4278 lkb->lkb_nodeid = rc->rc_header.h_nodeid;
4279 lkb->lkb_ownpid = rl->rl_ownpid;
4280 lkb->lkb_remid = rl->rl_lkid;
4281 lkb->lkb_exflags = rl->rl_exflags;
4282 lkb->lkb_flags = rl->rl_flags & 0x0000FFFF;
4283 lkb->lkb_flags |= DLM_IFL_MSTCPY;
4284 lkb->lkb_lvbseq = rl->rl_lvbseq;
4285 lkb->lkb_rqmode = rl->rl_rqmode;
4286 lkb->lkb_grmode = rl->rl_grmode;
4287 /* don't set lkb_status because add_lkb wants to set it itself */
4288
4289 lkb->lkb_bastaddr = (void *) (long) (rl->rl_asts & AST_BAST);
4290 lkb->lkb_astaddr = (void *) (long) (rl->rl_asts & AST_COMP);
4291
4292 if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
4293 lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
4294 if (!lkb->lkb_lvbptr)
4295 return -ENOMEM;
4296 lvblen = rc->rc_header.h_length - sizeof(struct dlm_rcom) -
4297 sizeof(struct rcom_lock);
4298 memcpy(lkb->lkb_lvbptr, rl->rl_lvb, lvblen);
4299 }
4300
4301 /* Conversions between PR and CW (middle modes) need special handling.
4302 The real granted mode of these converting locks cannot be determined
4303 until all locks have been rebuilt on the rsb (recover_conversion) */
4304
4305 if (rl->rl_wait_type == DLM_MSG_CONVERT && middle_conversion(lkb)) {
4306 rl->rl_status = DLM_LKSTS_CONVERT;
4307 lkb->lkb_grmode = DLM_LOCK_IV;
4308 rsb_set_flag(r, RSB_RECOVER_CONVERT);
4309 }
4310
4311 return 0;
4312 }
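
/* Not part of lock.c: the memcpy above sizes the lvb purely from the wire
 * header (h_length), trusting the sender. In the spirit of validating
 * messages before processing them, a defensive variant could reject headers
 * too short to hold the fixed structs and clamp the lvb length against the
 * lockspace limit (kept as ls->ls_lvblen elsewhere in the dlm). A standalone
 * sketch of that check, with stand-in sizes: */
#include <stdio.h>
#include <stdint.h>

static int rcom_lvb_len(uint32_t h_length, size_t fixed_size,
			size_t max_lvblen, size_t *lvblen)
{
	if (h_length < fixed_size)
		return -1;			/* truncated message */
	*lvblen = h_length - fixed_size;
	if (*lvblen > max_lvblen)
		return -1;			/* lvb longer than allowed */
	return 0;
}

int main(void)
{
	size_t len;

	/* 120 bytes of fixed structs, 64-byte lvb limit */
	printf("%d\n", rcom_lvb_len(200, 120, 64, &len));	/* -1: 80 > 64 */
	printf("%d\n", rcom_lvb_len(160, 120, 64, &len));	/* 0: len = 40 */
	return 0;
}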
4313
4314 /* This lkb may have been recovered in a previous aborted recovery so we need
4315 to check if the rsb already has an lkb with the given remote nodeid/lkid.
4316 If so we just send back a standard reply. If not, we create a new lkb with
4317 the given values and send back our lkid. We send back our lkid by sending
4318 back the rcom_lock struct we got but with the remid field filled in. */
4319
4320 int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
4321 {
4322 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
4323 struct dlm_rsb *r;
4324 struct dlm_lkb *lkb;
4325 int error;
4326
4327 if (rl->rl_parent_lkid) {
4328 error = -EOPNOTSUPP;
4329 goto out;
4330 }
4331
4332 error = find_rsb(ls, rl->rl_name, rl->rl_namelen, R_MASTER, &r);
4333 if (error)
4334 goto out;
4335
4336 lock_rsb(r);
4337
4338 lkb = search_remid(r, rc->rc_header.h_nodeid, rl->rl_lkid);
4339 if (lkb) {
4340 error = -EEXIST;
4341 goto out_remid;
4342 }
4343
4344 error = create_lkb(ls, &lkb);
4345 if (error)
4346 goto out_unlock;
4347
4348 error = receive_rcom_lock_args(ls, lkb, r, rc);
4349 if (error) {
4350 __put_lkb(ls, lkb);
4351 goto out_unlock;
4352 }
4353
4354 attach_lkb(r, lkb);
4355 add_lkb(r, lkb, rl->rl_status);
4356 error = 0;
4357
4358 out_remid:
4359 /* this is the new value returned to the lock holder for
4360 saving in its process-copy lkb */
4361 rl->rl_remid = lkb->lkb_id;
4362
4363 out_unlock:
4364 unlock_rsb(r);
4365 put_rsb(r);
4366 out:
4367 if (error)
4368 log_debug(ls, "recover_master_copy %d %x", error, rl->rl_lkid);
4369 rl->rl_result = error;
4370 return error;
4371 }
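
/* Not part of lock.c: a standalone model of the lkid/remid exchange the
 * comment above describes. The lock holder sends its own id in rl_lkid; the
 * master replies with the id it assigned in rl_remid, which the holder then
 * stores (dlm_recover_process_copy() below does lkb->lkb_remid = rl->rl_remid).
 * The struct and the ids here are illustrative stand-ins. */
#include <stdio.h>
#include <stdint.h>

struct rcom_lock_model {
	uint32_t lkid;		/* holder's id, sent to the master */
	uint32_t remid;		/* master's id, filled in on the reply */
};

static void master_side(struct rcom_lock_model *rl, uint32_t master_lkid)
{
	/* the master records rl->lkid as its remote id (not shown) and
	   answers with the id of the master-copy lkb it created or found */
	rl->remid = master_lkid;
}

int main(void)
{
	struct rcom_lock_model rl = { .lkid = 0x10001, .remid = 0 };

	master_side(&rl, 0x20002);
	printf("holder saves remid %x\n", rl.remid);	/* 20002 */
	return 0;
}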
4372
4373 int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
4374 {
4375 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
4376 struct dlm_rsb *r;
4377 struct dlm_lkb *lkb;
4378 int error;
4379
4380 error = find_lkb(ls, rl->rl_lkid, &lkb);
4381 if (error) {
4382 log_error(ls, "recover_process_copy no lkid %x", rl->rl_lkid);
4383 return error;
4384 }
4385
4386 DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
4387
4388 error = rl->rl_result;
4389
4390 r = lkb->lkb_resource;
4391 hold_rsb(r);
4392 lock_rsb(r);
4393
4394 switch (error) {
4395 case -EBADR:
4396 /* There's a chance the new master received our lock before
4397 dlm_recover_master_reply(); this wouldn't happen if we did
4398 a barrier between recover_masters and recover_locks. */
4399 log_debug(ls, "master copy not ready %x r %lx %s", lkb->lkb_id,
4400 (unsigned long)r, r->res_name);
4401 dlm_send_rcom_lock(r, lkb);
4402 goto out;
4403 case -EEXIST:
4404 log_debug(ls, "master copy exists %x", lkb->lkb_id);
4405 /* fall through */
4406 case 0:
4407 lkb->lkb_remid = rl->rl_remid;
4408 break;
4409 default:
4410 log_error(ls, "dlm_recover_process_copy unknown error %d %x",
4411 error, lkb->lkb_id);
4412 }
4413
4414 /* an ack for dlm_recover_locks() which waits for replies from
4415 all the locks it sends to new masters */
4416 dlm_recovered_lock(r);
4417 out:
4418 unlock_rsb(r);
4419 put_rsb(r);
4420 dlm_put_lkb(lkb);
4421
4422 return 0;
4423 }
4424
4425 int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
4426 int mode, uint32_t flags, void *name, unsigned int namelen,
4427 unsigned long timeout_cs)
4428 {
4429 struct dlm_lkb *lkb;
4430 struct dlm_args args;
4431 int error;
4432
4433 dlm_lock_recovery(ls);
4434
4435 error = create_lkb(ls, &lkb);
4436 if (error) {
4437 kfree(ua);
4438 goto out;
4439 }
4440
4441 if (flags & DLM_LKF_VALBLK) {
4442 ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_KERNEL);
4443 if (!ua->lksb.sb_lvbptr) {
4444 kfree(ua);
4445 __put_lkb(ls, lkb);
4446 error = -ENOMEM;
4447 goto out;
4448 }
4449 }
4450
4451 /* After ua is attached to lkb it will be freed by dlm_free_lkb().
4452 When DLM_IFL_USER is set, the dlm knows that this is a userspace
4453 lock and that lkb_astparam is the dlm_user_args structure. */
4454
4455 error = set_lock_args(mode, &ua->lksb, flags, namelen, timeout_cs,
4456 DLM_FAKE_USER_AST, ua, DLM_FAKE_USER_AST, &args);
4457 lkb->lkb_flags |= DLM_IFL_USER;
4458 ua->old_mode = DLM_LOCK_IV;
4459
4460 if (error) {
4461 __put_lkb(ls, lkb);
4462 goto out;
4463 }
4464
4465 error = request_lock(ls, lkb, name, namelen, &args);
4466
4467 switch (error) {
4468 case 0:
4469 break;
4470 case -EINPROGRESS:
4471 error = 0;
4472 break;
4473 case -EAGAIN:
4474 error = 0;
4475 /* fall through */
4476 default:
4477 __put_lkb(ls, lkb);
4478 goto out;
4479 }
4480
4481 /* add this new lkb to the per-process list of locks */
4482 spin_lock(&ua->proc->locks_spin);
4483 hold_lkb(lkb);
4484 list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
4485 spin_unlock(&ua->proc->locks_spin);
4486 out:
4487 dlm_unlock_recovery(ls);
4488 return error;
4489 }
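
/* Not part of lock.c: a standalone model of the return mapping in
 * dlm_user_request() above. 0 and -EINPROGRESS keep the lkb on the
 * per-process list; -EAGAIN frees it yet still reports success, since the
 * result is delivered through the ast; any other error frees it and is
 * returned. Names here are stand-ins. */
#include <errno.h>
#include <stdio.h>

static int map_request_result(int error, int *keep_lkb)
{
	switch (error) {
	case 0:
	case -EINPROGRESS:
		*keep_lkb = 1;		/* lkb goes on proc->locks */
		return 0;
	case -EAGAIN:
		*keep_lkb = 0;		/* lkb is put; result comes via ast */
		return 0;
	default:
		*keep_lkb = 0;		/* real failure: lkb is put */
		return error;
	}
}

int main(void)
{
	int keep, rv;

	rv = map_request_result(-EAGAIN, &keep);
	printf("%d %d\n", rv, keep);	/* 0 0 */
	rv = map_request_result(-EINVAL, &keep);
	printf("%d %d\n", rv, keep);	/* -22 0 (on Linux) */
	return 0;
}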
4490
4491 int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
4492 int mode, uint32_t flags, uint32_t lkid, char *lvb_in,
4493 unsigned long timeout_cs)
4494 {
4495 struct dlm_lkb *lkb;
4496 struct dlm_args args;
4497 struct dlm_user_args *ua;
4498 int error;
4499
4500 dlm_lock_recovery(ls);
4501
4502 error = find_lkb(ls, lkid, &lkb);
4503 if (error)
4504 goto out;
4505
4506 /* user can change the params on its lock when it converts it, or
4507 add an lvb that didn't exist before */
4508
4509 ua = (struct dlm_user_args *)lkb->lkb_astparam;
4510
4511 if (flags & DLM_LKF_VALBLK && !ua->lksb.sb_lvbptr) {
4512 ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_KERNEL);
4513 if (!ua->lksb.sb_lvbptr) {
4514 error = -ENOMEM;
4515 goto out_put;
4516 }
4517 }
4518 if (lvb_in && ua->lksb.sb_lvbptr)
4519 memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
4520
4521 ua->xid = ua_tmp->xid;
4522 ua->castparam = ua_tmp->castparam;
4523 ua->castaddr = ua_tmp->castaddr;
4524 ua->bastparam = ua_tmp->bastparam;
4525 ua->bastaddr = ua_tmp->bastaddr;
4526 ua->user_lksb = ua_tmp->user_lksb;
4527 ua->old_mode = lkb->lkb_grmode;
4528
4529 error = set_lock_args(mode, &ua->lksb, flags, 0, timeout_cs,
4530 DLM_FAKE_USER_AST, ua, DLM_FAKE_USER_AST, &args);
4531 if (error)
4532 goto out_put;
4533
4534 error = convert_lock(ls, lkb, &args);
4535
4536 if (error == -EINPROGRESS || error == -EAGAIN || error == -EDEADLK)
4537 error = 0;
4538 out_put:
4539 dlm_put_lkb(lkb);
4540 out:
4541 dlm_unlock_recovery(ls);
4542 kfree(ua_tmp);
4543 return error;
4544 }
4545
4546 int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
4547 uint32_t flags, uint32_t lkid, char *lvb_in)
4548 {
4549 struct dlm_lkb *lkb;
4550 struct dlm_args args;
4551 struct dlm_user_args *ua;
4552 int error;
4553
4554 dlm_lock_recovery(ls);
4555
4556 error = find_lkb(ls, lkid, &lkb);
4557 if (error)
4558 goto out;
4559
4560 ua = (struct dlm_user_args *)lkb->lkb_astparam;
4561
4562 if (lvb_in && ua->lksb.sb_lvbptr)
4563 memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
4564 if (ua_tmp->castparam)
4565 ua->castparam = ua_tmp->castparam;
4566 ua->user_lksb = ua_tmp->user_lksb;
4567
4568 error = set_unlock_args(flags, ua, &args);
4569 if (error)
4570 goto out_put;
4571
4572 error = unlock_lock(ls, lkb, &args);
4573
4574 if (error == -DLM_EUNLOCK)
4575 error = 0;
4576 /* from validate_unlock_args() */
4577 if (error == -EBUSY && (flags & DLM_LKF_FORCEUNLOCK))
4578 error = 0;
4579 if (error)
4580 goto out_put;
4581
4582 spin_lock(&ua->proc->locks_spin);
4583 /* dlm_user_add_ast() may have already taken lkb off the proc list */
4584 if (!list_empty(&lkb->lkb_ownqueue))
4585 list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking);
4586 spin_unlock(&ua->proc->locks_spin);
4587 out_put:
4588 dlm_put_lkb(lkb);
4589 out:
4590 dlm_unlock_recovery(ls);
4591 kfree(ua_tmp);
4592 return error;
4593 }
4594
4595 int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
4596 uint32_t flags, uint32_t lkid)
4597 {
4598 struct dlm_lkb *lkb;
4599 struct dlm_args args;
4600 struct dlm_user_args *ua;
4601 int error;
4602
4603 dlm_lock_recovery(ls);
4604
4605 error = find_lkb(ls, lkid, &lkb);
4606 if (error)
4607 goto out;
4608
4609 ua = (struct dlm_user_args *)lkb->lkb_astparam;
4610 if (ua_tmp->castparam)
4611 ua->castparam = ua_tmp->castparam;
4612 ua->user_lksb = ua_tmp->user_lksb;
4613
4614 error = set_unlock_args(flags, ua, &args);
4615 if (error)
4616 goto out_put;
4617
4618 error = cancel_lock(ls, lkb, &args);
4619
4620 if (error == -DLM_ECANCEL)
4621 error = 0;
4622 /* from validate_unlock_args() */
4623 if (error == -EBUSY)
4624 error = 0;
4625 out_put:
4626 dlm_put_lkb(lkb);
4627 out:
4628 dlm_unlock_recovery(ls);
4629 kfree(ua_tmp);
4630 return error;
4631 }
4632
4633 int dlm_user_deadlock(struct dlm_ls *ls, uint32_t flags, uint32_t lkid)
4634 {
4635 struct dlm_lkb *lkb;
4636 struct dlm_args args;
4637 struct dlm_user_args *ua;
4638 struct dlm_rsb *r;
4639 int error;
4640
4641 dlm_lock_recovery(ls);
4642
4643 error = find_lkb(ls, lkid, &lkb);
4644 if (error)
4645 goto out;
4646
4647 ua = (struct dlm_user_args *)lkb->lkb_astparam;
4648
4649 error = set_unlock_args(flags, ua, &args);
4650 if (error)
4651 goto out_put;
4652
4653 /* same as cancel_lock(), but set DEADLOCK_CANCEL after lock_rsb */
4654
4655 r = lkb->lkb_resource;
4656 hold_rsb(r);
4657 lock_rsb(r);
4658
4659 error = validate_unlock_args(lkb, &args);
4660 if (error)
4661 goto out_r;
4662 lkb->lkb_flags |= DLM_IFL_DEADLOCK_CANCEL;
4663
4664 error = _cancel_lock(r, lkb);
4665 out_r:
4666 unlock_rsb(r);
4667 put_rsb(r);
4668
4669 if (error == -DLM_ECANCEL)
4670 error = 0;
4671 /* from validate_unlock_args() */
4672 if (error == -EBUSY)
4673 error = 0;
4674 out_put:
4675 dlm_put_lkb(lkb);
4676 out:
4677 dlm_unlock_recovery(ls);
4678 return error;
4679 }
4680
4681 /* lkb's that are removed from the waiters list by revert are just left on the
4682 orphans list with the granted orphan locks, to be freed by purge */
4683
4684 static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
4685 {
4686 struct dlm_user_args *ua = (struct dlm_user_args *)lkb->lkb_astparam;
4687 struct dlm_args args;
4688 int error;
4689
4690 hold_lkb(lkb);
4691 mutex_lock(&ls->ls_orphans_mutex);
4692 list_add_tail(&lkb->lkb_ownqueue, &ls->ls_orphans);
4693 mutex_unlock(&ls->ls_orphans_mutex);
4694
4695 set_unlock_args(0, ua, &args);
4696
4697 error = cancel_lock(ls, lkb, &args);
4698 if (error == -DLM_ECANCEL)
4699 error = 0;
4700 return error;
4701 }
4702
4703 /* The force flag allows the unlock to go ahead even if the lkb isn't granted.
4704 Regardless of what rsb queue the lock is on, it's removed and freed. */
4705
4706 static int unlock_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
4707 {
4708 struct dlm_user_args *ua = (struct dlm_user_args *)lkb->lkb_astparam;
4709 struct dlm_args args;
4710 int error;
4711
4712 set_unlock_args(DLM_LKF_FORCEUNLOCK, ua, &args);
4713
4714 error = unlock_lock(ls, lkb, &args);
4715 if (error == -DLM_EUNLOCK)
4716 error = 0;
4717 return error;
4718 }
4719
4720 /* We have to release clear_proc_locks mutex before calling unlock_proc_lock()
4721 (which does lock_rsb) due to deadlock with receiving a message that does
4722 lock_rsb followed by dlm_user_add_ast() */
4723
4724 static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls,
4725 struct dlm_user_proc *proc)
4726 {
4727 struct dlm_lkb *lkb = NULL;
4728
4729 mutex_lock(&ls->ls_clear_proc_locks);
4730 if (list_empty(&proc->locks))
4731 goto out;
4732
4733 lkb = list_entry(proc->locks.next, struct dlm_lkb, lkb_ownqueue);
4734 list_del_init(&lkb->lkb_ownqueue);
4735
4736 if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
4737 lkb->lkb_flags |= DLM_IFL_ORPHAN;
4738 else
4739 lkb->lkb_flags |= DLM_IFL_DEAD;
4740 out:
4741 mutex_unlock(&ls->ls_clear_proc_locks);
4742 return lkb;
4743 }
4744
4745 /* The ls_clear_proc_locks mutex protects against dlm_user_add_ast(), which
4746 1) references the lkb's ua (lkb_astparam), which we free here, and 2) adds
4747 lkbs to proc->asts, which we clear here. */
4748
4749 /* proc CLOSING flag is set so no more device_reads should look at proc->asts
4750 list, and no more device_writes should add lkb's to proc->locks list; so we
4751 shouldn't need to take asts_spin or locks_spin here. this assumes that
4752 device reads/writes/closes are serialized -- FIXME: we may need to serialize
4753 them ourselves. */
4754
4755 void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
4756 {
4757 struct dlm_lkb *lkb, *safe;
4758
4759 dlm_lock_recovery(ls);
4760
4761 while (1) {
4762 lkb = del_proc_lock(ls, proc);
4763 if (!lkb)
4764 break;
4765 del_timeout(lkb);
4766 if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
4767 orphan_proc_lock(ls, lkb);
4768 else
4769 unlock_proc_lock(ls, lkb);
4770
4771 /* this removes the reference for the proc->locks list
4772 added by dlm_user_request, it may result in the lkb
4773 being freed */
4774
4775 dlm_put_lkb(lkb);
4776 }
4777
4778 mutex_lock(&ls->ls_clear_proc_locks);
4779
4780 /* in-progress unlocks */
4781 list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
4782 list_del_init(&lkb->lkb_ownqueue);
4783 lkb->lkb_flags |= DLM_IFL_DEAD;
4784 dlm_put_lkb(lkb);
4785 }
4786
4787 list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_astqueue) {
4788 lkb->lkb_ast_type = 0;
4789 list_del(&lkb->lkb_astqueue);
4790 dlm_put_lkb(lkb);
4791 }
4792
4793 mutex_unlock(&ls->ls_clear_proc_locks);
4794 dlm_unlock_recovery(ls);
4795 }
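
/* Not part of lock.c: a standalone model of the lock ordering used above.
 * del_proc_lock() pops one entry while holding ls_clear_proc_locks, the
 * mutex is dropped, and only then is the per-resource lock taken inside
 * unlock_proc_lock(); the mutex is never held across lock_rsb(), so a
 * message handler that takes the rsb lock first cannot deadlock with us.
 * (Build with -pthread; the counter stands in for proc->locks.) */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t proc_mutex = PTHREAD_MUTEX_INITIALIZER; /* ~ls_clear_proc_locks */
static pthread_mutex_t rsb_lock = PTHREAD_MUTEX_INITIALIZER;   /* ~lock_rsb() */
static int remaining = 3;

static int pop_one(void)			/* ~del_proc_lock() */
{
	int have;

	pthread_mutex_lock(&proc_mutex);
	have = remaining > 0 ? remaining-- : 0;
	pthread_mutex_unlock(&proc_mutex);	/* dropped before rsb_lock */
	return have;
}

int main(void)
{
	int id;

	while ((id = pop_one())) {
		pthread_mutex_lock(&rsb_lock);	/* ~unlock_proc_lock() */
		printf("unlocked %d\n", id);
		pthread_mutex_unlock(&rsb_lock);
	}
	return 0;
}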
4796
4797 static void purge_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
4798 {
4799 struct dlm_lkb *lkb, *safe;
4800
4801 while (1) {
4802 lkb = NULL;
4803 spin_lock(&proc->locks_spin);
4804 if (!list_empty(&proc->locks)) {
4805 lkb = list_entry(proc->locks.next, struct dlm_lkb,
4806 lkb_ownqueue);
4807 list_del_init(&lkb->lkb_ownqueue);
4808 }
4809 spin_unlock(&proc->locks_spin);
4810
4811 if (!lkb)
4812 break;
4813
4814 lkb->lkb_flags |= DLM_IFL_DEAD;
4815 unlock_proc_lock(ls, lkb);
4816 dlm_put_lkb(lkb); /* ref from proc->locks list */
4817 }
4818
4819 spin_lock(&proc->locks_spin);
4820 list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
4821 list_del_init(&lkb->lkb_ownqueue);
4822 lkb->lkb_flags |= DLM_IFL_DEAD;
4823 dlm_put_lkb(lkb);
4824 }
4825 spin_unlock(&proc->locks_spin);
4826
4827 spin_lock(&proc->asts_spin);
4828 list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_astqueue) {
4829 list_del(&lkb->lkb_astqueue);
4830 dlm_put_lkb(lkb);
4831 }
4832 spin_unlock(&proc->asts_spin);
4833 }
4834
4835 /* pid of 0 means purge all orphans */
4836
4837 static void do_purge(struct dlm_ls *ls, int nodeid, int pid)
4838 {
4839 struct dlm_lkb *lkb, *safe;
4840
4841 mutex_lock(&ls->ls_orphans_mutex);
4842 list_for_each_entry_safe(lkb, safe, &ls->ls_orphans, lkb_ownqueue) {
4843 if (pid && lkb->lkb_ownpid != pid)
4844 continue;
4845 unlock_proc_lock(ls, lkb);
4846 list_del_init(&lkb->lkb_ownqueue);
4847 dlm_put_lkb(lkb);
4848 }
4849 mutex_unlock(&ls->ls_orphans_mutex);
4850 }
4851
4852 static int send_purge(struct dlm_ls *ls, int nodeid, int pid)
4853 {
4854 struct dlm_message *ms;
4855 struct dlm_mhandle *mh;
4856 int error;
4857
4858 error = _create_message(ls, sizeof(struct dlm_message), nodeid,
4859 DLM_MSG_PURGE, &ms, &mh);
4860 if (error)
4861 return error;
4862 ms->m_nodeid = nodeid;
4863 ms->m_pid = pid;
4864
4865 return send_message(mh, ms);
4866 }
4867
4868 int dlm_user_purge(struct dlm_ls *ls, struct dlm_user_proc *proc,
4869 int nodeid, int pid)
4870 {
4871 int error = 0;
4872
4873 if (nodeid != dlm_our_nodeid()) {
4874 error = send_purge(ls, nodeid, pid);
4875 } else {
4876 dlm_lock_recovery(ls);
4877 if (pid == current->pid)
4878 purge_proc_locks(ls, proc);
4879 else
4880 do_purge(ls, nodeid, pid);
4881 dlm_unlock_recovery(ls);
4882 }
4883 return error;
4884 }
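
/* Not part of lock.c: a standalone model of the dispatch in dlm_user_purge()
 * above. A purge aimed at another node goes over the wire as DLM_MSG_PURGE;
 * on the local node, the caller's own pid purges its proc locks directly,
 * while any other pid (or pid 0, meaning all orphans) goes through the
 * orphan list. The strings and ids here are illustrative stand-ins. */
#include <stdio.h>

static const char *purge_route(int nodeid, int our_nodeid, int pid, int our_pid)
{
	if (nodeid != our_nodeid)
		return "send DLM_MSG_PURGE";
	return pid == our_pid ? "purge_proc_locks" : "do_purge";
}

int main(void)
{
	printf("%s\n", purge_route(2, 1, 100, 100));	/* send DLM_MSG_PURGE */
	printf("%s\n", purge_route(1, 1, 100, 100));	/* purge_proc_locks */
	printf("%s\n", purge_route(1, 1, 0, 100));	/* do_purge */
	return 0;
}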
4885