GFS2: Move glock superblock pointer to field gl_name
fs/gfs2/glock.c
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/percpu.h>
#include <linux/list_sort.h>
#include <linux/lockref.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"

struct gfs2_glock_iter {
	int hash;			/* hash bucket index */
	unsigned nhash;			/* Index within current bucket */
	struct gfs2_sbd *sdp;		/* incore superblock */
	struct gfs2_glock *gl;		/* current glock struct */
	loff_t last_pos;		/* last position */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);

static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT	15
#define GFS2_GL_HASH_SIZE	(1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK	(GFS2_GL_HASH_SIZE - 1)

static struct hlist_bl_head gl_hash_table[GFS2_GL_HASH_SIZE];
static struct dentry *gfs2_root;

/**
 * gl_hash() - Turn glock number into hash bucket number
 * @lock: The glock number
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
			    const struct lm_lockname *name)
{
	unsigned int h;

	h = jhash(&name->ln_number, sizeof(u64), 0);
	h = jhash(&name->ln_type, sizeof(unsigned int), h);
	h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
	h &= GFS2_GL_HASH_MASK;

	return h;
}

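/* Per-bucket locking, implemented via the bit spinlock in each hlist_bl head */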
static inline void spin_lock_bucket(unsigned int hash)
{
	hlist_bl_lock(&gl_hash_table[hash]);
}

static inline void spin_unlock_bucket(unsigned int hash)
{
	hlist_bl_unlock(&gl_hash_table[hash]);
}

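/*
 * RCU callback that frees a glock once all readers are done with it;
 * gfs2_glock_free() below schedules it and wakes up anyone waiting for
 * glock disposal to complete.
 */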
static void gfs2_glock_dealloc(struct rcu_head *rcu)
{
	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);

	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
	} else {
		kfree(gl->gl_lksb.sb_lvbptr);
		kmem_cache_free(gfs2_glock_cachep, gl);
	}
}

void gfs2_glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
		wake_up(&sdp->sd_glock_wait);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

static void gfs2_glock_hold(struct gfs2_glock *gl)
{
	GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
	lockref_get(&gl->gl_lockref);
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gl->gl_state == LM_ST_UNLOCKED)
		return 0;
	if (!list_empty(&gl->gl_holders))
		return 0;
	if (glops->go_demote_ok)
		return glops->go_demote_ok(gl);
	return 1;
}

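/* Move a glock to the tail of the global LRU list, adjusting lru_count */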
void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);

	if (!list_empty(&gl->gl_lru))
		list_del_init(&gl->gl_lru);
	else
		atomic_inc(&lru_count);

	list_add_tail(&gl->gl_lru, &lru_list);
	set_bit(GLF_LRU, &gl->gl_flags);
	spin_unlock(&lru_lock);
}

static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);
	if (!list_empty(&gl->gl_lru)) {
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);
		clear_bit(GLF_LRU, &gl->gl_flags);
	}
	spin_unlock(&lru_lock);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

void gfs2_glock_put(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = gfs2_glock2aspace(gl);

	if (lockref_put_or_lock(&gl->gl_lockref))
		return;

	lockref_mark_dead(&gl->gl_lockref);

	gfs2_glock_remove_from_lru(gl);
	spin_unlock(&gl->gl_lockref.lock);
	spin_lock_bucket(gl->gl_hash);
	hlist_bl_del_rcu(&gl->gl_list);
	spin_unlock_bucket(gl->gl_hash);
	GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
	GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
	trace_gfs2_glock_put(gl);
	sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @bucket: the bucket to search
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
					const struct lm_lockname *name)
{
	struct gfs2_glock *gl;
	struct hlist_bl_node *h;

	hlist_bl_for_each_entry_rcu(gl, h, &gl_hash_table[hash], gl_list) {
		if (!lm_name_equal(&gl->gl_name, name))
			continue;
		if (lockref_get_not_dead(&gl->gl_lockref))
			return gl;
	}

	return NULL;
}

/**
 * may_grant - check if it's ok to grant a new lock
 * @gl: The glock
 * @gh: The lock request which we wish to grant
 *
 * Returns: true if it's ok to grant the lock
 */

static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
		return 0;
	if (gl->gl_state == gh->gh_state)
		return 1;
	if (gh->gh_flags & GL_EXACT)
		return 0;
	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
			return 1;
		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
			return 1;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
		return 1;
	return 0;
}

static void gfs2_holder_wake(struct gfs2_holder *gh)
{
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb__after_atomic();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

/**
 * do_error - Something unexpected has happened during a lock request
 *
 */

static inline void do_error(struct gfs2_glock *gl, const int ret)
{
	struct gfs2_holder *gh, *tmp;

	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (ret & LM_OUT_ERROR)
			gh->gh_error = -EIO;
		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			gh->gh_error = GLR_TRYFAILED;
		else
			continue;
		list_del_init(&gh->gh_list);
		trace_gfs2_glock_queue(gh, 0);
		gfs2_holder_wake(gh);
	}
}

/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 *
 * Returns: 1 if there is a blocked holder at the head of the list, or 2
 *          if a type specific operation is underway.
 */

static int do_promote(struct gfs2_glock *gl)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh, *tmp;
	int ret;

restart:
	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (may_grant(gl, gh)) {
			if (gh->gh_list.prev == &gl->gl_holders &&
			    glops->go_lock) {
				spin_unlock(&gl->gl_spin);
				/* FIXME: eliminate this eventually */
				ret = glops->go_lock(gh);
				spin_lock(&gl->gl_spin);
				if (ret) {
					if (ret == 1)
						return 2;
					gh->gh_error = ret;
					list_del_init(&gh->gh_list);
					trace_gfs2_glock_queue(gh, 0);
					gfs2_holder_wake(gh);
					goto restart;
				}
				set_bit(HIF_HOLDER, &gh->gh_iflags);
				trace_gfs2_promote(gh, 1);
				gfs2_holder_wake(gh);
				goto restart;
			}
			set_bit(HIF_HOLDER, &gh->gh_iflags);
			trace_gfs2_promote(gh, 0);
			gfs2_holder_wake(gh);
			continue;
		}
		if (gh->gh_list.prev == &gl->gl_holders)
			return 1;
		do_error(gl, 0);
		break;
	}
	return 0;
}

/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
		if (held2)
			gl->gl_lockref.count++;
		else
			gl->gl_lockref.count--;
	}
	if (held1 && held2 && list_empty(&gl->gl_holders))
		clear_bit(GLF_QUEUED, &gl->gl_flags);

	if (new_state != gl->gl_target)
		/* shorten our minimum hold time */
		gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
				       GL_GLOCK_MIN_HOLD);
	gl->gl_state = new_state;
	gl->gl_tchange = jiffies;
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	smp_mb__after_atomic();
	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 *
 */

static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh;
	unsigned state = ret & LM_OUT_ST_MASK;
	int rv;

	spin_lock(&gl->gl_spin);
	trace_gfs2_glock_state_change(gl, state);
	state_change(gl, state);
	gh = find_first_waiter(gl);

	/* Demote to UN request arrived during demote to SH or DF */
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
		gl->gl_target = LM_ST_UNLOCKED;

	/* Check for state != intended state */
	if (unlikely(state != gl->gl_target)) {
		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
			/* move to back of queue and try next entry */
			if (ret & LM_OUT_CANCELED) {
				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
					list_move_tail(&gh->gh_list, &gl->gl_holders);
				gh = find_first_waiter(gl);
				gl->gl_target = gh->gh_state;
				goto retry;
			}
			/* Some error or failed "try lock" - report it */
			if ((ret & LM_OUT_ERROR) ||
			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
				gl->gl_target = gl->gl_state;
				do_error(gl, ret);
				goto out;
			}
		}
		switch(state) {
		/* Unlocked due to conversion deadlock, try again */
		case LM_ST_UNLOCKED:
retry:
			do_xmote(gl, gh, gl->gl_target);
			break;
		/* Conversion fails, unlock and try again */
		case LM_ST_SHARED:
		case LM_ST_DEFERRED:
			do_xmote(gl, gh, LM_ST_UNLOCKED);
			break;
		default: /* Everything else */
			pr_err("wanted %u got %u\n", gl->gl_target, state);
			GLOCK_BUG_ON(gl, 1);
		}
		spin_unlock(&gl->gl_spin);
		return;
	}

	/* Fast path - we got what we asked for */
	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
		gfs2_demote_wake(gl);
	if (state != LM_ST_UNLOCKED) {
		if (glops->go_xmote_bh) {
			spin_unlock(&gl->gl_spin);
			rv = glops->go_xmote_bh(gl, gh);
			spin_lock(&gl->gl_spin);
			if (rv) {
				do_error(gl, rv);
				goto out;
			}
		}
		rv = do_promote(gl);
		if (rv == 2)
			goto out_locked;
	}
out:
	clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
	spin_unlock(&gl->gl_spin);
}

/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The lock state
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 *
 */

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int lck_flags = gh ? gh->gh_flags : 0;
	int ret;

	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
		      LM_FLAG_PRIORITY);
	GLOCK_BUG_ON(gl, gl->gl_state == target);
	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
	    glops->go_inval) {
		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
		do_error(gl, 0); /* Fail queued try locks */
	}
	gl->gl_req = target;
	set_bit(GLF_BLOCKING, &gl->gl_flags);
	if ((gl->gl_req == LM_ST_UNLOCKED) ||
	    (gl->gl_state == LM_ST_EXCLUSIVE) ||
	    (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
		clear_bit(GLF_BLOCKING, &gl->gl_flags);
	spin_unlock(&gl->gl_spin);
	if (glops->go_sync)
		glops->go_sync(gl);
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);

	gfs2_glock_hold(gl);
	if (sdp->sd_lockstruct.ls_ops->lm_lock) {
		/* lock_dlm */
		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
		if (ret) {
			pr_err("lm_lock ret %d\n", ret);
			GLOCK_BUG_ON(gl, 1);
		}
	} else { /* lock_nolock */
		finish_xmote(gl, target);
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
			gfs2_glock_put(gl);
	}

	spin_lock(&gl->gl_spin);
}

/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 *
 */

static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_holder *gh = NULL;
	int ret;

	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
	    gl->gl_demote_state != gl->gl_state) {
		if (find_first_holder(gl))
			goto out_unlock;
		if (nonblock)
			goto out_sched;
		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
		gl->gl_target = gl->gl_demote_state;
	} else {
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			gfs2_demote_wake(gl);
		ret = do_promote(gl);
		if (ret == 0)
			goto out_unlock;
		if (ret == 2)
			goto out;
		gh = find_first_waiter(gl);
		gl->gl_target = gh->gh_state;
		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
			do_error(gl, 0); /* Fail queued try locks */
	}
	do_xmote(gl, gh, gl->gl_target);
out:
	return;

out_sched:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_atomic();
	gl->gl_lockref.count++;
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gl->gl_lockref.count--;
	return;

out_unlock:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_atomic();
	return;
}

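/*
 * Deferred deletion work: look up the inode matching this glock's block
 * number and prune its dentry aliases so that the final iput can happen.
 */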
static void delete_work_func(struct work_struct *work)
{
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip;
	struct inode *inode;
	u64 no_addr = gl->gl_name.ln_number;

	ip = gl->gl_object;
	/* Note: Unsafe to dereference ip as we don't hold right refs/locks */

	if (ip)
		inode = gfs2_ilookup(sdp->sd_vfs, no_addr, 1);
	else
		inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
	if (inode && !IS_ERR(inode)) {
		d_prune_aliases(inode);
		iput(inode);
	}
	gfs2_glock_put(gl);
}

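/*
 * Main glock workqueue function: process a pending DLM reply, apply any
 * pending demote once the minimum hold time has expired, then re-run the
 * holder queue.
 */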
static void glock_work_func(struct work_struct *work)
{
	unsigned long delay = 0;
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
	int drop_ref = 0;

	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
		finish_xmote(gl, gl->gl_reply);
		drop_ref = 1;
	}
	spin_lock(&gl->gl_spin);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    gl->gl_state != LM_ST_UNLOCKED &&
	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
		unsigned long holdtime, now = jiffies;

		holdtime = gl->gl_tchange + gl->gl_hold_time;
		if (time_before(now, holdtime))
			delay = holdtime - now;

		if (!delay) {
			clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
			set_bit(GLF_DEMOTE, &gl->gl_flags);
		}
	}
	run_queue(gl, 0);
	spin_unlock(&gl->gl_spin);
	if (!delay)
		gfs2_glock_put(gl);
	else {
		if (gl->gl_name.ln_type != LM_TYPE_INODE)
			delay = 0;
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
			gfs2_glock_put(gl);
	}
	if (drop_ref)
		gfs2_glock_put(gl);
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct super_block *s = sdp->sd_vfs;
	struct lm_lockname name = { .ln_number = number,
				    .ln_type = glops->go_type,
				    .ln_sbd = sdp };
	struct gfs2_glock *gl, *tmp;
	unsigned int hash = gl_hash(sdp, &name);
	struct address_space *mapping;
	struct kmem_cache *cachep;

	rcu_read_lock();
	gl = search_bucket(hash, &name);
	rcu_read_unlock();

	*glp = gl;
	if (gl)
		return 0;
	if (!create)
		return -ENOENT;

	if (glops->go_flags & GLOF_ASPACE)
		cachep = gfs2_glock_aspace_cachep;
	else
		cachep = gfs2_glock_cachep;
	gl = kmem_cache_alloc(cachep, GFP_NOFS);
	if (!gl)
		return -ENOMEM;

	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));

	if (glops->go_flags & GLOF_LVB) {
		gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS);
		if (!gl->gl_lksb.sb_lvbptr) {
			kmem_cache_free(cachep, gl);
			return -ENOMEM;
		}
	}

	atomic_inc(&sdp->sd_glock_disposal);
	gl->gl_flags = 0;
	gl->gl_name = name;
	gl->gl_lockref.count = 1;
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_target = LM_ST_UNLOCKED;
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	gl->gl_hash = hash;
	gl->gl_ops = glops;
	gl->gl_dstamp = ktime_set(0, 0);
	preempt_disable();
	/* We use the global stats to estimate the initial per-glock stats */
	gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
	preempt_enable();
	gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
	gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
	gl->gl_tchange = jiffies;
	gl->gl_object = NULL;
	gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
	INIT_WORK(&gl->gl_delete, delete_work_func);

	mapping = gfs2_glock2aspace(gl);
	if (mapping) {
		mapping->a_ops = &gfs2_meta_aops;
		mapping->host = s->s_bdev->bd_inode;
		mapping->flags = 0;
		mapping_set_gfp_mask(mapping, GFP_NOFS);
		mapping->private_data = NULL;
		mapping->writeback_index = 0;
	}

	spin_lock_bucket(hash);
	tmp = search_bucket(hash, &name);
	if (tmp) {
		spin_unlock_bucket(hash);
		kfree(gl->gl_lksb.sb_lvbptr);
		kmem_cache_free(cachep, gl);
		atomic_dec(&sdp->sd_glock_disposal);
		gl = tmp;
	} else {
		hlist_bl_add_head_rcu(&gl->gl_list, &gl_hash_table[hash]);
		spin_unlock_bucket(hash);
	}

	*glp = gl;

	return 0;
}

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = _RET_IP_;
	gh->gh_owner_pid = get_pid(task_pid(current));
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
	gh->gh_ip = _RET_IP_;
	put_pid(gh->gh_owner_pid);
	gh->gh_owner_pid = get_pid(task_pid(current));
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	put_pid(gh->gh_owner_pid);
	gfs2_glock_put(gh->gh_gl);
	gh->gh_gl = NULL;
	gh->gh_ip = 0;
}

/**
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	unsigned long time1 = jiffies;

	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
	if (time_after(jiffies, time1 + HZ)) /* have we waited > a second? */
		/* Lengthen the minimum hold time. */
		gh->gh_gl->gl_hold_time = min(gh->gh_gl->gl_hold_time +
					      GL_GLOCK_HOLD_INCR,
					      GL_GLOCK_MAX_HOLD);
	return gh->gh_error;
}

/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state,
			    unsigned long delay, bool remote)
{
	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

	set_bit(bit, &gl->gl_flags);
	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
		   gl->gl_demote_state != state) {
		gl->gl_demote_state = LM_ST_UNLOCKED;
	}
	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl, remote);
	trace_gfs2_demote_rq(gl, remote);
}

void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	if (seq) {
		seq_vprintf(seq, fmt, args);
	} else {
		vaf.fmt = fmt;
		vaf.va = &args;

		pr_err("%pV", &vaf);
	}

	va_end(args);
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Eventually we should move the recursive locking trap to a
 * debugging option or something like that. This is the fast
 * path and needs to have the minimum number of distractions.
 *
 */

static inline void add_to_queue(struct gfs2_holder *gh)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *insert_pt = NULL;
	struct gfs2_holder *gh2;
	int try_futile = 0;

	BUG_ON(gh->gh_owner_pid == NULL);
	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
		BUG();

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			try_futile = !may_grant(gl, gh);
		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
			goto fail;
	}

	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
			goto trap_recursive;
		if (try_futile &&
		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
fail:
			gh->gh_error = GLR_TRYFAILED;
			gfs2_holder_wake(gh);
			return;
		}
		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
			continue;
		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
			insert_pt = &gh2->gh_list;
	}
	set_bit(GLF_QUEUED, &gl->gl_flags);
	trace_gfs2_glock_queue(gh, 1);
	gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
	gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
	if (likely(insert_pt == NULL)) {
		list_add_tail(&gh->gh_list, &gl->gl_holders);
		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
			goto do_cancel;
		return;
	}
	list_add_tail(&gh->gh_list, insert_pt);
do_cancel:
	gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
		spin_unlock(&gl->gl_spin);
		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
		spin_lock(&gl->gl_spin);
	}
	return;

trap_recursive:
	pr_err("original: %pSR\n", (void *)gh2->gh_ip);
	pr_err("pid: %d\n", pid_nr(gh2->gh_owner_pid));
	pr_err("lock type: %d req lock state : %d\n",
	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
	pr_err("new: %pSR\n", (void *)gh->gh_ip);
	pr_err("pid: %d\n", pid_nr(gh->gh_owner_pid));
	pr_err("lock type: %d req lock state : %d\n",
	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
	gfs2_dump_glock(NULL, gl);
	BUG();
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	int error = 0;

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	if (test_bit(GLF_LRU, &gl->gl_flags))
		gfs2_glock_remove_from_lru(gl);

	spin_lock(&gl->gl_spin);
	add_to_queue(gh);
	if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
		     test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
		set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
		gl->gl_lockref.count++;
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
			gl->gl_lockref.count--;
	}
	run_queue(gl, 1);
	spin_unlock(&gl->gl_spin);

	if (!(gh->gh_flags & GL_ASYNC))
		error = gfs2_glock_wait(gh);

	return error;
}

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
}

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned delay = 0;
	int fast_path = 0;

	spin_lock(&gl->gl_spin);
	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED, 0, false);

	list_del_init(&gh->gh_list);
	if (find_first_holder(gl) == NULL) {
		if (glops->go_unlock) {
			GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
			spin_unlock(&gl->gl_spin);
			glops->go_unlock(gh);
			spin_lock(&gl->gl_spin);
			clear_bit(GLF_LOCK, &gl->gl_flags);
		}
		if (list_empty(&gl->gl_holders) &&
		    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
			fast_path = 1;
	}
	if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl) &&
	    (glops->go_flags & GLOF_LRU))
		gfs2_glock_add_to_lru(gl);

	trace_gfs2_glock_queue(gh, 0);
	spin_unlock(&gl->gl_spin);
	if (likely(fast_path))
		return;

	gfs2_glock_hold(gl);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
	    gl->gl_name.ln_type == LM_TYPE_INODE)
		delay = gl->gl_hold_time;
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}

void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	gfs2_glock_dq(gh);
	might_sleep();
	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE);
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
		      const struct gfs2_glock_operations *glops,
		      unsigned int state, int flags, struct gfs2_holder *gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (!error) {
		error = gfs2_glock_nq_init(gl, state, flags, gh);
		gfs2_glock_put(gl);
	}

	return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

	if (a->ln_number > b->ln_number)
		return 1;
	if (a->ln_number < b->ln_number)
		return -1;
	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
	return 0;
}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	struct gfs2_holder *tmp[4];
	struct gfs2_holder **pph = tmp;
	int error = 0;

	switch(num_gh) {
	case 0:
		return 0;
	case 1:
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		return gfs2_glock_nq(ghs);
	default:
		if (num_gh <= 4)
			break;
		pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
		if (!pph)
			return -ENOMEM;
	}

	error = nq_m_sync(num_gh, ghs, pph);

	if (pph != tmp)
		kfree(pph);

	return error;
}

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	while (num_gh--)
		gfs2_glock_dq(&ghs[num_gh]);
}

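/*
 * Handle a remote demote request from the lock manager, delaying the demote
 * of recently queued inode glocks by up to gl_hold_time.
 */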
void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
{
	unsigned long delay = 0;
	unsigned long holdtime;
	unsigned long now = jiffies;

	gfs2_glock_hold(gl);
	holdtime = gl->gl_tchange + gl->gl_hold_time;
	if (test_bit(GLF_QUEUED, &gl->gl_flags) &&
	    gl->gl_name.ln_type == LM_TYPE_INODE) {
		if (time_before(now, holdtime))
			delay = holdtime - now;
		if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
			delay = gl->gl_hold_time;
	}

	spin_lock(&gl->gl_spin);
	handle_callback(gl, state, delay, true);
	spin_unlock(&gl->gl_spin);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}

/**
 * gfs2_should_freeze - Figure out if glock should be frozen
 * @gl: The glock in question
 *
 * Glocks are not frozen if (a) the result of the dlm operation is
 * an error, (b) the locking operation was an unlock operation or
 * (c) if there is a "noexp" flagged request anywhere in the queue
 *
 * Returns: 1 if freezing should occur, 0 otherwise
 */

static int gfs2_should_freeze(const struct gfs2_glock *gl)
{
	const struct gfs2_holder *gh;

	if (gl->gl_reply & ~LM_OUT_ST_MASK)
		return 0;
	if (gl->gl_target == LM_ST_UNLOCKED)
		return 0;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (LM_FLAG_NOEXP & gh->gh_flags)
			return 0;
	}

	return 1;
}

/**
 * gfs2_glock_complete - Callback used by locking
 * @gl: Pointer to the glock
 * @ret: The return value from the dlm
 *
 * The gl_reply field is under the gl_spin lock so that it is ok
 * to use a bitfield shared with other glock state fields.
 */

void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
{
	struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;

	spin_lock(&gl->gl_spin);
	gl->gl_reply = ret;

	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
		if (gfs2_should_freeze(gl)) {
			set_bit(GLF_FROZEN, &gl->gl_flags);
			spin_unlock(&gl->gl_spin);
			return;
		}
	}

	gl->gl_lockref.count++;
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	spin_unlock(&gl->gl_spin);

	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}

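/* list_sort() comparison callback: order LRU entries by lock number */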
static int glock_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct gfs2_glock *gla, *glb;

	gla = list_entry(a, struct gfs2_glock, gl_lru);
	glb = list_entry(b, struct gfs2_glock, gl_lru);

	if (gla->gl_name.ln_number > glb->gl_name.ln_number)
		return 1;
	if (gla->gl_name.ln_number < glb->gl_name.ln_number)
		return -1;

	return 0;
}

/**
 * gfs2_dispose_glock_lru - Demote a list of glocks
 * @list: The list to dispose of
 *
 * Disposing of glocks may involve disk accesses, so that here we sort
 * the glocks by number (i.e. disk location of the inodes) so that if
 * there are any such accesses, they'll be sent in order (mostly).
 *
 * Must be called under the lru_lock, but may drop and retake this
 * lock. While the lru_lock is dropped, entries may vanish from the
 * list, but no new entries will appear on the list (since it is
 * private)
 */

static void gfs2_dispose_glock_lru(struct list_head *list)
__releases(&lru_lock)
__acquires(&lru_lock)
{
	struct gfs2_glock *gl;

	list_sort(NULL, list, glock_cmp);

	while(!list_empty(list)) {
		gl = list_entry(list->next, struct gfs2_glock, gl_lru);
		list_del_init(&gl->gl_lru);
		if (!spin_trylock(&gl->gl_spin)) {
add_back_to_lru:
			list_add(&gl->gl_lru, &lru_list);
			atomic_inc(&lru_count);
			continue;
		}
		if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
			spin_unlock(&gl->gl_spin);
			goto add_back_to_lru;
		}
		clear_bit(GLF_LRU, &gl->gl_flags);
		gl->gl_lockref.count++;
		if (demote_ok(gl))
			handle_callback(gl, LM_ST_UNLOCKED, 0, false);
		WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
			gl->gl_lockref.count--;
		spin_unlock(&gl->gl_spin);
		cond_resched_lock(&lru_lock);
	}
}

/**
 * gfs2_scan_glock_lru - Scan the LRU looking for locks to demote
 * @nr: The number of entries to scan
 *
 * This function selects the entries on the LRU which are able to
 * be demoted, and then kicks off the process by calling
 * gfs2_dispose_glock_lru() above.
 */

static long gfs2_scan_glock_lru(int nr)
{
	struct gfs2_glock *gl;
	LIST_HEAD(skipped);
	LIST_HEAD(dispose);
	long freed = 0;

	spin_lock(&lru_lock);
	while ((nr-- >= 0) && !list_empty(&lru_list)) {
		gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);

		/* Test for being demotable */
		if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
			list_move(&gl->gl_lru, &dispose);
			atomic_dec(&lru_count);
			freed++;
			continue;
		}

		list_move(&gl->gl_lru, &skipped);
	}
	list_splice(&skipped, &lru_list);
	if (!list_empty(&dispose))
		gfs2_dispose_glock_lru(&dispose);
	spin_unlock(&lru_lock);

	return freed;
}

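/* Shrinker callbacks: report LRU size and demote glocks under memory pressure */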
static unsigned long gfs2_glock_shrink_scan(struct shrinker *shrink,
					    struct shrink_control *sc)
{
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;
	return gfs2_scan_glock_lru(sc->nr_to_scan);
}

static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink,
					     struct shrink_control *sc)
{
	return vfs_pressure_ratio(atomic_read(&lru_count));
}

static struct shrinker glock_shrinker = {
	.seeks = DEFAULT_SEEKS,
	.count_objects = gfs2_glock_shrink_count,
	.scan_objects = gfs2_glock_shrink_scan,
};

/**
 * examine_bucket - Call a function for glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @bucket: the bucket
 *
 */

static void examine_bucket(glock_examiner examiner, const struct gfs2_sbd *sdp,
			   unsigned int hash)
{
	struct gfs2_glock *gl;
	struct hlist_bl_head *head = &gl_hash_table[hash];
	struct hlist_bl_node *pos;

	rcu_read_lock();
	hlist_bl_for_each_entry_rcu(gl, pos, head, gl_list) {
		if ((gl->gl_name.ln_sbd == sdp) && lockref_get_not_dead(&gl->gl_lockref))
			examiner(gl);
	}
	rcu_read_unlock();
	cond_resched();
}

static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
{
	unsigned x;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
		examine_bucket(examiner, sdp, x);
}

/**
 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
 * @gl: The glock to thaw
 *
 */

static void thaw_glock(struct gfs2_glock *gl)
{
	if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
		goto out;
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) {
out:
		gfs2_glock_put(gl);
	}
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */

static void clear_glock(struct gfs2_glock *gl)
{
	gfs2_glock_remove_from_lru(gl);

	spin_lock(&gl->gl_spin);
	if (gl->gl_state != LM_ST_UNLOCKED)
		handle_callback(gl, LM_ST_UNLOCKED, 0, false);
	spin_unlock(&gl->gl_spin);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}

/**
 * gfs2_glock_thaw - Thaw any frozen glocks
 * @sdp: The super block
 *
 */

void gfs2_glock_thaw(struct gfs2_sbd *sdp)
{
	glock_hash_walk(thaw_glock, sdp);
}

static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
{
	spin_lock(&gl->gl_spin);
	gfs2_dump_glock(seq, gl);
	spin_unlock(&gl->gl_spin);
}

static void dump_glock_func(struct gfs2_glock *gl)
{
	dump_glock(NULL, gl);
}

/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 * @wait: wait until it's all gone
 *
 * Called when unmounting the filesystem.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
	set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
	flush_workqueue(glock_workqueue);
	glock_hash_walk(clear_glock, sdp);
	flush_workqueue(glock_workqueue);
	wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
	glock_hash_walk(dump_glock_func, sdp);
}

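/*
 * Resume an interrupted truncate for the inode and then let the glock
 * state machine continue by re-running its holder queue.
 */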
void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
{
	struct gfs2_glock *gl = ip->i_gl;
	int ret;

	ret = gfs2_truncatei_resume(ip);
	gfs2_assert_withdraw(gl->gl_name.ln_sbd, ret == 0);

	spin_lock(&gl->gl_spin);
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl, 1);
	spin_unlock(&gl->gl_spin);
}

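/* Helpers that render lock states and holder flags for the glock dump output below */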
static const char *state2str(unsigned state)
{
	switch(state) {
	case LM_ST_UNLOCKED:
		return "UN";
	case LM_ST_SHARED:
		return "SH";
	case LM_ST_DEFERRED:
		return "DF";
	case LM_ST_EXCLUSIVE:
		return "EX";
	}
	return "??";
}

static const char *hflags2str(char *buf, unsigned flags, unsigned long iflags)
{
	char *p = buf;
	if (flags & LM_FLAG_TRY)
		*p++ = 't';
	if (flags & LM_FLAG_TRY_1CB)
		*p++ = 'T';
	if (flags & LM_FLAG_NOEXP)
		*p++ = 'e';
	if (flags & LM_FLAG_ANY)
		*p++ = 'A';
	if (flags & LM_FLAG_PRIORITY)
		*p++ = 'p';
	if (flags & GL_ASYNC)
		*p++ = 'a';
	if (flags & GL_EXACT)
		*p++ = 'E';
	if (flags & GL_NOCACHE)
		*p++ = 'c';
	if (test_bit(HIF_HOLDER, &iflags))
		*p++ = 'H';
	if (test_bit(HIF_WAIT, &iflags))
		*p++ = 'W';
	if (test_bit(HIF_FIRST, &iflags))
		*p++ = 'F';
	*p = 0;
	return buf;
}

/**
 * dump_holder - print information about a glock holder
 * @seq: the seq_file struct
 * @gh: the glock holder
 *
 */

static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
{
	struct task_struct *gh_owner = NULL;
	char flags_buf[32];

	rcu_read_lock();
	if (gh->gh_owner_pid)
		gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
	gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
		       state2str(gh->gh_state),
		       hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
		       gh->gh_error,
		       gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
		       gh_owner ? gh_owner->comm : "(ended)",
		       (void *)gh->gh_ip);
	rcu_read_unlock();
}

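/* Render the glock's GLF_* flag bits as a compact string for dumps */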
static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
{
	const unsigned long *gflags = &gl->gl_flags;
	char *p = buf;

	if (test_bit(GLF_LOCK, gflags))
		*p++ = 'l';
	if (test_bit(GLF_DEMOTE, gflags))
		*p++ = 'D';
	if (test_bit(GLF_PENDING_DEMOTE, gflags))
		*p++ = 'd';
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
		*p++ = 'p';
	if (test_bit(GLF_DIRTY, gflags))
		*p++ = 'y';
	if (test_bit(GLF_LFLUSH, gflags))
		*p++ = 'f';
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
		*p++ = 'i';
	if (test_bit(GLF_REPLY_PENDING, gflags))
		*p++ = 'r';
	if (test_bit(GLF_INITIAL, gflags))
		*p++ = 'I';
	if (test_bit(GLF_FROZEN, gflags))
		*p++ = 'F';
	if (test_bit(GLF_QUEUED, gflags))
		*p++ = 'q';
	if (test_bit(GLF_LRU, gflags))
		*p++ = 'L';
	if (gl->gl_object)
		*p++ = 'o';
	if (test_bit(GLF_BLOCKING, gflags))
		*p++ = 'b';
	*p = 0;
	return buf;
}

/**
 * gfs2_dump_glock - print information about a glock
 * @seq: The seq_file struct
 * @gl: the glock
 *
 * The file format is as follows:
 * One line per object, capital letters are used to indicate objects
 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
 * other objects are indented by a single space and follow the glock to
 * which they are related. Fields are indicated by lower case letters
 * followed by a colon and the field value, except for strings which are in
 * [] so that it's possible to see if they are composed of spaces for
 * example. The fields are: n = number (id of the object), f = flags,
 * t = type, s = state, r = refcount, e = error, p = pid.
 *
 */

void gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned long long dtime;
	const struct gfs2_holder *gh;
	char gflags_buf[32];

	dtime = jiffies - gl->gl_demote_time;
	dtime *= 1000000/HZ; /* demote time in uSec */
	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
		dtime = 0;
	gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d v:%d r:%d m:%ld\n",
		       state2str(gl->gl_state),
		       gl->gl_name.ln_type,
		       (unsigned long long)gl->gl_name.ln_number,
		       gflags2str(gflags_buf, gl),
		       state2str(gl->gl_target),
		       state2str(gl->gl_demote_state), dtime,
		       atomic_read(&gl->gl_ail_count),
		       atomic_read(&gl->gl_revokes),
		       (int)gl->gl_lockref.count, gl->gl_hold_time);

	list_for_each_entry(gh, &gl->gl_holders, gh_list)
		dump_holder(seq, gh);

	if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
		glops->go_dump(seq, gl);
}

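/* seq_file show callback: one line of per-glock lock statistics */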
a245769f
SW
1730static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr)
1731{
1732 struct gfs2_glock *gl = iter_ptr;
1733
1734 seq_printf(seq, "G: n:%u/%llx rtt:%lld/%lld rttb:%lld/%lld irt:%lld/%lld dcnt: %lld qcnt: %lld\n",
1735 gl->gl_name.ln_type,
1736 (unsigned long long)gl->gl_name.ln_number,
1737 (long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
1738 (long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
1739 (long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
1740 (long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
1741 (long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
1742 (long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
1743 (long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
1744 (long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
1745 return 0;
1746}

static const char *gfs2_gltype[] = {
	"type",
	"reserved",
	"nondisk",
	"inode",
	"rgrp",
	"meta",
	"iopen",
	"flock",
	"plock",
	"quota",
	"journal",
};

static const char *gfs2_stype[] = {
	[GFS2_LKS_SRTT] = "srtt",
	[GFS2_LKS_SRTTVAR] = "srttvar",
	[GFS2_LKS_SRTTB] = "srttb",
	[GFS2_LKS_SRTTVARB] = "srttvarb",
	[GFS2_LKS_SIRT] = "sirt",
	[GFS2_LKS_SIRTVAR] = "sirtvar",
	[GFS2_LKS_DCOUNT] = "dlm",
	[GFS2_LKS_QCOUNT] = "queue",
};

#define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype))

static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_sbd *sdp = seq->private;
	loff_t pos = *(loff_t *)iter_ptr;
	unsigned index = pos >> 3;
	unsigned subindex = pos & 0x07;
	s64 value;
	int i;

	if (index == 0 && subindex != 0)
		return 0;

	seq_printf(seq, "%-10s %8s:", gfs2_gltype[index],
		   (index == 0) ? "cpu": gfs2_stype[subindex]);

	for_each_possible_cpu(i) {
		const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i);
		if (index == 0) {
			value = i;
		} else {
			value = lkstats->lkstats[index - 1].stats[subindex];
		}
		seq_printf(seq, " %15lld", (long long)value);
	}
	seq_putc(seq, '\n');
	return 0;
}
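
/*
 * Illustrative aside, not part of this file: how a seq_file position is
 * decoded into the row and column printed by gfs2_sbstats_seq_show()
 * above (pos >> 3 selects the glock type, pos & 0x07 the statistic).
 * The tables simply mirror gfs2_gltype[] and gfs2_stype[]; in the real
 * file the index 0 row is the per-CPU header, so only subindex 0 of that
 * row is printed. Build as an ordinary user-space C program.
 */
#include <stdio.h>

static const char *gltype[] = { "type", "reserved", "nondisk", "inode",
				"rgrp", "meta", "iopen", "flock",
				"plock", "quota", "journal" };
static const char *stype[] = { "srtt", "srttvar", "srttb", "srttvarb",
			       "sirt", "sirtvar", "dlm", "queue" };

int main(void)
{
	unsigned long long pos;

	/* 11 glock types * 8 statistics = 88 positions (GFS2_NR_SBSTATS) */
	for (pos = 0; pos < 88; pos++) {
		unsigned index = pos >> 3;	/* glock type row */
		unsigned subindex = pos & 0x07;	/* statistic column */

		printf("pos %2llu -> %-8s %s\n", pos,
		       gltype[index], stype[subindex]);
	}
	return 0;
}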

int __init gfs2_glock_init(void)
{
	unsigned i;
	for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
		INIT_HLIST_BL_HEAD(&gl_hash_table[i]);
	}

	glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
					  WQ_HIGHPRI | WQ_FREEZABLE, 0);
	if (!glock_workqueue)
		return -ENOMEM;
	gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
						WQ_MEM_RECLAIM | WQ_FREEZABLE,
						0);
	if (!gfs2_delete_workqueue) {
		destroy_workqueue(glock_workqueue);
		return -ENOMEM;
	}

	register_shrinker(&glock_shrinker);

	return 0;
}

void gfs2_glock_exit(void)
{
	unregister_shrinker(&glock_shrinker);
	destroy_workqueue(glock_workqueue);
	destroy_workqueue(gfs2_delete_workqueue);
}

static inline struct gfs2_glock *glock_hash_chain(unsigned hash)
{
	return hlist_bl_entry(hlist_bl_first_rcu(&gl_hash_table[hash]),
			      struct gfs2_glock, gl_list);
}

static inline struct gfs2_glock *glock_hash_next(struct gfs2_glock *gl)
{
	return hlist_bl_entry(rcu_dereference(gl->gl_list.next),
			      struct gfs2_glock, gl_list);
}

static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
{
	struct gfs2_glock *gl;

	do {
		gl = gi->gl;
		if (gl) {
			gi->gl = glock_hash_next(gl);
			gi->nhash++;
		} else {
			if (gi->hash >= GFS2_GL_HASH_SIZE) {
				rcu_read_unlock();
				return 1;
			}
			gi->gl = glock_hash_chain(gi->hash);
			gi->nhash = 0;
		}
		while (gi->gl == NULL) {
			gi->hash++;
			if (gi->hash >= GFS2_GL_HASH_SIZE) {
				rcu_read_unlock();
				return 1;
			}
			gi->gl = glock_hash_chain(gi->hash);
			gi->nhash = 0;
		}
		/* Skip entries for other sb and dead entries */
	} while (gi->sdp != gi->gl->gl_name.ln_sbd ||
		 __lockref_is_dead(&gi->gl->gl_lockref));

	return 0;
}

static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;
	loff_t n = *pos;

	if (gi->last_pos <= *pos)
		n = gi->nhash + (*pos - gi->last_pos);
	else
		gi->hash = 0;

	gi->nhash = 0;
	rcu_read_lock();

	do {
		if (gfs2_glock_iter_next(gi))
			return NULL;
	} while (n--);

	gi->last_pos = *pos;
	return gi->gl;
}

static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
				 loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;

	(*pos)++;
	gi->last_pos = *pos;
	if (gfs2_glock_iter_next(gi))
		return NULL;

	return gi->gl;
}

static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glock_iter *gi = seq->private;

	if (gi->gl)
		rcu_read_unlock();
	gi->gl = NULL;
}

static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
{
	dump_glock(seq, iter_ptr);
	return 0;
}

static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos)
{
	preempt_disable();
	if (*pos >= GFS2_NR_SBSTATS)
		return NULL;
	return pos;
}

static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr,
				   loff_t *pos)
{
	(*pos)++;
	if (*pos >= GFS2_NR_SBSTATS)
		return NULL;
	return pos;
}

static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr)
{
	preempt_enable();
}

static const struct seq_operations gfs2_glock_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next = gfs2_glock_seq_next,
	.stop = gfs2_glock_seq_stop,
	.show = gfs2_glock_seq_show,
};

static const struct seq_operations gfs2_glstats_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next = gfs2_glock_seq_next,
	.stop = gfs2_glock_seq_stop,
	.show = gfs2_glstats_seq_show,
};

static const struct seq_operations gfs2_sbstats_seq_ops = {
	.start = gfs2_sbstats_seq_start,
	.next = gfs2_sbstats_seq_next,
	.stop = gfs2_sbstats_seq_stop,
	.show = gfs2_sbstats_seq_show,
};

#define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL)

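/*
 * Worked example, an aside rather than kernel code: with the common
 * values PAGE_SIZE = 4096 and PAGE_ALLOC_COSTLY_ORDER = 3 (assumptions
 * about the configuration), GFS2_SEQ_GOODSIZE above evaluates to the
 * smaller of 4096 << 3 and 65536, i.e. a 32 KiB seq_file buffer.
 */
#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;	/* assumed 4 KiB pages */
	unsigned long costly_order = 3;	/* assumed PAGE_ALLOC_COSTLY_ORDER */
	unsigned long goodsize = page_size << costly_order;

	if (goodsize > 65536UL)
		goodsize = 65536UL;
	printf("GFS2_SEQ_GOODSIZE = %lu bytes\n", goodsize);	/* 32768 */
	return 0;
}
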
static int gfs2_glocks_open(struct inode *inode, struct file *file)
{
	int ret = seq_open_private(file, &gfs2_glock_seq_ops,
				   sizeof(struct gfs2_glock_iter));
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;
		gi->sdp = inode->i_private;
		seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
		if (seq->buf)
			seq->size = GFS2_SEQ_GOODSIZE;
	}
	return ret;
}

static int gfs2_glstats_open(struct inode *inode, struct file *file)
{
	int ret = seq_open_private(file, &gfs2_glstats_seq_ops,
				   sizeof(struct gfs2_glock_iter));
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;
		gi->sdp = inode->i_private;
		seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
		if (seq->buf)
			seq->size = GFS2_SEQ_GOODSIZE;
	}
	return ret;
}

static int gfs2_sbstats_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &gfs2_sbstats_seq_ops);
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		seq->private = inode->i_private; /* sdp */
	}
	return ret;
}

static const struct file_operations gfs2_glocks_fops = {
	.owner = THIS_MODULE,
	.open = gfs2_glocks_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

static const struct file_operations gfs2_glstats_fops = {
	.owner = THIS_MODULE,
	.open = gfs2_glstats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

static const struct file_operations gfs2_sbstats_fops = {
	.owner = THIS_MODULE,
	.open = gfs2_sbstats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
{
	struct dentry *dent;

	dent = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
	if (IS_ERR_OR_NULL(dent))
		goto fail;
	sdp->debugfs_dir = dent;

	dent = debugfs_create_file("glocks",
				   S_IFREG | S_IRUGO,
				   sdp->debugfs_dir, sdp,
				   &gfs2_glocks_fops);
	if (IS_ERR_OR_NULL(dent))
		goto fail;
	sdp->debugfs_dentry_glocks = dent;

	dent = debugfs_create_file("glstats",
				   S_IFREG | S_IRUGO,
				   sdp->debugfs_dir, sdp,
				   &gfs2_glstats_fops);
	if (IS_ERR_OR_NULL(dent))
		goto fail;
	sdp->debugfs_dentry_glstats = dent;

	dent = debugfs_create_file("sbstats",
				   S_IFREG | S_IRUGO,
				   sdp->debugfs_dir, sdp,
				   &gfs2_sbstats_fops);
	if (IS_ERR_OR_NULL(dent))
		goto fail;
	sdp->debugfs_dentry_sbstats = dent;

	return 0;
fail:
	gfs2_delete_debugfs_file(sdp);
	return dent ? PTR_ERR(dent) : -ENOMEM;
}
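
/*
 * Illustrative aside, not part of this file: reading back one of the
 * debugfs files created by gfs2_create_debugfs_file(). The path assumes
 * debugfs is mounted at /sys/kernel/debug and "fsname" stands in for the
 * filesystem's sd_table_name. Build as an ordinary user-space C program.
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/gfs2/fsname/sbstats";
	char line[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}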

void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
{
	if (sdp->debugfs_dir) {
		if (sdp->debugfs_dentry_glocks) {
			debugfs_remove(sdp->debugfs_dentry_glocks);
			sdp->debugfs_dentry_glocks = NULL;
		}
		if (sdp->debugfs_dentry_glstats) {
			debugfs_remove(sdp->debugfs_dentry_glstats);
			sdp->debugfs_dentry_glstats = NULL;
		}
		if (sdp->debugfs_dentry_sbstats) {
			debugfs_remove(sdp->debugfs_dentry_sbstats);
			sdp->debugfs_dentry_sbstats = NULL;
		}
		debugfs_remove(sdp->debugfs_dir);
		sdp->debugfs_dir = NULL;
	}
}

int gfs2_register_debugfs(void)
{
	gfs2_root = debugfs_create_dir("gfs2", NULL);
	if (IS_ERR(gfs2_root))
		return PTR_ERR(gfs2_root);
	return gfs2_root ? 0 : -ENOMEM;
}

void gfs2_unregister_debugfs(void)
{
	debugfs_remove(gfs2_root);
	gfs2_root = NULL;
}