/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <asm/semaphore.h>
34 * gfs2_tune_init - Fill a gfs2_tune structure with default values
39 void gfs2_tune_init(struct gfs2_tune
*gt
)
41 spin_lock_init(>
->gt_spin
);
44 gt
->gt_ilimit_tries
= 3;
45 gt
->gt_ilimit_min
= 1;
46 gt
->gt_demote_secs
= 300;
47 gt
->gt_incore_log_blocks
= 1024;
48 gt
->gt_log_flush_secs
= 60;
49 gt
->gt_jindex_refresh_secs
= 60;
50 gt
->gt_scand_secs
= 15;
51 gt
->gt_recoverd_secs
= 60;
53 gt
->gt_quotad_secs
= 5;
54 gt
->gt_inoded_secs
= 15;
55 gt
->gt_quota_simul_sync
= 64;
56 gt
->gt_quota_warn_period
= 10;
57 gt
->gt_quota_scale_num
= 1;
58 gt
->gt_quota_scale_den
= 1;
59 gt
->gt_quota_cache_secs
= 300;
60 gt
->gt_quota_quantum
= 60;
61 gt
->gt_atime_quantum
= 3600;
62 gt
->gt_new_files_jdata
= 0;
63 gt
->gt_new_files_directio
= 0;
64 gt
->gt_max_atomic_write
= 4 << 20;
65 gt
->gt_max_readahead
= 1 << 18;
66 gt
->gt_lockdump_size
= 131072;
67 gt
->gt_stall_secs
= 600;
68 gt
->gt_complain_secs
= 10;
69 gt
->gt_reclaim_limit
= 5000;
70 gt
->gt_entries_per_readdir
= 32;
71 gt
->gt_prefetch_secs
= 10;
72 gt
->gt_greedy_default
= HZ
/ 10;
73 gt
->gt_greedy_quantum
= HZ
/ 40;
74 gt
->gt_greedy_max
= HZ
/ 4;
75 gt
->gt_statfs_quantum
= 30;
76 gt
->gt_statfs_slow
= 0;
80 * gfs2_check_sb - Check superblock
81 * @sdp: the filesystem
83 * @silent: Don't print a message if the check fails
85 * Checks the version code of the FS is one that we understand how to
86 * read and that the sizes of the various on-disk structures have not
90 int gfs2_check_sb(struct gfs2_sbd
*sdp
, struct gfs2_sb
*sb
, int silent
)
94 if (sb
->sb_header
.mh_magic
!= GFS2_MAGIC
||
95 sb
->sb_header
.mh_type
!= GFS2_METATYPE_SB
) {
97 printk("GFS2: not a GFS2 filesystem\n");
101 /* If format numbers match exactly, we're done. */
103 if (sb
->sb_fs_format
== GFS2_FORMAT_FS
&&
104 sb
->sb_multihost_format
== GFS2_FORMAT_MULTI
)
107 if (sb
->sb_fs_format
!= GFS2_FORMAT_FS
) {
108 for (x
= 0; gfs2_old_fs_formats
[x
]; x
++)
109 if (gfs2_old_fs_formats
[x
] == sb
->sb_fs_format
)
112 if (!gfs2_old_fs_formats
[x
]) {
113 printk("GFS2: code version (%u, %u) is incompatible "
114 "with ondisk format (%u, %u)\n",
115 GFS2_FORMAT_FS
, GFS2_FORMAT_MULTI
,
116 sb
->sb_fs_format
, sb
->sb_multihost_format
);
117 printk("GFS2: I don't know how to upgrade this FS\n");
122 if (sb
->sb_multihost_format
!= GFS2_FORMAT_MULTI
) {
123 for (x
= 0; gfs2_old_multihost_formats
[x
]; x
++)
124 if (gfs2_old_multihost_formats
[x
] == sb
->sb_multihost_format
)
127 if (!gfs2_old_multihost_formats
[x
]) {
128 printk("GFS2: code version (%u, %u) is incompatible "
129 "with ondisk format (%u, %u)\n",
130 GFS2_FORMAT_FS
, GFS2_FORMAT_MULTI
,
131 sb
->sb_fs_format
, sb
->sb_multihost_format
);
132 printk("GFS2: I don't know how to upgrade this FS\n");
137 if (!sdp
->sd_args
.ar_upgrade
) {
138 printk("GFS2: code version (%u, %u) is incompatible "
139 "with ondisk format (%u, %u)\n",
140 GFS2_FORMAT_FS
, GFS2_FORMAT_MULTI
,
141 sb
->sb_fs_format
, sb
->sb_multihost_format
);
142 printk("GFS2: Use the \"upgrade\" mount option to upgrade "
144 printk("GFS2: See the manual for more details\n");
152 * gfs2_read_sb - Read super block
153 * @sdp: The GFS2 superblock
154 * @gl: the glock for the superblock (assumed to be held)
155 * @silent: Don't print message if mount fails
159 int gfs2_read_sb(struct gfs2_sbd
*sdp
, struct gfs2_glock
*gl
, int silent
)
161 struct buffer_head
*bh
;
162 uint32_t hash_blocks
, ind_blocks
, leaf_blocks
;
167 error
= gfs2_meta_read(gl
, GFS2_SB_ADDR
>> sdp
->sd_fsb2bb_shift
,
168 DIO_FORCE
| DIO_START
| DIO_WAIT
, &bh
);
171 fs_err(sdp
, "can't read superblock\n");
175 gfs2_assert(sdp
, sizeof(struct gfs2_sb
) <= bh
->b_size
);
176 gfs2_sb_in(&sdp
->sd_sb
, bh
->b_data
);
179 error
= gfs2_check_sb(sdp
, &sdp
->sd_sb
, silent
);
183 sdp
->sd_fsb2bb_shift
= sdp
->sd_sb
.sb_bsize_shift
-
184 GFS2_BASIC_BLOCK_SHIFT
;
185 sdp
->sd_fsb2bb
= 1 << sdp
->sd_fsb2bb_shift
;
186 sdp
->sd_diptrs
= (sdp
->sd_sb
.sb_bsize
-
187 sizeof(struct gfs2_dinode
)) / sizeof(uint64_t);
188 sdp
->sd_inptrs
= (sdp
->sd_sb
.sb_bsize
-
189 sizeof(struct gfs2_meta_header
)) / sizeof(uint64_t);
190 sdp
->sd_jbsize
= sdp
->sd_sb
.sb_bsize
- sizeof(struct gfs2_meta_header
);
191 sdp
->sd_hash_bsize
= sdp
->sd_sb
.sb_bsize
/ 2;
192 sdp
->sd_hash_bsize_shift
= sdp
->sd_sb
.sb_bsize_shift
- 1;
193 sdp
->sd_hash_ptrs
= sdp
->sd_hash_bsize
/ sizeof(uint64_t);
194 sdp
->sd_ut_per_block
= (sdp
->sd_sb
.sb_bsize
-
195 sizeof(struct gfs2_meta_header
)) /
196 sizeof(struct gfs2_unlinked_tag
);
197 sdp
->sd_qc_per_block
= (sdp
->sd_sb
.sb_bsize
-
198 sizeof(struct gfs2_meta_header
)) /
199 sizeof(struct gfs2_quota_change
);
201 /* Compute maximum reservation required to add a entry to a directory */
203 hash_blocks
= DIV_RU(sizeof(uint64_t) * (1 << GFS2_DIR_MAX_DEPTH
),
207 for (tmp_blocks
= hash_blocks
; tmp_blocks
> sdp
->sd_diptrs
;) {
208 tmp_blocks
= DIV_RU(tmp_blocks
, sdp
->sd_inptrs
);
209 ind_blocks
+= tmp_blocks
;
212 leaf_blocks
= 2 + GFS2_DIR_MAX_DEPTH
;
214 sdp
->sd_max_dirres
= hash_blocks
+ ind_blocks
+ leaf_blocks
;
216 sdp
->sd_heightsize
[0] = sdp
->sd_sb
.sb_bsize
-
217 sizeof(struct gfs2_dinode
);
218 sdp
->sd_heightsize
[1] = sdp
->sd_sb
.sb_bsize
* sdp
->sd_diptrs
;
223 space
= sdp
->sd_heightsize
[x
- 1] * sdp
->sd_inptrs
;
225 m
= do_div(d
, sdp
->sd_inptrs
);
227 if (d
!= sdp
->sd_heightsize
[x
- 1] || m
)
229 sdp
->sd_heightsize
[x
] = space
;
231 sdp
->sd_max_height
= x
;
232 gfs2_assert(sdp
, sdp
->sd_max_height
<= GFS2_MAX_META_HEIGHT
);
234 sdp
->sd_jheightsize
[0] = sdp
->sd_sb
.sb_bsize
-
235 sizeof(struct gfs2_dinode
);
236 sdp
->sd_jheightsize
[1] = sdp
->sd_jbsize
* sdp
->sd_diptrs
;
241 space
= sdp
->sd_jheightsize
[x
- 1] * sdp
->sd_inptrs
;
243 m
= do_div(d
, sdp
->sd_inptrs
);
245 if (d
!= sdp
->sd_jheightsize
[x
- 1] || m
)
247 sdp
->sd_jheightsize
[x
] = space
;
249 sdp
->sd_max_jheight
= x
;
250 gfs2_assert(sdp
, sdp
->sd_max_jheight
<= GFS2_MAX_META_HEIGHT
);
/*
 * gfs2_do_upgrade - upgrade the on-disk format
 * NOTE(review): only the signature is visible in this extraction; the
 * function body is missing, so behavior cannot be documented from here —
 * confirm against the complete file.
 */
255 int gfs2_do_upgrade(struct gfs2_sbd
*sdp
, struct gfs2_glock
*sb_gl
)
261 * gfs2_jindex_hold - Grab a lock on the jindex
262 * @sdp: The GFS2 superblock
263 * @ji_gh: the holder for the jindex glock
265 * This is very similar to the gfs2_rindex_hold() function, except that
266 * in general we hold the jindex lock for longer periods of time and
267 * we grab it far less frequently (in general) then the rgrp lock.
272 int gfs2_jindex_hold(struct gfs2_sbd
*sdp
, struct gfs2_holder
*ji_gh
)
274 struct gfs2_inode
*dip
= sdp
->sd_jindex
;
277 struct gfs2_jdesc
*jd
;
282 down(&sdp
->sd_jindex_mutex
);
285 error
= gfs2_glock_nq_init(dip
->i_gl
, LM_ST_SHARED
,
286 GL_LOCAL_EXCL
, ji_gh
);
290 name
.len
= sprintf(buf
, "journal%u", sdp
->sd_journals
);
292 error
= gfs2_dir_search(sdp
->sd_jindex
, &name
, NULL
, NULL
);
293 if (error
== -ENOENT
) {
298 gfs2_glock_dq_uninit(ji_gh
);
304 jd
= kzalloc(sizeof(struct gfs2_jdesc
), GFP_KERNEL
);
308 error
= gfs2_lookupi(dip
, &name
, 1, &jd
->jd_inode
);
314 spin_lock(&sdp
->sd_jindex_spin
);
315 jd
->jd_jid
= sdp
->sd_journals
++;
316 list_add_tail(&jd
->jd_list
, &sdp
->sd_jindex_list
);
317 spin_unlock(&sdp
->sd_jindex_spin
);
320 up(&sdp
->sd_jindex_mutex
);
326 * gfs2_jindex_free - Clear all the journal index information
327 * @sdp: The GFS2 superblock
331 void gfs2_jindex_free(struct gfs2_sbd
*sdp
)
333 struct list_head list
;
334 struct gfs2_jdesc
*jd
;
336 spin_lock(&sdp
->sd_jindex_spin
);
337 list_add(&list
, &sdp
->sd_jindex_list
);
338 list_del_init(&sdp
->sd_jindex_list
);
339 sdp
->sd_journals
= 0;
340 spin_unlock(&sdp
->sd_jindex_spin
);
342 while (!list_empty(&list
)) {
343 jd
= list_entry(list
.next
, struct gfs2_jdesc
, jd_list
);
344 list_del(&jd
->jd_list
);
345 gfs2_inode_put(jd
->jd_inode
);
350 static struct gfs2_jdesc
*jdesc_find_i(struct list_head
*head
, unsigned int jid
)
352 struct gfs2_jdesc
*jd
;
355 list_for_each_entry(jd
, head
, jd_list
) {
356 if (jd
->jd_jid
== jid
) {
368 struct gfs2_jdesc
*gfs2_jdesc_find(struct gfs2_sbd
*sdp
, unsigned int jid
)
370 struct gfs2_jdesc
*jd
;
372 spin_lock(&sdp
->sd_jindex_spin
);
373 jd
= jdesc_find_i(&sdp
->sd_jindex_list
, jid
);
374 spin_unlock(&sdp
->sd_jindex_spin
);
379 void gfs2_jdesc_make_dirty(struct gfs2_sbd
*sdp
, unsigned int jid
)
381 struct gfs2_jdesc
*jd
;
383 spin_lock(&sdp
->sd_jindex_spin
);
384 jd
= jdesc_find_i(&sdp
->sd_jindex_list
, jid
);
387 spin_unlock(&sdp
->sd_jindex_spin
);
390 struct gfs2_jdesc
*gfs2_jdesc_find_dirty(struct gfs2_sbd
*sdp
)
392 struct gfs2_jdesc
*jd
;
395 spin_lock(&sdp
->sd_jindex_spin
);
397 list_for_each_entry(jd
, &sdp
->sd_jindex_list
, jd_list
) {
404 spin_unlock(&sdp
->sd_jindex_spin
);
412 int gfs2_jdesc_check(struct gfs2_jdesc
*jd
)
414 struct gfs2_inode
*ip
= jd
->jd_inode
;
415 struct gfs2_sbd
*sdp
= ip
->i_sbd
;
419 if (ip
->i_di
.di_size
< (8 << 20) ||
420 ip
->i_di
.di_size
> (1 << 30) ||
421 (ip
->i_di
.di_size
& (sdp
->sd_sb
.sb_bsize
- 1))) {
422 gfs2_consist_inode(ip
);
425 jd
->jd_blocks
= ip
->i_di
.di_size
>> sdp
->sd_sb
.sb_bsize_shift
;
427 error
= gfs2_write_alloc_required(ip
,
431 gfs2_consist_inode(ip
);
438 int gfs2_lookup_master_dir(struct gfs2_sbd
*sdp
)
440 struct gfs2_glock
*gl
;
443 error
= gfs2_glock_get(sdp
,
444 sdp
->sd_sb
.sb_master_dir
.no_addr
,
445 &gfs2_inode_glops
, CREATE
, &gl
);
447 error
= gfs2_inode_get(gl
, &sdp
->sd_sb
.sb_master_dir
, CREATE
,
448 &sdp
->sd_master_dir
);
456 * gfs2_make_fs_rw - Turn a Read-Only FS into a Read-Write one
457 * @sdp: the filesystem
462 int gfs2_make_fs_rw(struct gfs2_sbd
*sdp
)
464 struct gfs2_glock
*j_gl
= sdp
->sd_jdesc
->jd_inode
->i_gl
;
465 struct gfs2_holder t_gh
;
466 struct gfs2_log_header head
;
469 error
= gfs2_glock_nq_init(sdp
->sd_trans_gl
, LM_ST_SHARED
,
470 GL_LOCAL_EXCL
| GL_NEVER_RECURSE
, &t_gh
);
474 gfs2_meta_cache_flush(sdp
->sd_jdesc
->jd_inode
);
475 j_gl
->gl_ops
->go_inval(j_gl
, DIO_METADATA
| DIO_DATA
);
477 error
= gfs2_find_jhead(sdp
->sd_jdesc
, &head
);
481 if (!(head
.lh_flags
& GFS2_LOG_HEAD_UNMOUNT
)) {
487 /* Initialize some head of the log stuff */
488 sdp
->sd_log_sequence
= head
.lh_sequence
+ 1;
489 gfs2_log_pointers_init(sdp
, head
.lh_blkno
);
491 error
= gfs2_unlinked_init(sdp
);
494 error
= gfs2_quota_init(sdp
);
498 set_bit(SDF_JOURNAL_LIVE
, &sdp
->sd_flags
);
500 gfs2_glock_dq_uninit(&t_gh
);
505 gfs2_unlinked_cleanup(sdp
);
508 t_gh
.gh_flags
|= GL_NOCACHE
;
509 gfs2_glock_dq_uninit(&t_gh
);
515 * gfs2_make_fs_ro - Turn a Read-Write FS into a Read-Only one
516 * @sdp: the filesystem
521 int gfs2_make_fs_ro(struct gfs2_sbd
*sdp
)
523 struct gfs2_holder t_gh
;
526 gfs2_unlinked_dealloc(sdp
);
527 gfs2_quota_sync(sdp
);
528 gfs2_statfs_sync(sdp
);
530 error
= gfs2_glock_nq_init(sdp
->sd_trans_gl
, LM_ST_SHARED
,
531 GL_LOCAL_EXCL
| GL_NEVER_RECURSE
| GL_NOCACHE
,
533 if (error
&& !test_bit(SDF_SHUTDOWN
, &sdp
->sd_flags
))
536 gfs2_meta_syncfs(sdp
);
537 gfs2_log_shutdown(sdp
);
539 clear_bit(SDF_JOURNAL_LIVE
, &sdp
->sd_flags
);
542 gfs2_glock_dq_uninit(&t_gh
);
544 gfs2_unlinked_cleanup(sdp
);
545 gfs2_quota_cleanup(sdp
);
550 int gfs2_statfs_init(struct gfs2_sbd
*sdp
)
552 struct gfs2_inode
*m_ip
= sdp
->sd_statfs_inode
;
553 struct gfs2_statfs_change
*m_sc
= &sdp
->sd_statfs_master
;
554 struct gfs2_inode
*l_ip
= sdp
->sd_sc_inode
;
555 struct gfs2_statfs_change
*l_sc
= &sdp
->sd_statfs_local
;
556 struct buffer_head
*m_bh
, *l_bh
;
557 struct gfs2_holder gh
;
560 error
= gfs2_glock_nq_init(m_ip
->i_gl
, LM_ST_EXCLUSIVE
, GL_NOCACHE
,
565 error
= gfs2_meta_inode_buffer(m_ip
, &m_bh
);
569 if (sdp
->sd_args
.ar_spectator
) {
570 spin_lock(&sdp
->sd_statfs_spin
);
571 gfs2_statfs_change_in(m_sc
, m_bh
->b_data
+
572 sizeof(struct gfs2_dinode
));
573 spin_unlock(&sdp
->sd_statfs_spin
);
575 error
= gfs2_meta_inode_buffer(l_ip
, &l_bh
);
579 spin_lock(&sdp
->sd_statfs_spin
);
580 gfs2_statfs_change_in(m_sc
, m_bh
->b_data
+
581 sizeof(struct gfs2_dinode
));
582 gfs2_statfs_change_in(l_sc
, l_bh
->b_data
+
583 sizeof(struct gfs2_dinode
));
584 spin_unlock(&sdp
->sd_statfs_spin
);
593 gfs2_glock_dq_uninit(&gh
);
598 void gfs2_statfs_change(struct gfs2_sbd
*sdp
, int64_t total
, int64_t free
,
601 struct gfs2_inode
*l_ip
= sdp
->sd_sc_inode
;
602 struct gfs2_statfs_change
*l_sc
= &sdp
->sd_statfs_local
;
603 struct buffer_head
*l_bh
;
606 error
= gfs2_meta_inode_buffer(l_ip
, &l_bh
);
610 down(&sdp
->sd_statfs_mutex
);
611 gfs2_trans_add_bh(l_ip
->i_gl
, l_bh
);
612 up(&sdp
->sd_statfs_mutex
);
614 spin_lock(&sdp
->sd_statfs_spin
);
615 l_sc
->sc_total
+= total
;
616 l_sc
->sc_free
+= free
;
617 l_sc
->sc_dinodes
+= dinodes
;
618 gfs2_statfs_change_out(l_sc
, l_bh
->b_data
+
619 sizeof(struct gfs2_dinode
));
620 spin_unlock(&sdp
->sd_statfs_spin
);
625 int gfs2_statfs_sync(struct gfs2_sbd
*sdp
)
627 struct gfs2_inode
*m_ip
= sdp
->sd_statfs_inode
;
628 struct gfs2_inode
*l_ip
= sdp
->sd_sc_inode
;
629 struct gfs2_statfs_change
*m_sc
= &sdp
->sd_statfs_master
;
630 struct gfs2_statfs_change
*l_sc
= &sdp
->sd_statfs_local
;
631 struct gfs2_holder gh
;
632 struct buffer_head
*m_bh
, *l_bh
;
635 error
= gfs2_glock_nq_init(m_ip
->i_gl
, LM_ST_EXCLUSIVE
, GL_NOCACHE
,
640 error
= gfs2_meta_inode_buffer(m_ip
, &m_bh
);
644 spin_lock(&sdp
->sd_statfs_spin
);
645 gfs2_statfs_change_in(m_sc
, m_bh
->b_data
+
646 sizeof(struct gfs2_dinode
));
647 if (!l_sc
->sc_total
&& !l_sc
->sc_free
&& !l_sc
->sc_dinodes
) {
648 spin_unlock(&sdp
->sd_statfs_spin
);
651 spin_unlock(&sdp
->sd_statfs_spin
);
653 error
= gfs2_meta_inode_buffer(l_ip
, &l_bh
);
657 error
= gfs2_trans_begin(sdp
, 2 * RES_DINODE
, 0);
661 down(&sdp
->sd_statfs_mutex
);
662 gfs2_trans_add_bh(l_ip
->i_gl
, l_bh
);
663 up(&sdp
->sd_statfs_mutex
);
665 spin_lock(&sdp
->sd_statfs_spin
);
666 m_sc
->sc_total
+= l_sc
->sc_total
;
667 m_sc
->sc_free
+= l_sc
->sc_free
;
668 m_sc
->sc_dinodes
+= l_sc
->sc_dinodes
;
669 memset(l_sc
, 0, sizeof(struct gfs2_statfs_change
));
670 memset(l_bh
->b_data
+ sizeof(struct gfs2_dinode
),
671 0, sizeof(struct gfs2_statfs_change
));
672 spin_unlock(&sdp
->sd_statfs_spin
);
674 gfs2_trans_add_bh(m_ip
->i_gl
, m_bh
);
675 gfs2_statfs_change_out(m_sc
, m_bh
->b_data
+ sizeof(struct gfs2_dinode
));
686 gfs2_glock_dq_uninit(&gh
);
692 * gfs2_statfs_i - Do a statfs
693 * @sdp: the filesystem
694 * @sg: the sg structure
699 int gfs2_statfs_i(struct gfs2_sbd
*sdp
, struct gfs2_statfs_change
*sc
)
701 struct gfs2_statfs_change
*m_sc
= &sdp
->sd_statfs_master
;
702 struct gfs2_statfs_change
*l_sc
= &sdp
->sd_statfs_local
;
704 spin_lock(&sdp
->sd_statfs_spin
);
707 sc
->sc_total
+= l_sc
->sc_total
;
708 sc
->sc_free
+= l_sc
->sc_free
;
709 sc
->sc_dinodes
+= l_sc
->sc_dinodes
;
711 spin_unlock(&sdp
->sd_statfs_spin
);
715 if (sc
->sc_free
> sc
->sc_total
)
716 sc
->sc_free
= sc
->sc_total
;
717 if (sc
->sc_dinodes
< 0)
724 * statfs_fill - fill in the sg for a given RG
726 * @sc: the sc structure
728 * Returns: 0 on success, -ESTALE if the LVB is invalid
731 static int statfs_slow_fill(struct gfs2_rgrpd
*rgd
,
732 struct gfs2_statfs_change
*sc
)
734 gfs2_rgrp_verify(rgd
);
735 sc
->sc_total
+= rgd
->rd_ri
.ri_data
;
736 sc
->sc_free
+= rgd
->rd_rg
.rg_free
;
737 sc
->sc_dinodes
+= rgd
->rd_rg
.rg_dinodes
;
742 * gfs2_statfs_slow - Stat a filesystem using asynchronous locking
743 * @sdp: the filesystem
744 * @sc: the sc info that will be returned
746 * Any error (other than a signal) will cause this routine to fall back
747 * to the synchronous version.
749 * FIXME: This really shouldn't busy wait like this.
754 int gfs2_statfs_slow(struct gfs2_sbd
*sdp
, struct gfs2_statfs_change
*sc
)
756 struct gfs2_holder ri_gh
;
757 struct gfs2_rgrpd
*rgd_next
;
758 struct gfs2_holder
*gha
, *gh
;
759 unsigned int slots
= 64;
764 memset(sc
, 0, sizeof(struct gfs2_statfs_change
));
765 gha
= kcalloc(slots
, sizeof(struct gfs2_holder
), GFP_KERNEL
);
769 error
= gfs2_rindex_hold(sdp
, &ri_gh
);
773 rgd_next
= gfs2_rgrpd_get_first(sdp
);
778 for (x
= 0; x
< slots
; x
++) {
781 if (gh
->gh_gl
&& gfs2_glock_poll(gh
)) {
782 err
= gfs2_glock_wait(gh
);
784 gfs2_holder_uninit(gh
);
788 error
= statfs_slow_fill(get_gl2rgd(gh
->gh_gl
), sc
);
789 gfs2_glock_dq_uninit(gh
);
795 else if (rgd_next
&& !error
) {
796 error
= gfs2_glock_nq_init(rgd_next
->rd_gl
,
800 rgd_next
= gfs2_rgrpd_get_next(rgd_next
);
804 if (signal_pending(current
))
805 error
= -ERESTARTSYS
;
814 gfs2_glock_dq_uninit(&ri_gh
);
823 struct list_head list
;
824 struct gfs2_holder gh
;
828 * gfs2_lock_fs_check_clean - Stop all writes to the FS and check that all
830 * @sdp: the file system
831 * @state: the state to put the transaction lock into
832 * @t_gh: the hold on the transaction lock
837 int gfs2_lock_fs_check_clean(struct gfs2_sbd
*sdp
, struct gfs2_holder
*t_gh
)
839 struct gfs2_holder ji_gh
;
840 struct gfs2_jdesc
*jd
;
843 struct gfs2_log_header lh
;
846 error
= gfs2_jindex_hold(sdp
, &ji_gh
);
850 list_for_each_entry(jd
, &sdp
->sd_jindex_list
, jd_list
) {
851 lfcc
= kmalloc(sizeof(struct lfcc
), GFP_KERNEL
);
856 error
= gfs2_glock_nq_init(jd
->jd_inode
->i_gl
, LM_ST_SHARED
, 0,
862 list_add(&lfcc
->list
, &list
);
865 error
= gfs2_glock_nq_init(sdp
->sd_trans_gl
, LM_ST_DEFERRED
,
866 LM_FLAG_PRIORITY
| GL_NEVER_RECURSE
| GL_NOCACHE
,
869 list_for_each_entry(jd
, &sdp
->sd_jindex_list
, jd_list
) {
870 error
= gfs2_jdesc_check(jd
);
873 error
= gfs2_find_jhead(jd
, &lh
);
876 if (!(lh
.lh_flags
& GFS2_LOG_HEAD_UNMOUNT
)) {
883 gfs2_glock_dq_uninit(t_gh
);
886 while (!list_empty(&list
)) {
887 lfcc
= list_entry(list
.next
, struct lfcc
, list
);
888 list_del(&lfcc
->list
);
889 gfs2_glock_dq_uninit(&lfcc
->gh
);
892 gfs2_glock_dq_uninit(&ji_gh
);
898 * gfs2_freeze_fs - freezes the file system
899 * @sdp: the file system
901 * This function flushes data and meta data for all machines by
902 * aquiring the transaction log exclusively. All journals are
903 * ensured to be in a clean state as well.
908 int gfs2_freeze_fs(struct gfs2_sbd
*sdp
)
912 down(&sdp
->sd_freeze_lock
);
914 if (!sdp
->sd_freeze_count
++) {
915 error
= gfs2_lock_fs_check_clean(sdp
, &sdp
->sd_freeze_gh
);
917 sdp
->sd_freeze_count
--;
920 up(&sdp
->sd_freeze_lock
);
926 * gfs2_unfreeze_fs - unfreezes the file system
927 * @sdp: the file system
929 * This function allows the file system to proceed by unlocking
930 * the exclusively held transaction lock. Other GFS2 nodes are
931 * now free to acquire the lock shared and go on with their lives.
935 void gfs2_unfreeze_fs(struct gfs2_sbd
*sdp
)
937 down(&sdp
->sd_freeze_lock
);
939 if (sdp
->sd_freeze_count
&& !--sdp
->sd_freeze_count
)
940 gfs2_glock_dq_uninit(&sdp
->sd_freeze_gh
);
942 up(&sdp
->sd_freeze_lock
);