2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License v.2.
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/completion.h>
14 #include <linux/buffer_head.h>
16 #include <linux/pagemap.h>
17 #include <linux/writeback.h>
18 #include <linux/swap.h>
19 #include <linux/delay.h>
20 #include <linux/gfs2_ondisk.h>
23 #include "lm_interface.h"
/*
 * Buffer-state predicates used when deciding whether a buffer can be
 * reclaimed or must wait.  BH_Pinned is a GFS2-private buffer state bit
 * (declared elsewhere in this filesystem's headers).
 */
#define buffer_busy(bh) \
((bh)->b_state & ((1ul << BH_Dirty) | (1ul << BH_Lock) | (1ul << BH_Pinned)))

/* Busy with actual I/O (dirty or locked), ignoring the journal pin. */
#define buffer_in_io(bh) \
((bh)->b_state & ((1ul << BH_Dirty) | (1ul << BH_Lock)))
40 static int aspace_get_block(struct inode
*inode
, sector_t lblock
,
41 struct buffer_head
*bh_result
, int create
)
43 gfs2_assert_warn(inode
->i_sb
->s_fs_info
, 0);
47 static int gfs2_aspace_writepage(struct page
*page
,
48 struct writeback_control
*wbc
)
50 return block_write_full_page(page
, aspace_get_block
, wbc
);
54 * stuck_releasepage - We're stuck in gfs2_releasepage(). Print stuff out.
55 * @bh: the buffer we're stuck on
59 static void stuck_releasepage(struct buffer_head
*bh
)
61 struct inode
*inode
= bh
->b_page
->mapping
->host
;
62 struct gfs2_sbd
*sdp
= inode
->i_sb
->s_fs_info
;
63 struct gfs2_bufdata
*bd
= bh
->b_private
;
64 struct gfs2_glock
*gl
;
66 fs_warn(sdp
, "stuck in gfs2_releasepage() %p\n", inode
);
67 fs_warn(sdp
, "blkno = %llu, bh->b_count = %d\n",
68 (unsigned long long)bh
->b_blocknr
, atomic_read(&bh
->b_count
));
69 fs_warn(sdp
, "pinned = %u\n", buffer_pinned(bh
));
70 fs_warn(sdp
, "bh->b_private = %s\n", (bd
) ? "!NULL" : "NULL");
77 fs_warn(sdp
, "gl = (%u, %llu)\n",
78 gl
->gl_name
.ln_type
, (unsigned long long)gl
->gl_name
.ln_number
);
80 fs_warn(sdp
, "bd_list_tr = %s, bd_le.le_list = %s\n",
81 (list_empty(&bd
->bd_list_tr
)) ? "no" : "yes",
82 (list_empty(&bd
->bd_le
.le_list
)) ? "no" : "yes");
84 if (gl
->gl_ops
== &gfs2_inode_glops
) {
85 struct gfs2_inode
*ip
= gl
->gl_object
;
91 fs_warn(sdp
, "ip = %llu %llu\n",
92 (unsigned long long)ip
->i_num
.no_formal_ino
,
93 (unsigned long long)ip
->i_num
.no_addr
);
94 fs_warn(sdp
, "ip->i_count = %d, ip->i_vnode = %s\n",
95 atomic_read(&ip
->i_count
),
96 (ip
->i_vnode
) ? "!NULL" : "NULL");
98 for (x
= 0; x
< GFS2_MAX_META_HEIGHT
; x
++)
99 fs_warn(sdp
, "ip->i_cache[%u] = %s\n",
100 x
, (ip
->i_cache
[x
]) ? "!NULL" : "NULL");
/* NOTE(review): extraction-garbled fragment.  Purpose from the visible
   code: spin while someone holds a reference to any buffer on the page
   (dumping diagnostics via stuck_releasepage() after gt_stall_secs),
   then assert each buffer is unpinned, free its gfs2_bufdata, and hand
   the page to try_to_free_buffers().  The embedded original line numbers
   jump (134->145, 154->157, 157->162), so loop closure, the bd NULL
   check, timer reset and yield/return paths are missing here. */
105 * gfs2_aspace_releasepage - free the metadata associated with a page
106 * @page: the page that's being released
107 * @gfp_mask: passed from Linux VFS, ignored by us
109 * Call try_to_free_buffers() if the buffers in this page can be
115 static int gfs2_aspace_releasepage(struct page
*page
, gfp_t gfp_mask
)
117 struct inode
*aspace
= page
->mapping
->host
;
118 struct gfs2_sbd
*sdp
= aspace
->i_sb
->s_fs_info
;
119 struct buffer_head
*bh
, *head
;
120 struct gfs2_bufdata
*bd
;
123 if (!page_has_buffers(page
))
126 head
= bh
= page_buffers(page
);
/* Busy-wait while the buffer has elevated b_count; warn if stalled
   longer than gt_stall_secs (tunable) while writers are active. */
130 while (atomic_read(&bh
->b_count
)) {
131 if (atomic_read(&aspace
->i_writecount
)) {
132 if (time_after_eq(jiffies
, t
+
133 gfs2_tune_get(sdp
, gt_stall_secs
) * HZ
)) {
134 stuck_releasepage(bh
);
/* Per-buffer teardown: must be clean (unpinned, off all lists)
   before the bufdata is returned to the slab cache. */
145 gfs2_assert_warn(sdp
, !buffer_pinned(bh
));
149 gfs2_assert_warn(sdp
, bd
->bd_bh
== bh
);
150 gfs2_assert_warn(sdp
, list_empty(&bd
->bd_list_tr
));
151 gfs2_assert_warn(sdp
, list_empty(&bd
->bd_le
.le_list
));
152 gfs2_assert_warn(sdp
, !bd
->bd_ail
);
153 kmem_cache_free(gfs2_bufdata_cachep
, bd
);
154 bh
->b_private
= NULL
;
157 bh
= bh
->b_this_page
;
162 return try_to_free_buffers(page
);
165 static struct address_space_operations aspace_aops
= {
166 .writepage
= gfs2_aspace_writepage
,
167 .releasepage
= gfs2_aspace_releasepage
,
171 * gfs2_aspace_get - Create and initialize a struct inode structure
172 * @sdp: the filesystem the aspace is in
174 * Right now a struct inode is just a struct inode. Maybe Linux
175 * will supply a more lightweight address space construct (that works)
178 * Make sure pages/buffers in this aspace aren't in high memory.
180 * Returns: the aspace
183 struct inode
*gfs2_aspace_get(struct gfs2_sbd
*sdp
)
185 struct inode
*aspace
;
187 aspace
= new_inode(sdp
->sd_vfs
);
189 mapping_set_gfp_mask(aspace
->i_mapping
, GFP_KERNEL
);
190 aspace
->i_mapping
->a_ops
= &aspace_aops
;
191 aspace
->i_size
= ~0ULL;
192 aspace
->u
.generic_ip
= NULL
;
193 insert_inode_hash(aspace
);
/**
 * gfs2_aspace_put - release an aspace inode created by gfs2_aspace_get()
 * @aspace: the aspace inode to drop
 */
void gfs2_aspace_put(struct inode *aspace)
{
	remove_inode_hash(aspace);
	/* NOTE(review): iput() reconstructed from the elided original line;
	   without dropping the reference taken by new_inode() the aspace
	   inode would leak — confirm. */
	iput(aspace);
}
/* NOTE(review): extraction-garbled fragment.  Walks ai_ail1_list in
   reverse under the (already held) log lock, moves fully-written
   up-to-date buffers onward, and issues WRITE I/O on dirty ones after
   dropping the log lock.  The embedded numbering jumps (222->226,
   234->239, 246->end), so the list_for_each body's closure, the target
   list of the first list_move, and the relock/retry path are missing. */
205 * gfs2_ail1_start_one - Start I/O on a part of the AIL
206 * @sdp: the filesystem
207 * @tr: the part of the AIL
211 void gfs2_ail1_start_one(struct gfs2_sbd
*sdp
, struct gfs2_ail
*ai
)
213 struct gfs2_bufdata
*bd
, *s
;
214 struct buffer_head
*bh
;
/* Caller must hold sd_log_lock. */
217 BUG_ON(!spin_is_locked(&sdp
->sd_log_lock
));
222 list_for_each_entry_safe_reverse(bd
, s
, &ai
->ai_ail1_list
,
226 gfs2_assert(sdp
, bd
->bd_ail
== ai
);
228 if (!buffer_busy(bh
)) {
229 if (!buffer_uptodate(bh
)) {
230 gfs2_log_unlock(sdp
);
231 gfs2_io_error_bh(sdp
, bh
);
234 list_move(&bd
->bd_ail_st_list
,
239 if (!buffer_dirty(bh
))
242 list_move(&bd
->bd_ail_st_list
, &ai
->ai_ail1_list
);
/* Log lock dropped before submitting block I/O. */
244 gfs2_log_unlock(sdp
);
246 ll_rw_block(WRITE
, 1, &bh
);
/* NOTE(review): extraction-garbled fragment.  Scans ai_ail1_list in
   reverse; busy buffers stop (or are skipped, depending on the elided
   `flags` handling at original lines 274-279), synced buffers are
   moved to ai_ail2_list, and the function reports whether the AIL1
   list drained.  Original lines 268-270, 274-279 and 284-285 are
   missing here. */
256 * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
257 * @sdp: the filesystem
262 int gfs2_ail1_empty_one(struct gfs2_sbd
*sdp
, struct gfs2_ail
*ai
, int flags
)
264 struct gfs2_bufdata
*bd
, *s
;
265 struct buffer_head
*bh
;
267 list_for_each_entry_safe_reverse(bd
, s
, &ai
->ai_ail1_list
,
271 gfs2_assert(sdp
, bd
->bd_ail
== ai
);
273 if (buffer_busy(bh
)) {
/* Buffer finished I/O: flag any read error, then retire it to AIL2. */
280 if (!buffer_uptodate(bh
))
281 gfs2_io_error_bh(sdp
, bh
);
283 list_move(&bd
->bd_ail_st_list
, &ai
->ai_ail2_list
);
/* Non-zero when every AIL1 buffer has been retired. */
286 return list_empty(&ai
->ai_ail1_list
);
/* NOTE(review): extraction-garbled fragment.  Drains ai_ail2_list:
   each bufdata is unlinked from its AIL state and glock lists and the
   glock's AIL count decremented.  The list_entry() member argument
   (original line 303), and the elided lines 305 and 309-311 — which
   presumably clear bd->bd_ail and release the buffer — are missing. */
290 * gfs2_ail2_empty_one - Check whether or not a trans in the AIL has been synced
291 * @sdp: the filesystem
296 void gfs2_ail2_empty_one(struct gfs2_sbd
*sdp
, struct gfs2_ail
*ai
)
298 struct list_head
*head
= &ai
->ai_ail2_list
;
299 struct gfs2_bufdata
*bd
;
301 while (!list_empty(head
)) {
302 bd
= list_entry(head
->prev
, struct gfs2_bufdata
,
304 gfs2_assert(sdp
, bd
->bd_ail
== ai
);
306 list_del(&bd
->bd_ail_st_list
);
307 list_del(&bd
->bd_ail_gl_list
);
308 atomic_dec(&bd
->bd_gl
->gl_ail_count
);
/* NOTE(review): extraction-garbled fragment.  Empties a glock's AIL
   list inside a small transaction, emitting a revoke for each block,
   then asserts gl_ail_count reached zero and flushes the log.  The
   elided lines include the declarations of `blocks`, `error` and
   `blkno`, the early return when blocks == 0, the gfs2_log_lock()
   acquisitions paired with the visible unlocks, the list_entry()
   member argument, and gfs2_trans_end() — numbering jumps at
   327-333, 336-338, 341-342, 345-346, 350, 352, 354-356 and 359-360. */
314 * ail_empty_gl - remove all buffers for a given lock from the AIL
317 * None of the buffers should be dirty, locked, or pinned.
320 void gfs2_ail_empty_gl(struct gfs2_glock
*gl
)
322 struct gfs2_sbd
*sdp
= gl
->gl_sbd
;
324 struct list_head
*head
= &gl
->gl_ail_list
;
325 struct gfs2_bufdata
*bd
;
326 struct buffer_head
*bh
;
330 blocks
= atomic_read(&gl
->gl_ail_count
);
/* One revoke per AIL block is reserved in the transaction. */
334 error
= gfs2_trans_begin(sdp
, 0, blocks
);
335 if (gfs2_assert_withdraw(sdp
, !error
))
339 while (!list_empty(head
)) {
340 bd
= list_entry(head
->next
, struct gfs2_bufdata
,
343 blkno
= bh
->b_blocknr
;
344 gfs2_assert_withdraw(sdp
, !buffer_busy(bh
));
347 list_del(&bd
->bd_ail_st_list
);
348 list_del(&bd
->bd_ail_gl_list
);
349 atomic_dec(&gl
->gl_ail_count
);
351 gfs2_log_unlock(sdp
);
353 gfs2_trans_add_revoke(sdp
, blkno
);
357 gfs2_assert_withdraw(sdp
, !atomic_read(&gl
->gl_ail_count
));
358 gfs2_log_unlock(sdp
);
361 gfs2_log_flush(sdp
, NULL
);
365 * gfs2_meta_inval - Invalidate all buffers associated with a glock
370 void gfs2_meta_inval(struct gfs2_glock
*gl
)
372 struct gfs2_sbd
*sdp
= gl
->gl_sbd
;
373 struct inode
*aspace
= gl
->gl_aspace
;
374 struct address_space
*mapping
= gl
->gl_aspace
->i_mapping
;
376 gfs2_assert_withdraw(sdp
, !atomic_read(&gl
->gl_ail_count
));
378 atomic_inc(&aspace
->i_writecount
);
379 truncate_inode_pages(mapping
, 0);
380 atomic_dec(&aspace
->i_writecount
);
382 gfs2_assert_withdraw(sdp
, !mapping
->nrpages
);
386 * gfs2_meta_sync - Sync all buffers associated with a glock
388 * @flags: DIO_START | DIO_WAIT
392 void gfs2_meta_sync(struct gfs2_glock
*gl
, int flags
)
394 struct address_space
*mapping
= gl
->gl_aspace
->i_mapping
;
397 if (flags
& DIO_START
)
398 filemap_fdatawrite(mapping
);
399 if (!error
&& (flags
& DIO_WAIT
))
400 error
= filemap_fdatawait(mapping
);
403 gfs2_io_error(gl
->gl_sbd
);
/* NOTE(review): extraction-garbled fragment.  Translates a filesystem
   block number into (page index, buffer index) using the fs/page shift,
   grabs or finds the page in the aspace mapping (create vs. no-create
   branch), attaches empty buffers if needed, walks to the target buffer
   and maps it.  The elided lines include the declarations of page/
   index/bufnum/shift, the if/else joining the grab/find branches, the
   NULL-page and unlock handling, get_bh(), unlock_page(), and the
   return statement — numbering jumps at 418-424, 428-430, 432-436,
   438-441, 444, 447-449, 452-453 and 456 onward. */
407 * getbuf - Get a buffer with a given address space
408 * @sdp: the filesystem
409 * @aspace: the address space
410 * @blkno: the block number (filesystem scope)
411 * @create: 1 if the buffer should be created
413 * Returns: the buffer
416 static struct buffer_head
*getbuf(struct gfs2_sbd
*sdp
, struct inode
*aspace
,
417 uint64_t blkno
, int create
)
420 struct buffer_head
*bh
;
425 shift
= PAGE_CACHE_SHIFT
- sdp
->sd_sb
.sb_bsize_shift
;
426 index
= blkno
>> shift
; /* convert block to page */
427 bufnum
= blkno
- (index
<< shift
); /* block buf index within page */
/* create branch: allocate the page if absent. */
431 page
= grab_cache_page(aspace
->i_mapping
, index
);
/* no-create branch: only look up an existing page. */
437 page
= find_lock_page(aspace
->i_mapping
, index
);
442 if (!page_has_buffers(page
))
443 create_empty_buffers(page
, sdp
->sd_sb
.sb_bsize
, 0);
445 /* Locate header for our buffer within our page */
446 for (bh
= page_buffers(page
); bufnum
--; bh
= bh
->b_this_page
)
450 if (!buffer_mapped(bh
))
451 map_bh(bh
, sdp
->sd_vfs
, blkno
);
454 mark_page_accessed(page
);
455 page_cache_release(page
);
/* NOTE(review): extraction-garbled fragment.  Prepares a freshly
   allocated metadata buffer: marks it clean and up to date and stamps
   the on-disk magic number.  Original lines 461, 463-464 and 467-468
   are elided — they presumably bracket the state changes with
   lock_buffer()/unlock_buffer(); confirm against upstream. */
460 static void meta_prep_new(struct buffer_head
*bh
)
462 struct gfs2_meta_header
*mh
= (struct gfs2_meta_header
*)bh
->b_data
;
465 clear_buffer_dirty(bh
);
466 set_buffer_uptodate(bh
);
/* Stamp the GFS2 on-disk magic into the metadata header (big-endian). */
469 mh
->mh_magic
= cpu_to_be32(GFS2_MAGIC
);
473 * gfs2_meta_new - Get a block
474 * @gl: The glock associated with this block
475 * @blkno: The block number
477 * Returns: The buffer
480 struct buffer_head
*gfs2_meta_new(struct gfs2_glock
*gl
, uint64_t blkno
)
482 struct buffer_head
*bh
;
483 bh
= getbuf(gl
->gl_sbd
, gl
->gl_aspace
, blkno
, CREATE
);
/* NOTE(review): extraction-garbled fragment.  Fetches (creating if
   necessary) the buffer for @blkno from the glock's aspace and rereads
   it per @flags.  The `error` declaration, the failure handling that
   the doc-comment implies (NULL *bhp on failure), and the return
   statement are elided — original lines 494-497, 500-502 and 505-510
   are missing. */
489 * gfs2_meta_read - Read a block from disk
490 * @gl: The glock covering the block
491 * @blkno: The block number
492 * @flags: flags to gfs2_dreread()
493 * @bhp: the place where the buffer is returned (NULL on failure)
498 int gfs2_meta_read(struct gfs2_glock
*gl
, uint64_t blkno
, int flags
,
499 struct buffer_head
**bhp
)
503 *bhp
= getbuf(gl
->gl_sbd
, gl
->gl_aspace
, blkno
, CREATE
);
504 error
= gfs2_meta_reread(gl
->gl_sbd
, *bhp
, flags
);
/* NOTE(review): extraction-garbled fragment.  Re-reads a metadata
   buffer: bails out if the filesystem is shut down, optionally forces
   a fresh read (DIO_FORCE), starts I/O (DIO_START) and waits for it
   (DIO_WAIT), reporting an I/O error when the buffer still isn't up
   to date inside a touched transaction.  Elided: the two shutdown
   return values (original lines 523, 541), the wait_on_buffer() call
   (532-533), the error returns (538, 542-544) and the closing
   `return 0;`. */
512 * gfs2_meta_reread - Reread a block from disk
513 * @sdp: the filesystem
514 * @bh: The block to read
515 * @flags: Flags that control the read
520 int gfs2_meta_reread(struct gfs2_sbd
*sdp
, struct buffer_head
*bh
, int flags
)
522 if (unlikely(test_bit(SDF_SHUTDOWN
, &sdp
->sd_flags
)))
525 if (flags
& DIO_FORCE
)
526 clear_buffer_uptodate(bh
);
528 if ((flags
& DIO_START
) && !buffer_uptodate(bh
))
529 ll_rw_block(READ
, 1, &bh
);
531 if (flags
& DIO_WAIT
) {
534 if (!buffer_uptodate(bh
)) {
535 struct gfs2_trans
*tr
= current
->journal_info
;
/* Only report the I/O error when a live, touched transaction exists. */
536 if (tr
&& tr
->tr_touched
)
537 gfs2_io_error_bh(sdp
, bh
);
540 if (unlikely(test_bit(SDF_SHUTDOWN
, &sdp
->sd_flags
)))
/* NOTE(review): extraction-garbled fragment.  Under the page lock,
   allocates and zeroes a gfs2_bufdata for @bh (unless one is already
   attached — that early-exit, original lines 561-563, is elided),
   initializes its lists and log-element ops (metadata vs. data per the
   elided @meta branch), and links it to the buffer/glock.  Note the
   alloc and memset are joined by a comma operator (line ending `),`)
   in the original.  Missing: the `int meta` parameter tail, bd->bd_bh/
   bd_gl assignments, the glock hold, and bh->b_private = bd. */
548 * gfs2_attach_bufdata - attach a struct gfs2_bufdata structure to a buffer
549 * @gl: the glock the buffer belongs to
550 * @bh: The buffer to be attached to
551 * @meta: Flag to indicate whether its metadata or not
554 void gfs2_attach_bufdata(struct gfs2_glock
*gl
, struct buffer_head
*bh
,
557 struct gfs2_bufdata
*bd
;
560 lock_page(bh
->b_page
);
564 unlock_page(bh
->b_page
);
/* __GFP_NOFAIL: allocation loops rather than returning NULL. */
568 bd
= kmem_cache_alloc(gfs2_bufdata_cachep
, GFP_NOFS
| __GFP_NOFAIL
),
569 memset(bd
, 0, sizeof(struct gfs2_bufdata
));
574 INIT_LIST_HEAD(&bd
->bd_list_tr
);
576 lops_init_le(&bd
->bd_le
, &gfs2_buf_lops
);
578 lops_init_le(&bd
->bd_le
, &gfs2_databuf_lops
);
584 unlock_page(bh
->b_page
);
/* NOTE(review): extraction-garbled fragment.  Pins a journal buffer:
   requires a live journal, withdraws on double-pin, moves an
   already-written AIL1 buffer to AIL2, then marks the buffer clean
   (the journal now owns the data) and checks it is up to date.
   Elided: the gfs2_log_lock() paired with the visible unlock
   (original lines 607-608), wait_on_buffer() (614-615) and the
   buffer reference handling at the end. */
588 * gfs2_pin - Pin a buffer in memory
589 * @sdp: the filesystem the buffer belongs to
590 * @bh: The buffer to be pinned
594 void gfs2_pin(struct gfs2_sbd
*sdp
, struct buffer_head
*bh
)
596 struct gfs2_bufdata
*bd
= bh
->b_private
;
598 gfs2_assert_withdraw(sdp
, test_bit(SDF_JOURNAL_LIVE
, &sdp
->sd_flags
));
/* Double-pinning the same buffer is a fatal inconsistency. */
600 if (test_set_buffer_pinned(bh
))
601 gfs2_assert_withdraw(sdp
, 0);
605 /* If this buffer is in the AIL and it has already been written
606 to in-place disk block, remove it from the AIL. */
609 if (bd
->bd_ail
&& !buffer_in_io(bh
))
610 list_move(&bd
->bd_ail_st_list
, &bd
->bd_ail
->ai_ail2_list
);
611 gfs2_log_unlock(sdp
);
613 clear_buffer_dirty(bh
);
616 if (!buffer_uptodate(bh
))
617 gfs2_io_error_bh(sdp
, bh
);
/* NOTE(review): extraction-garbled fragment.  Unpins a buffer after
   the log commit: re-dirties it for writeback, clears the pin, and
   moves/adds its bufdata onto an AIL (`ai`, a truncated third
   parameter visible at original line 631).  Elided: the parameter
   tail, the gfs2_log_lock() paired with the visible unlock, the
   bd->bd_ail branch deciding between the two list_add paths, and the
   bd->bd_ail = ai assignment (original lines 631-632, 634, 636,
   642-644, 646-647, 651-652). */
623 * gfs2_unpin - Unpin a buffer
624 * @sdp: the filesystem the buffer belongs to
625 * @bh: The buffer to unpin
630 void gfs2_unpin(struct gfs2_sbd
*sdp
, struct buffer_head
*bh
,
633 struct gfs2_bufdata
*bd
= bh
->b_private
;
635 gfs2_assert_withdraw(sdp
, buffer_uptodate(bh
));
/* Unpinning a buffer that was never pinned is fatal. */
637 if (!buffer_pinned(bh
))
638 gfs2_assert_withdraw(sdp
, 0);
640 mark_buffer_dirty(bh
);
641 clear_buffer_pinned(bh
);
645 list_del(&bd
->bd_ail_st_list
);
/* First time on an AIL: also track the buffer on its glock. */
648 struct gfs2_glock
*gl
= bd
->bd_gl
;
649 list_add(&bd
->bd_ail_gl_list
, &gl
->gl_ail_list
);
650 atomic_inc(&gl
->gl_ail_count
);
653 list_add(&bd
->bd_ail_st_list
, &ai
->ai_ail1_list
);
654 gfs2_log_unlock(sdp
);
/* NOTE(review): extraction-garbled fragment.  For each block in
   [bstart, bstart+blen) it looks up the buffer without creating it,
   and if found: unpins it (removing it from the log's buffer list),
   detaches it from any AIL while emitting a revoke, and clears its
   dirty/uptodate state.  Elided: the `while (blen)` loop and the
   bstart++/blen-- advance, the `if (bh)` guard, the gfs2_log_lock()
   calls paired with the three visible unlocks, the bd/bd_ail NULL
   tests selecting the branches, brelse(bh), and the transaction
   bookkeeping (numbering jumps at 662-664, 666, 670-671, 673, 675,
   678, 683-688, 690, 694, 697, 699-701, 704 onward). */
658 * gfs2_meta_wipe - make inode's buffers so they aren't dirty/pinned anymore
659 * @ip: the inode who owns the buffers
660 * @bstart: the first buffer in the run
661 * @blen: the number of buffers in the run
665 void gfs2_meta_wipe(struct gfs2_inode
*ip
, uint64_t bstart
, uint32_t blen
)
667 struct gfs2_sbd
*sdp
= ip
->i_sbd
;
668 struct inode
*aspace
= ip
->i_gl
->gl_aspace
;
669 struct buffer_head
*bh
;
672 bh
= getbuf(sdp
, aspace
, bstart
, NO_CREATE
);
674 struct gfs2_bufdata
*bd
= bh
->b_private
;
/* Pinned: remove from the in-core log's buffer accounting. */
676 if (test_clear_buffer_pinned(bh
)) {
677 struct gfs2_trans
*tr
= current
->journal_info
;
679 list_del_init(&bd
->bd_le
.le_list
);
680 gfs2_assert_warn(sdp
, sdp
->sd_log_num_buf
);
681 sdp
->sd_log_num_buf
--;
682 gfs2_log_unlock(sdp
);
/* On an AIL: unlink and revoke the block. */
689 uint64_t blkno
= bh
->b_blocknr
;
691 list_del(&bd
->bd_ail_st_list
);
692 list_del(&bd
->bd_ail_gl_list
);
693 atomic_dec(&bd
->bd_gl
->gl_ail_count
);
695 gfs2_log_unlock(sdp
);
696 gfs2_trans_add_revoke(sdp
, blkno
);
698 gfs2_log_unlock(sdp
);
702 clear_buffer_dirty(bh
);
703 clear_buffer_uptodate(bh
);
/* NOTE(review): extraction-garbled fragment.  Walks the inode's
   per-height metadata buffer cache under i_spin and releases each
   cached buffer.  Elided: the `unsigned int x` declaration, and the
   loop-body release (presumably brelse(*bh_slot) + *bh_slot = NULL,
   original lines 731-736). */
715 * gfs2_meta_cache_flush - get rid of any references on buffers for this inode
716 * @ip: The GFS2 inode
718 * This releases buffers that are in the most-recently-used array of
719 * blocks used for indirect block addressing for this inode.
722 void gfs2_meta_cache_flush(struct gfs2_inode
*ip
)
724 struct buffer_head
**bh_slot
;
727 spin_lock(&ip
->i_spin
);
729 for (x
= 0; x
< GFS2_MAX_META_HEIGHT
; x
++) {
730 bh_slot
= &ip
->i_cache
[x
];
737 spin_unlock(&ip
->i_spin
);
/* NOTE(review): extraction-garbled fragment.  Looks up the metadata
   buffer for @num, preferring the inode's per-height MRU cache slot;
   on a miss it allocates (gfs2_meta_new, when `new`) or reads
   (gfs2_meta_read) the block, refreshes the cache slot under i_spin,
   and for new indirect blocks stamps the metatype and clears the
   tail; existing blocks get a metatype check.  Elided: the cache-hit
   buffer grab and reference counting, the error returns after reread/
   read, the slot replacement inside `if (*bh_slot != bh)`, the brelse
   on mismatch, the *bhp assignment and `return error;` (numbering
   jumps at 757-758, 760-761, 763-766, 768-772, 775-781, 783,
   786-789, 792-795, 797-799, 801-803, 807, 810 onward). */
741 * gfs2_meta_indirect_buffer - Get a metadata buffer
742 * @ip: The GFS2 inode
743 * @height: The level of this buf in the metadata (indir addr) tree (if any)
744 * @num: The block number (device relative) of the buffer
745 * @new: Non-zero if we may create a new buffer
746 * @bhp: the buffer is returned here
748 * Try to use the gfs2_inode's MRU metadata tree cache.
753 int gfs2_meta_indirect_buffer(struct gfs2_inode
*ip
, int height
, uint64_t num
,
754 int new, struct buffer_head
**bhp
)
756 struct buffer_head
*bh
, **bh_slot
= ip
->i_cache
+ height
;
759 spin_lock(&ip
->i_spin
);
/* Cache hit only counts if the slot holds the requested block. */
762 if (bh
->b_blocknr
== num
)
767 spin_unlock(&ip
->i_spin
);
773 error
= gfs2_meta_reread(ip
->i_sbd
, bh
,
774 DIO_START
| DIO_WAIT
);
/* Cache miss: create or read the block. */
782 bh
= gfs2_meta_new(ip
->i_gl
, num
);
784 error
= gfs2_meta_read(ip
->i_gl
, num
,
785 DIO_START
| DIO_WAIT
, &bh
);
790 spin_lock(&ip
->i_spin
);
791 if (*bh_slot
!= bh
) {
796 spin_unlock(&ip
->i_spin
);
/* New blocks at height 0 (the dinode itself) would be a bug. */
800 if (gfs2_assert_warn(ip
->i_sbd
, height
)) {
804 gfs2_trans_add_bh(ip
->i_gl
, bh
, 1);
805 gfs2_metatype_set(bh
, GFS2_METATYPE_IN
, GFS2_FORMAT_IN
);
806 gfs2_buffer_clear_tail(bh
, sizeof(struct gfs2_meta_header
));
808 } else if (gfs2_metatype_check(ip
->i_sbd
, bh
,
809 (height
) ? GFS2_METATYPE_IN
: GFS2_METATYPE_DI
)) {
/* NOTE(review): extraction-garbled fragment.  Starts asynchronous
   readahead (DIO_START, no wait) on up to min(extlen, max_ra) blocks
   beginning at @dblock, then apparently waits on the first buffer if
   it completed meanwhile.  Elided: the declarations of `error`/count
   variables, the early return target, the clamp of extlen to max_ra,
   the dblock++/extlen-- readahead loop with its brelse() of each
   speculative buffer, the error bail-outs, and the final
   wait/brelse of first_bh (numbering jumps at 828, 834-835,
   837-840, 842, 844, 847-854, 856, 859-867, 869 onward). */
820 * gfs2_meta_ra - start readahead on an extent of a file
821 * @gl: the glock the blocks belong to
822 * @dblock: the starting disk block
823 * @extlen: the number of blocks in the extent
827 void gfs2_meta_ra(struct gfs2_glock
*gl
, uint64_t dblock
, uint32_t extlen
)
829 struct gfs2_sbd
*sdp
= gl
->gl_sbd
;
830 struct inode
*aspace
= gl
->gl_aspace
;
831 struct buffer_head
*first_bh
, *bh
;
/* Readahead budget in fs blocks, from the tunable (bytes >> shift). */
832 uint32_t max_ra
= gfs2_tune_get(sdp
, gt_max_readahead
) >>
833 sdp
->sd_sb
.sb_bsize_shift
;
836 if (!extlen
|| !max_ra
)
841 first_bh
= getbuf(sdp
, aspace
, dblock
, CREATE
);
843 if (buffer_uptodate(first_bh
))
845 if (!buffer_locked(first_bh
)) {
846 error
= gfs2_meta_reread(sdp
, first_bh
, DIO_START
);
855 bh
= getbuf(sdp
, aspace
, dblock
, CREATE
);
857 if (!buffer_uptodate(bh
) && !buffer_locked(bh
)) {
858 error
= gfs2_meta_reread(sdp
, bh
, DIO_START
);
868 if (buffer_uptodate(first_bh
))
/* NOTE(review): extraction-garbled fragment, truncated at the end of
   the visible source.  Flushes the log, then repeatedly starts AIL1
   I/O until gfs2_ail1_empty() reports the AIL drained; the loop
   construct, the break, any inter-iteration sleep, and the closing
   brace are beyond the visible text (numbering jumps at 883, 885
   and after 887). */
877 * gfs2_meta_syncfs - sync all the buffers in a filesystem
878 * @sdp: the filesystem
882 void gfs2_meta_syncfs(struct gfs2_sbd
*sdp
)
884 gfs2_log_flush(sdp
, NULL
);
886 gfs2_ail1_start(sdp
, DIO_ALL
);
887 if (gfs2_ail1_empty(sdp
, DIO_ALL
))