/*
 * Copyright (C) 1991, 1992, 2002  Linus Torvalds
 *
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required by older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
void init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
    bh->b_end_io = handler;
    bh->b_private = private;
}
static int sync_buffer(void *word)
{
    struct block_device *bd;
    struct buffer_head *bh
        = container_of(word, struct buffer_head, b_state);

    smp_mb();
    bd = bh->b_bdev;
    if (bd)
        blk_run_address_space(bd->bd_inode->i_mapping);
    io_schedule();
    return 0;
}
void fastcall __lock_buffer(struct buffer_head *bh)
{
    wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
                            TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);
void fastcall unlock_buffer(struct buffer_head *bh)
{
    smp_mb__before_clear_bit();
    clear_buffer_locked(bh);
    smp_mb__after_clear_bit();
    wake_up_bit(&bh->b_state, BH_Lock);
}
/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head *bh)
{
    wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
}
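/*
 * Illustrative sketch (not built, not part of the original file): the usual
 * pattern for serialising a modification of a buffer's contents against I/O
 * in flight, using the locking primitives above.  The function name and the
 * update step are hypothetical.
 */
#if 0
static void example_update_buffer(struct buffer_head *bh)
{
    lock_buffer(bh);        /* sleeps in __wait_on_buffer()/__lock_buffer() */
    /* ... modify bh->b_data ... */
    mark_buffer_dirty(bh);
    unlock_buffer(bh);      /* wakes waiters on BH_Lock */
}
#endif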
static void
__clear_page_buffers(struct page *page)
{
    ClearPagePrivate(page);
    set_page_private(page, 0);
    page_cache_release(page);
}
static void buffer_io_error(struct buffer_head *bh)
{
    char b[BDEVNAME_SIZE];

    printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
            bdevname(bh->b_bdev, b),
            (unsigned long long)bh->b_blocknr);
}
/*
 * Default synchronous end-of-IO handler..  Just mark it up-to-date and
 * unlock the buffer. This is what ll_rw_block uses too.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
    if (uptodate) {
        set_buffer_uptodate(bh);
    } else {
        /* This happens, due to failed READA attempts. */
        clear_buffer_uptodate(bh);
    }
    unlock_buffer(bh);
    put_bh(bh);
}
void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
    char b[BDEVNAME_SIZE];

    if (uptodate) {
        set_buffer_uptodate(bh);
    } else {
        if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
            buffer_io_error(bh);
            printk(KERN_WARNING "lost page write due to "
                    "I/O error on %s\n",
                    bdevname(bh->b_bdev, b));
        }
        set_buffer_write_io_error(bh);
        clear_buffer_uptodate(bh);
    }
    unlock_buffer(bh);
    put_bh(bh);
}
/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping.  Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
    int ret = 0;

    if (bdev)
        ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
    return ret;
}
EXPORT_SYMBOL(sync_blockdev);
/*
 * Write out and wait upon all dirty data associated with this
 * device.  Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int fsync_bdev(struct block_device *bdev)
{
    struct super_block *sb = get_super(bdev);

    if (sb) {
        int res = fsync_super(sb);
        drop_super(sb);
        return res;
    }
    return sync_blockdev(bdev);
}
/**
 * freeze_bdev  --  lock a filesystem and force it into a consistent state
 * @bdev:	blockdevice to lock
 *
 * This takes the block device bd_mount_sem to make sure no new mounts
 * happen on bdev until thaw_bdev() is called.
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 */
struct super_block *freeze_bdev(struct block_device *bdev)
{
    struct super_block *sb;

    down(&bdev->bd_mount_sem);
    sb = get_super(bdev);
    if (sb && !(sb->s_flags & MS_RDONLY)) {
        sb->s_frozen = SB_FREEZE_WRITE;
        smp_wmb();

        __fsync_super(sb);

        sb->s_frozen = SB_FREEZE_TRANS;
        smp_wmb();

        sync_blockdev(sb->s_bdev);

        if (sb->s_op->write_super_lockfs)
            sb->s_op->write_super_lockfs(sb);
    }

    sync_blockdev(bdev);
    return sb;	/* thaw_bdev releases s->s_umount and bd_mount_sem */
}
EXPORT_SYMBOL(freeze_bdev);
/**
 * thaw_bdev  -- unlock filesystem
 * @bdev:	blockdevice to unlock
 * @sb:		associated superblock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
void thaw_bdev(struct block_device *bdev, struct super_block *sb)
{
    if (sb) {
        BUG_ON(sb->s_bdev != bdev);

        if (sb->s_op->unlockfs)
            sb->s_op->unlockfs(sb);
        sb->s_frozen = SB_UNFROZEN;
        smp_wmb();
        wake_up(&sb->s_wait_unfrozen);
        drop_super(sb);
    }

    up(&bdev->bd_mount_sem);
}
EXPORT_SYMBOL(thaw_bdev);
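/*
 * Illustrative sketch (not built, not part of the original file): how a
 * snapshot/backup path might bracket its work with freeze_bdev()/thaw_bdev()
 * so the filesystem on @bdev is quiescent while the copy is taken.  The
 * snapshot step itself is only a placeholder comment.
 */
#if 0
static void example_snapshot(struct block_device *bdev)
{
    struct super_block *sb;

    sb = freeze_bdev(bdev);     /* sync and block new writes */
    /* ... driver-specific snapshot of bdev happens here ... */
    thaw_bdev(bdev, sb);        /* allow writes again */
}
#endif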
/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock.  (But if
 * private_lock is contended then so is mapping->tree_lock).
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
    struct inode *bd_inode = bdev->bd_inode;
    struct address_space *bd_mapping = bd_inode->i_mapping;
    struct buffer_head *ret = NULL;
    pgoff_t index;
    struct buffer_head *bh;
    struct buffer_head *head;
    struct page *page;
    int all_mapped = 1;

    index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
    page = find_get_page(bd_mapping, index);
    if (!page)
        goto out;

    spin_lock(&bd_mapping->private_lock);
    if (!page_has_buffers(page))
        goto out_unlock;
    head = page_buffers(page);
    bh = head;
    do {
        if (bh->b_blocknr == block) {
            ret = bh;
            get_bh(bh);
            goto out_unlock;
        }
        if (!buffer_mapped(bh))
            all_mapped = 0;
        bh = bh->b_this_page;
    } while (bh != head);

    /* we might be here because some of the buffers on this page are
     * not mapped.  This is due to various races between
     * file io on the block device and getblk.  It gets dealt with
     * elsewhere, don't buffer_error if we had some unmapped buffers
     */
    if (all_mapped) {
        printk("__find_get_block_slow() failed. "
            "block=%llu, b_blocknr=%llu\n",
            (unsigned long long)block,
            (unsigned long long)bh->b_blocknr);
        printk("b_state=0x%08lx, b_size=%zu\n",
            bh->b_state, bh->b_size);
        printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
    }
out_unlock:
    spin_unlock(&bd_mapping->private_lock);
    page_cache_release(page);
out:
    return ret;
}
/* If invalidate_buffers() will trash dirty buffers, it means some kind
   of fs corruption is going on.  Trashing dirty data always implies losing
   information that was supposed to be just stored on the physical layer
   by the user.

   Thus invalidate_buffers in general usage is not allowed to trash
   dirty buffers.  For example ioctl(BLKFLSBUF) expects dirty data to
   be preserved.  These buffers are simply skipped.

   We also skip buffers which are still in use.  For example this can
   happen if a userspace program is reading the block device.

   NOTE: In the case where the user removed a removable-media disk even if
   there's still dirty data not synced on disk (due to a bug in the device
   driver or to a user error), by not destroying the dirty buffers we could
   generate corruption also on the next media inserted.  Thus a parameter is
   necessary to handle this case in the safest way possible (trying not to
   corrupt the newly inserted disk with data belonging to the old, now
   corrupted, disk).  Also, for the ramdisk the natural thing to do in order
   to release the ramdisk memory is to destroy dirty buffers.

   These are two special cases.  Normal usage implies that the device driver
   issues a sync on the device (without waiting for I/O completion) and
   then an invalidate_buffers call that doesn't trash dirty buffers.

   For handling cache coherency with the blkdev pagecache the 'update' case
   has been introduced.  It is needed to re-read from disk any pinned
   buffer.  NOTE: re-reading from disk is destructive, so we can do it only
   when we assume nobody is changing the buffercache under our I/O and when
   we think the disk contains more recent information than the buffercache.
   The update == 1 pass marks the buffers we need to update; the update == 2
   pass does the actual I/O. */
void invalidate_bdev(struct block_device *bdev)
{
    struct address_space *mapping = bdev->bd_inode->i_mapping;

    if (mapping->nrpages == 0)
        return;

    invalidate_bh_lrus();
    invalidate_mapping_pages(mapping, 0, -1);
}
/*
 * Kick pdflush then try to free up some ZONE_NORMAL memory.
 */
static void free_more_memory(void)
{
    struct zone **zones;
    pg_data_t *pgdat;

    wakeup_pdflush(1024);
    yield();

    for_each_online_pgdat(pgdat) {
        zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
        if (*zones)
            try_to_free_pages(zones, GFP_NOFS);
    }
}
/*
 * I/O completion handler for block_read_full_page() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
    unsigned long flags;
    struct buffer_head *first;
    struct buffer_head *tmp;
    struct page *page;
    int page_uptodate = 1;

    BUG_ON(!buffer_async_read(bh));

    page = bh->b_page;
    if (uptodate) {
        set_buffer_uptodate(bh);
    } else {
        clear_buffer_uptodate(bh);
        if (printk_ratelimit())
            buffer_io_error(bh);
        SetPageError(page);
    }

    /*
     * Be _very_ careful from here on. Bad things can happen if
     * two buffer heads end IO at almost the same time and both
     * decide that the page is now completely done.
     */
    first = page_buffers(page);
    local_irq_save(flags);
    bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
    clear_buffer_async_read(bh);
    unlock_buffer(bh);
    tmp = bh;
    do {
        if (!buffer_uptodate(tmp))
            page_uptodate = 0;
        if (buffer_async_read(tmp)) {
            BUG_ON(!buffer_locked(tmp));
            goto still_busy;
        }
        tmp = tmp->b_this_page;
    } while (tmp != bh);
    bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
    local_irq_restore(flags);

    /*
     * If none of the buffers had errors and they are all
     * uptodate then we can set the page uptodate.
     */
    if (page_uptodate && !PageError(page))
        SetPageUptodate(page);
    unlock_page(page);
    return;

still_busy:
    bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
    local_irq_restore(flags);
    return;
}
/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
    char b[BDEVNAME_SIZE];
    unsigned long flags;
    struct buffer_head *first;
    struct buffer_head *tmp;
    struct page *page;

    BUG_ON(!buffer_async_write(bh));

    page = bh->b_page;
    if (uptodate) {
        set_buffer_uptodate(bh);
    } else {
        if (printk_ratelimit()) {
            buffer_io_error(bh);
            printk(KERN_WARNING "lost page write due to "
                    "I/O error on %s\n",
                    bdevname(bh->b_bdev, b));
        }
        set_bit(AS_EIO, &page->mapping->flags);
        set_buffer_write_io_error(bh);
        clear_buffer_uptodate(bh);
        SetPageError(page);
    }

    first = page_buffers(page);
    local_irq_save(flags);
    bit_spin_lock(BH_Uptodate_Lock, &first->b_state);

    clear_buffer_async_write(bh);
    unlock_buffer(bh);
    tmp = bh->b_this_page;
    while (tmp != bh) {
        if (buffer_async_write(tmp)) {
            BUG_ON(!buffer_locked(tmp));
            goto still_busy;
        }
        tmp = tmp->b_this_page;
    }
    bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
    local_irq_restore(flags);
    end_page_writeback(page);
    return;

still_busy:
    bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
    local_irq_restore(flags);
    return;
}
/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O reads against any
 * of the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
    bh->b_end_io = end_buffer_async_read;
    set_buffer_async_read(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
    bh->b_end_io = end_buffer_async_write;
    set_buffer_async_write(bh);
}
EXPORT_SYMBOL(mark_buffer_async_write);
/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * dirtied.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).
 */
/*
 * The buffer's backing address_space's private_lock must be held
 */
static inline void __remove_assoc_queue(struct buffer_head *bh)
{
    list_del_init(&bh->b_assoc_buffers);
    WARN_ON(!bh->b_assoc_map);
    if (buffer_write_io_error(bh))
        set_bit(AS_EIO, &bh->b_assoc_map->flags);
    bh->b_assoc_map = NULL;
}
int inode_has_buffers(struct inode *inode)
{
    return !list_empty(&inode->i_data.private_list);
}
/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
 * you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
    struct buffer_head *bh;
    struct list_head *p;
    int err = 0;

    spin_lock(lock);
repeat:
    list_for_each_prev(p, list) {
        bh = BH_ENTRY(p);
        if (buffer_locked(bh)) {
            get_bh(bh);
            spin_unlock(lock);
            wait_on_buffer(bh);
            if (!buffer_uptodate(bh))
                err = -EIO;
            brelse(bh);
            spin_lock(lock);
            goto repeat;
        }
    }
    spin_unlock(lock);
    return err;
}
/**
 * sync_mapping_buffers - write out and wait upon a mapping's "associated"
 *                        buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
    struct address_space *buffer_mapping = mapping->assoc_mapping;

    if (buffer_mapping == NULL || list_empty(&mapping->private_list))
        return 0;

    return fsync_buffers_list(&buffer_mapping->private_lock,
                    &mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);
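/*
 * Illustrative sketch (not built, not part of the original file): a typical
 * buffer-backed filesystem's ->fsync() calls sync_mapping_buffers() so the
 * "associated" metadata buffers (e.g. indirect blocks) reach disk before the
 * inode itself is written.  The function name below is hypothetical.
 */
#if 0
static int example_fsync(struct file *file, struct dentry *dentry,
                int datasync)
{
    struct inode *inode = dentry->d_inode;
    int err;

    err = sync_mapping_buffers(inode->i_mapping);
    /* ... then write the inode itself, e.g. via write_inode_now() ... */
    return err;
}
#endif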
/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
            sector_t bblock, unsigned blocksize)
{
    struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
    if (bh) {
        if (buffer_dirty(bh))
            ll_rw_block(WRITE, 1, &bh);
        put_bh(bh);
    }
}
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
    struct address_space *mapping = inode->i_mapping;
    struct address_space *buffer_mapping = bh->b_page->mapping;

    mark_buffer_dirty(bh);
    if (!mapping->assoc_mapping) {
        mapping->assoc_mapping = buffer_mapping;
    } else {
        BUG_ON(mapping->assoc_mapping != buffer_mapping);
    }
    if (list_empty(&bh->b_assoc_buffers)) {
        spin_lock(&buffer_mapping->private_lock);
        list_move_tail(&bh->b_assoc_buffers,
                &mapping->private_list);
        bh->b_assoc_map = mapping;
        spin_unlock(&buffer_mapping->private_lock);
    }
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);
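/*
 * Illustrative sketch (not built, not part of the original file): after a
 * filesystem updates an on-disk indirect/metadata block on behalf of a
 * regular file, it can attach the buffer to that inode's private_list so
 * sync_mapping_buffers() later finds it.  The names are hypothetical.
 */
#if 0
static void example_dirty_indirect(struct inode *inode,
                struct buffer_head *bh)
{
    /* bh holds an indirect block that was just modified for this inode */
    mark_buffer_dirty_inode(bh, inode);
    /* a subsequent fsync() will write it via sync_mapping_buffers() */
}
#endif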
/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking.  It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 * address_space though.
 */
int __set_page_dirty_buffers(struct page *page)
{
    struct address_space * const mapping = page_mapping(page);

    if (unlikely(!mapping))
        return !TestSetPageDirty(page);

    spin_lock(&mapping->private_lock);
    if (page_has_buffers(page)) {
        struct buffer_head *head = page_buffers(page);
        struct buffer_head *bh = head;

        do {
            set_buffer_dirty(bh);
            bh = bh->b_this_page;
        } while (bh != head);
    }
    spin_unlock(&mapping->private_lock);

    if (TestSetPageDirty(page))
        return 0;

    write_lock_irq(&mapping->tree_lock);
    if (page->mapping) {	/* Race with truncate? */
        if (mapping_cap_account_dirty(mapping)) {
            __inc_zone_page_state(page, NR_FILE_DIRTY);
            task_io_account_write(PAGE_CACHE_SIZE);
        }
        radix_tree_tag_set(&mapping->page_tree,
                page_index(page), PAGECACHE_TAG_DIRTY);
    }
    write_unlock_irq(&mapping->tree_lock);
    __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
    return 1;
}
EXPORT_SYMBOL(__set_page_dirty_buffers);
/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
    struct buffer_head *bh;
    struct list_head tmp;
    int err = 0, err2;

    INIT_LIST_HEAD(&tmp);

    spin_lock(lock);
    while (!list_empty(list)) {
        bh = BH_ENTRY(list->next);
        __remove_assoc_queue(bh);
        if (buffer_dirty(bh) || buffer_locked(bh)) {
            list_add(&bh->b_assoc_buffers, &tmp);
            if (buffer_dirty(bh)) {
                get_bh(bh);
                spin_unlock(lock);
                /*
                 * Ensure any pending I/O completes so that
                 * ll_rw_block() actually writes the current
                 * contents - it is a noop if I/O is still in
                 * flight on potentially older contents.
                 */
                ll_rw_block(SWRITE, 1, &bh);
                brelse(bh);
                spin_lock(lock);
            }
        }
    }

    while (!list_empty(&tmp)) {
        bh = BH_ENTRY(tmp.prev);
        list_del_init(&bh->b_assoc_buffers);
        get_bh(bh);
        spin_unlock(lock);
        wait_on_buffer(bh);
        if (!buffer_uptodate(bh))
            err = -EIO;
        brelse(bh);
        spin_lock(lock);
    }

    spin_unlock(lock);
    err2 = osync_buffers_list(lock, list);
    if (err)
        return err;
    else
        return err2;
}
/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
    if (inode_has_buffers(inode)) {
        struct address_space *mapping = &inode->i_data;
        struct list_head *list = &mapping->private_list;
        struct address_space *buffer_mapping = mapping->assoc_mapping;

        spin_lock(&buffer_mapping->private_lock);
        while (!list_empty(list))
            __remove_assoc_queue(BH_ENTRY(list->next));
        spin_unlock(&buffer_mapping->private_lock);
    }
}
/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
    int ret = 1;

    if (inode_has_buffers(inode)) {
        struct address_space *mapping = &inode->i_data;
        struct list_head *list = &mapping->private_list;
        struct address_space *buffer_mapping = mapping->assoc_mapping;

        spin_lock(&buffer_mapping->private_lock);
        while (!list_empty(list)) {
            struct buffer_head *bh = BH_ENTRY(list->next);
            if (buffer_dirty(bh)) {
                ret = 0;
                break;
            }
            __remove_assoc_queue(bh);
        }
        spin_unlock(&buffer_mapping->private_lock);
    }
    return ret;
}
/*
 * Create the appropriate buffers when given a page for data area and
 * the size of each buffer.  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping)
 * which may not fail from ordinary buffer allocations.
 */
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
        int retry)
{
    struct buffer_head *bh, *head;
    long offset;

try_again:
    head = NULL;
    offset = PAGE_SIZE;
    while ((offset -= size) >= 0) {
        bh = alloc_buffer_head(GFP_NOFS);
        if (!bh)
            goto no_grow;

        bh->b_bdev = NULL;
        bh->b_this_page = head;
        bh->b_blocknr = -1;
        head = bh;

        bh->b_state = 0;
        atomic_set(&bh->b_count, 0);
        bh->b_private = NULL;
        bh->b_size = size;

        /* Link the buffer to its page */
        set_bh_page(bh, page, offset);

        init_buffer(bh, NULL, NULL);
    }
    return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
    if (head) {
        do {
            bh = head;
            head = head->b_this_page;
            free_buffer_head(bh);
        } while (head);
    }

    /*
     * Return failure for non-async IO requests.  Async IO requests
     * are not allowed to fail, so we have to wait until buffer heads
     * become available.  But we don't want tasks sleeping with
     * partially complete buffers, so all were released above.
     */
    if (!retry)
        return NULL;

    /* We're _really_ low on memory. Now we just
     * wait for old buffer heads to become free due to
     * finishing IO.  Since this is an async request and
     * the reserve list is empty, we're sure there are
     * async buffer heads in use.
     */
    free_more_memory();
    goto try_again;
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);
static inline void
link_dev_buffers(struct page *page, struct buffer_head *head)
{
    struct buffer_head *bh, *tail;

    bh = head;
    do {
        tail = bh;
        bh = bh->b_this_page;
    } while (bh);
    tail->b_this_page = head;
    attach_page_buffers(page, head);
}
/*
 * Initialise the state of a blockdev page's buffers.
 */
static void
init_page_buffers(struct page *page, struct block_device *bdev,
            sector_t block, int size)
{
    struct buffer_head *head = page_buffers(page);
    struct buffer_head *bh = head;
    int uptodate = PageUptodate(page);

    do {
        if (!buffer_mapped(bh)) {
            init_buffer(bh, NULL, NULL);
            bh->b_bdev = bdev;
            bh->b_blocknr = block;
            if (uptodate)
                set_buffer_uptodate(bh);
            set_buffer_mapped(bh);
        }
        block++;
        bh = bh->b_this_page;
    } while (bh != head);
}
/*
 * Create the page-cache page that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 */
static struct page *
grow_dev_page(struct block_device *bdev, sector_t block,
        pgoff_t index, int size)
{
    struct inode *inode = bdev->bd_inode;
    struct page *page;
    struct buffer_head *bh;

    page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
    if (!page)
        return NULL;

    BUG_ON(!PageLocked(page));

    if (page_has_buffers(page)) {
        bh = page_buffers(page);
        if (bh->b_size == size) {
            init_page_buffers(page, bdev, block, size);
            return page;
        }
        if (!try_to_free_buffers(page))
            goto failed;
    }

    /*
     * Allocate some buffers for this page
     */
    bh = alloc_page_buffers(page, size, 0);
    if (!bh)
        goto failed;

    /*
     * Link the page to the buffers and initialise them.  Take the
     * lock to be atomic wrt __find_get_block(), which does not
     * run under the page lock.
     */
    spin_lock(&inode->i_mapping->private_lock);
    link_dev_buffers(page, bh);
    init_page_buffers(page, bdev, block, size);
    spin_unlock(&inode->i_mapping->private_lock);
    return page;

failed:
    BUG();
    unlock_page(page);
    page_cache_release(page);
    return NULL;
}
/*
 * Create buffers for the specified block device block's page.  If
 * that page was dirty, the buffers are set dirty also.
 *
 * Except that's a bug.  Attaching dirty buffers to a dirty
 * blockdev's page can result in filesystem corruption, because
 * some of those buffers may be aliases of filesystem data.
 * grow_dev_page() will go BUG() if this happens.
 */
static int
grow_buffers(struct block_device *bdev, sector_t block, int size)
{
    struct page *page;
    pgoff_t index;
    int sizebits;

    sizebits = -1;
    do {
        sizebits++;
    } while ((size << sizebits) < PAGE_SIZE);

    index = block >> sizebits;

    /*
     * Check for a block which wants to lie outside our maximum possible
     * pagecache index.  (this comparison is done using sector_t types).
     */
    if (unlikely(index != block >> sizebits)) {
        char b[BDEVNAME_SIZE];

        printk(KERN_ERR "%s: requested out-of-range block %llu for "
            "device %s\n",
            __FUNCTION__, (unsigned long long)block,
            bdevname(bdev, b));
        return -EIO;
    }
    block = index << sizebits;
    /* Create a page with the proper size buffers.. */
    page = grow_dev_page(bdev, block, index, size);
    if (!page)
        return 0;
    unlock_page(page);
    page_cache_release(page);
    return 1;
}
static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block, int size)
{
    /* Size must be multiple of hard sectorsize */
    if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
            (size < 512 || size > PAGE_SIZE))) {
        printk(KERN_ERR "getblk(): invalid block size %d requested\n",
                    size);
        printk(KERN_ERR "hardsect size: %d\n",
                    bdev_hardsect_size(bdev));
        dump_stack();
        return NULL;
    }

    for (;;) {
        struct buffer_head *bh;
        int ret;

        bh = __find_get_block(bdev, block, size);
        if (bh)
            return bh;

        ret = grow_buffers(bdev, block, size);
        if (ret < 0)
            return NULL;
        if (ret == 0)
            free_more_memory();
    }
}
/*
 * The relationship between dirty buffers and dirty pages:
 *
 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
 * the page is tagged dirty in its radix tree.
 *
 * At all times, the dirtiness of the buffers represents the dirtiness of
 * subsections of the page.  If the page has buffers, the page dirty bit is
 * merely a hint about the true dirty state.
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also.  When blockdev buffers are explicitly read with bread(), they
 * individually become uptodate.  But their backing page remains not
 * uptodate - even if all of its buffers are uptodate.  A subsequent
 * block_read_full_page() against that page will discover all the uptodate
 * buffers, will set the page uptodate and will perform no I/O.
 */

/**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 * @bh: the buffer_head to mark dirty
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
 * backing page dirty, then tag the page as dirty in its address_space's radix
 * tree and then attach the address_space's inode to its superblock's dirty
 * inode list.
 *
 * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
 * mapping->tree_lock and the global inode_lock.
 */
void fastcall mark_buffer_dirty(struct buffer_head *bh)
{
    if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
        __set_page_dirty_nobuffers(bh->b_page);
}
/*
 * Decrement a buffer_head's reference count.  If all buffers against a page
 * have zero reference count, are clean and unlocked, and if the page is clean
 * and unlocked then try_to_free_buffers() may strip the buffers from the page
 * in preparation for freeing it (sometimes, rarely, buffers are removed from
 * a page but it ends up not being freed, and buffers may later be reattached).
 */
void __brelse(struct buffer_head *buf)
{
    if (atomic_read(&buf->b_count)) {
        put_bh(buf);
        return;
    }
    printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
}
/*
 * bforget() is like brelse(), except it discards any
 * potentially dirty data.
 */
void __bforget(struct buffer_head *bh)
{
    clear_buffer_dirty(bh);
    if (!list_empty(&bh->b_assoc_buffers)) {
        struct address_space *buffer_mapping = bh->b_page->mapping;

        spin_lock(&buffer_mapping->private_lock);
        list_del_init(&bh->b_assoc_buffers);
        bh->b_assoc_map = NULL;
        spin_unlock(&buffer_mapping->private_lock);
    }
    __brelse(bh);
}
static struct buffer_head *__bread_slow(struct buffer_head *bh)
{
    lock_buffer(bh);
    if (buffer_uptodate(bh)) {
        unlock_buffer(bh);
        return bh;
    } else {
        get_bh(bh);
        bh->b_end_io = end_buffer_read_sync;
        submit_bh(READ, bh);
        wait_on_buffer(bh);
        if (buffer_uptodate(bh))
            return bh;
    }
    brelse(bh);
    return NULL;
}
/*
 * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
 * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
 * refcount elevated by one when they're in an LRU.  A buffer can only appear
 * once in a particular CPU's LRU.  A single buffer can be present in multiple
 * CPU's LRUs at the same time.
 *
 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
 * sb_find_get_block().
 *
 * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
 * a local interrupt disable for that.
 */

#define BH_LRU_SIZE	8

struct bh_lru {
    struct buffer_head *bhs[BH_LRU_SIZE];
};

static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};

#ifdef CONFIG_SMP
#define bh_lru_lock()	local_irq_disable()
#define bh_lru_unlock()	local_irq_enable()
#else
#define bh_lru_lock()	preempt_disable()
#define bh_lru_unlock()	preempt_enable()
#endif

static inline void check_irqs_on(void)
{
#ifdef irqs_disabled
    BUG_ON(irqs_disabled());
#endif
}
/*
 * The LRU management algorithm is dopey-but-simple.  Sorry.
 */
static void bh_lru_install(struct buffer_head *bh)
{
    struct buffer_head *evictee = NULL;
    struct bh_lru *lru;

    check_irqs_on();
    bh_lru_lock();
    lru = &__get_cpu_var(bh_lrus);
    if (lru->bhs[0] != bh) {
        struct buffer_head *bhs[BH_LRU_SIZE];
        int in;
        int out = 0;

        get_bh(bh);
        bhs[out++] = bh;
        for (in = 0; in < BH_LRU_SIZE; in++) {
            struct buffer_head *bh2 = lru->bhs[in];

            if (bh2 == bh) {
                __brelse(bh2);
            } else {
                if (out >= BH_LRU_SIZE) {
                    BUG_ON(evictee != NULL);
                    evictee = bh2;
                } else {
                    bhs[out++] = bh2;
                }
            }
        }
        while (out < BH_LRU_SIZE)
            bhs[out++] = NULL;
        memcpy(lru->bhs, bhs, sizeof(bhs));
    }
    bh_lru_unlock();

    if (evictee)
        __brelse(evictee);
}
/*
 * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
 */
static struct buffer_head *
lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
    struct buffer_head *ret = NULL;
    struct bh_lru *lru;
    int i;

    check_irqs_on();
    bh_lru_lock();
    lru = &__get_cpu_var(bh_lrus);
    for (i = 0; i < BH_LRU_SIZE; i++) {
        struct buffer_head *bh = lru->bhs[i];

        if (bh && bh->b_bdev == bdev &&
                bh->b_blocknr == block && bh->b_size == size) {
            if (i) {
                while (i) {
                    lru->bhs[i] = lru->bhs[i - 1];
                    i--;
                }
                lru->bhs[0] = bh;
            }
            get_bh(bh);
            ret = bh;
            break;
        }
    }
    bh_lru_unlock();
    return ret;
}
/*
 * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
 * it in the LRU and mark it as accessed.  If it is not present then return
 * NULL.
 */
struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
    struct buffer_head *bh = lookup_bh_lru(bdev, block, size);

    if (bh == NULL) {
        bh = __find_get_block_slow(bdev, block);
        if (bh)
            bh_lru_install(bh);
    }
    if (bh)
        touch_buffer(bh);
    return bh;
}
EXPORT_SYMBOL(__find_get_block);
/*
 * __getblk will locate (and, if necessary, create) the buffer_head
 * which corresponds to the passed block_device, block and size. The
 * returned buffer has its reference count incremented.
 *
 * __getblk() cannot fail - it just keeps trying.  If you pass it an
 * illegal block number, __getblk() will happily return a buffer_head
 * which represents the non-existent block.  Very weird.
 *
 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
 * attempt is failing.  FIXME, perhaps?
 */
struct buffer_head *
__getblk(struct block_device *bdev, sector_t block, unsigned size)
{
    struct buffer_head *bh = __find_get_block(bdev, block, size);

    might_sleep();
    if (bh == NULL)
        bh = __getblk_slow(bdev, block, size);
    return bh;
}
EXPORT_SYMBOL(__getblk);
/*
 * Do async read-ahead on a buffer..
 */
void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
    struct buffer_head *bh = __getblk(bdev, block, size);
    if (likely(bh)) {
        ll_rw_block(READA, 1, &bh);
        brelse(bh);
    }
}
EXPORT_SYMBOL(__breadahead);
/**
 *  __bread() - reads a specified block and returns the bh
 *  @bdev: the block_device to read from
 *  @block: number of block
 *  @size: size (in bytes) to read
 *
 *  Reads a specified block, and returns buffer head that contains it.
 *  It returns NULL if the block was unreadable.
 */
struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
    struct buffer_head *bh = __getblk(bdev, block, size);

    if (likely(bh) && !buffer_uptodate(bh))
        bh = __bread_slow(bh);
    return bh;
}
EXPORT_SYMBOL(__bread);
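/*
 * Illustrative sketch (not built, not part of the original file): reading a
 * single metadata block with __bread() and dropping the reference with
 * brelse().  The superblock pointer and block number are hypothetical.
 */
#if 0
static int example_read_block(struct super_block *sb, sector_t blocknr)
{
    struct buffer_head *bh;

    bh = __bread(sb->s_bdev, blocknr, sb->s_blocksize);
    if (!bh)
        return -EIO;        /* the block was unreadable */
    /* ... examine bh->b_data ... */
    brelse(bh);
    return 0;
}
#endif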
/*
 * invalidate_bh_lrus() is called rarely - but not only at unmount.
 * This doesn't race because it runs in each cpu either in irq
 * or with preempt disabled.
 */
static void invalidate_bh_lru(void *arg)
{
    struct bh_lru *b = &get_cpu_var(bh_lrus);
    int i;

    for (i = 0; i < BH_LRU_SIZE; i++) {
        brelse(b->bhs[i]);
        b->bhs[i] = NULL;
    }
    put_cpu_var(bh_lrus);
}

void invalidate_bh_lrus(void)
{
    on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
}
void set_bh_page(struct buffer_head *bh,
        struct page *page, unsigned long offset)
{
    bh->b_page = page;
    BUG_ON(offset >= PAGE_SIZE);
    if (PageHighMem(page))
        /*
         * This catches illegal uses and preserves the offset:
         */
        bh->b_data = (char *)(0 + offset);
    else
        bh->b_data = page_address(page) + offset;
}
EXPORT_SYMBOL(set_bh_page);
/*
 * Called when truncating a buffer on a page completely.
 */
static void discard_buffer(struct buffer_head *bh)
{
    lock_buffer(bh);
    clear_buffer_dirty(bh);
    bh->b_bdev = NULL;
    clear_buffer_mapped(bh);
    clear_buffer_req(bh);
    clear_buffer_new(bh);
    clear_buffer_delay(bh);
    clear_buffer_unwritten(bh);
    unlock_buffer(bh);
}
/**
 * block_invalidatepage - invalidate part or all of a buffer-backed page
 *
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * block_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * block_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void block_invalidatepage(struct page *page, unsigned long offset)
{
    struct buffer_head *head, *bh, *next;
    unsigned int curr_off = 0;

    BUG_ON(!PageLocked(page));
    if (!page_has_buffers(page))
        goto out;

    head = page_buffers(page);
    bh = head;
    do {
        unsigned int next_off = curr_off + bh->b_size;
        next = bh->b_this_page;

        /*
         * is this block fully invalidated?
         */
        if (offset <= curr_off)
            discard_buffer(bh);
        curr_off = next_off;
        bh = next;
    } while (bh != head);

    /*
     * We release buffers only if the entire page is being invalidated.
     * The get_block cached value has been unconditionally invalidated,
     * so real IO is not possible anymore.
     */
    if (offset == 0)
        try_to_release_page(page, 0);
out:
    return;
}
EXPORT_SYMBOL(block_invalidatepage);
/*
 * We attach and possibly dirty the buffers atomically wrt
 * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
 * is already excluded via the page lock.
 */
void create_empty_buffers(struct page *page,
            unsigned long blocksize, unsigned long b_state)
{
    struct buffer_head *bh, *head, *tail;

    head = alloc_page_buffers(page, blocksize, 1);
    bh = head;
    do {
        bh->b_state |= b_state;
        tail = bh;
        bh = bh->b_this_page;
    } while (bh);
    tail->b_this_page = head;

    spin_lock(&page->mapping->private_lock);
    if (PageUptodate(page) || PageDirty(page)) {
        bh = head;
        do {
            if (PageDirty(page))
                set_buffer_dirty(bh);
            if (PageUptodate(page))
                set_buffer_uptodate(bh);
            bh = bh->b_this_page;
        } while (bh != head);
    }
    attach_page_buffers(page, head);
    spin_unlock(&page->mapping->private_lock);
}
EXPORT_SYMBOL(create_empty_buffers);
/*
 * We are taking a block for data and we don't want any output from any
 * buffer-cache aliases starting from return from this function and
 * until the moment when something will explicitly mark the buffer
 * dirty (hopefully that will not happen until we will free that block ;-)
 * We don't even need to mark it not-uptodate - nobody can expect
 * anything from a newly allocated buffer anyway.  We used to use
 * unmap_buffer() for such invalidation, but that was wrong.  We definitely
 * don't want to mark the alias unmapped, for example - it would confuse
 * anyone who might pick it up with bread() afterwards...
 *
 * Also..  Note that bforget() doesn't lock the buffer.  So there can
 * be writeout I/O going on against recently-freed buffers.  We don't
 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
 * only if we really need to.  That happens here.
 */
void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
{
    struct buffer_head *old_bh;

    might_sleep();

    old_bh = __find_get_block_slow(bdev, block);
    if (old_bh) {
        clear_buffer_dirty(old_bh);
        wait_on_buffer(old_bh);
        clear_buffer_req(old_bh);
        __brelse(old_bh);
    }
}
EXPORT_SYMBOL(unmap_underlying_metadata);
/*
 * NOTE! All mapped/uptodate combinations are valid:
 *
 *	Mapped	Uptodate	Meaning
 *
 *	No	No		"unknown" - must do get_block()
 *	No	Yes		"hole" - zero-filled
 *	Yes	No		"allocated" - allocated on disk, not read in
 *	Yes	Yes		"valid" - allocated and up-to-date in memory.
 *
 * "Dirty" is valid only with the last case (mapped+uptodate).
 */

/*
 * While block_write_full_page is writing back the dirty buffers under
 * the page lock, whoever dirtied the buffers may decide to clean them
 * again at any time.  We handle that by only looking at the buffer
 * state inside lock_buffer().
 *
 * If block_write_full_page() is called for regular writeback
 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
 * locked buffer.  This only can happen if someone has written the buffer
 * directly, with submit_bh().  At the address_space level PageWriteback
 * prevents this contention from occurring.
 */
static int __block_write_full_page(struct inode *inode, struct page *page,
            get_block_t *get_block, struct writeback_control *wbc)
{
    int err;
    sector_t block;
    sector_t last_block;
    struct buffer_head *bh, *head;
    const unsigned blocksize = 1 << inode->i_blkbits;
    int nr_underway = 0;

    BUG_ON(!PageLocked(page));

    last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;

    if (!page_has_buffers(page)) {
        create_empty_buffers(page, blocksize,
                    (1 << BH_Dirty)|(1 << BH_Uptodate));
    }

    /*
     * Be very careful.  We have no exclusion from __set_page_dirty_buffers
     * here, and the (potentially unmapped) buffers may become dirty at
     * any time.  If a buffer becomes dirty here after we've inspected it
     * then we just miss that fact, and the page stays dirty.
     *
     * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
     * handle that here by just cleaning them.
     */

    block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
    head = page_buffers(page);
    bh = head;

    /*
     * Get all the dirty buffers mapped to disk addresses and
     * handle any aliases from the underlying blockdev's mapping.
     */
    do {
        if (block > last_block) {
            /*
             * mapped buffers outside i_size will occur, because
             * this page can be outside i_size when there is a
             * truncate in progress.
             *
             * The buffer was zeroed by block_write_full_page()
             */
            clear_buffer_dirty(bh);
            set_buffer_uptodate(bh);
        } else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
            WARN_ON(bh->b_size != blocksize);
            err = get_block(inode, block, bh, 1);
            if (err)
                goto recover;
            if (buffer_new(bh)) {
                /* blockdev mappings never come here */
                clear_buffer_new(bh);
                unmap_underlying_metadata(bh->b_bdev,
                            bh->b_blocknr);
            }
        }
        bh = bh->b_this_page;
        block++;
    } while (bh != head);

    do {
        if (!buffer_mapped(bh))
            continue;
        /*
         * If it's a fully non-blocking write attempt and we cannot
         * lock the buffer then redirty the page.  Note that this can
         * potentially cause a busy-wait loop from pdflush and kswapd
         * activity, but those code paths have their own higher-level
         * throttling.
         */
        if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
            lock_buffer(bh);
        } else if (test_set_buffer_locked(bh)) {
            redirty_page_for_writepage(wbc, page);
            continue;
        }
        if (test_clear_buffer_dirty(bh)) {
            mark_buffer_async_write(bh);
        } else {
            unlock_buffer(bh);
        }
    } while ((bh = bh->b_this_page) != head);

    /*
     * The page and its buffers are protected by PageWriteback(), so we can
     * drop the bh refcounts early.
     */
    BUG_ON(PageWriteback(page));
    set_page_writeback(page);

    do {
        struct buffer_head *next = bh->b_this_page;
        if (buffer_async_write(bh)) {
            submit_bh(WRITE, bh);
            nr_underway++;
        }
        bh = next;
    } while (bh != head);
    unlock_page(page);

    err = 0;
done:
    if (nr_underway == 0) {
        /*
         * The page was marked dirty, but the buffers were
         * clean.  Someone wrote them back by hand with
         * ll_rw_block/submit_bh.  A rare case.
         */
        end_page_writeback(page);

        /*
         * The page and buffer_heads can be released at any time from
         * here on.
         */
        wbc->pages_skipped++;	/* We didn't write this page */
    }
    return err;

recover:
    /*
     * ENOSPC, or some other error.  We may already have added some
     * blocks to the file, so we need to write these out to avoid
     * exposing stale data.
     * The page is currently locked and not marked for writeback
     */
    bh = head;
    /* Recovery: lock and submit the mapped buffers */
    do {
        if (buffer_mapped(bh) && buffer_dirty(bh)) {
            lock_buffer(bh);
            mark_buffer_async_write(bh);
        } else {
            /*
             * The buffer may have been set dirty during
             * attachment to a dirty page.
             */
            clear_buffer_dirty(bh);
        }
    } while ((bh = bh->b_this_page) != head);
    SetPageError(page);
    BUG_ON(PageWriteback(page));
    mapping_set_error(page->mapping, err);
    set_page_writeback(page);
    do {
        struct buffer_head *next = bh->b_this_page;
        if (buffer_async_write(bh)) {
            clear_buffer_dirty(bh);
            submit_bh(WRITE, bh);
            nr_underway++;
        }
        bh = next;
    } while (bh != head);
    unlock_page(page);
    goto done;
}
static int __block_prepare_write(struct inode *inode, struct page *page,
        unsigned from, unsigned to, get_block_t *get_block)
{
    unsigned block_start, block_end;
    sector_t block;
    int err = 0;
    unsigned blocksize, bbits;
    struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;

    BUG_ON(!PageLocked(page));
    BUG_ON(from > PAGE_CACHE_SIZE);
    BUG_ON(to > PAGE_CACHE_SIZE);
    BUG_ON(from > to);

    blocksize = 1 << inode->i_blkbits;
    if (!page_has_buffers(page))
        create_empty_buffers(page, blocksize, 0);
    head = page_buffers(page);

    bbits = inode->i_blkbits;
    block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);

    for(bh = head, block_start = 0; bh != head || !block_start;
        block++, block_start=block_end, bh = bh->b_this_page) {
        block_end = block_start + blocksize;
        if (block_end <= from || block_start >= to) {
            if (PageUptodate(page)) {
                if (!buffer_uptodate(bh))
                    set_buffer_uptodate(bh);
            }
            continue;
        }
        if (buffer_new(bh))
            clear_buffer_new(bh);
        if (!buffer_mapped(bh)) {
            WARN_ON(bh->b_size != blocksize);
            err = get_block(inode, block, bh, 1);
            if (err)
                break;
            if (buffer_new(bh)) {
                unmap_underlying_metadata(bh->b_bdev,
                            bh->b_blocknr);
                if (PageUptodate(page)) {
                    set_buffer_uptodate(bh);
                    continue;
                }
                if (block_end > to || block_start < from) {
                    void *kaddr;

                    kaddr = kmap_atomic(page, KM_USER0);
                    if (block_end > to)
                        memset(kaddr+to, 0,
                            block_end-to);
                    if (block_start < from)
                        memset(kaddr+block_start,
                            0, from-block_start);
                    flush_dcache_page(page);
                    kunmap_atomic(kaddr, KM_USER0);
                }
                continue;
            }
        }
        if (PageUptodate(page)) {
            if (!buffer_uptodate(bh))
                set_buffer_uptodate(bh);
            continue;
        }
        if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
            !buffer_unwritten(bh) &&
             (block_start < from || block_end > to)) {
            ll_rw_block(READ, 1, &bh);
            *wait_bh++=bh;
        }
    }
    /*
     * If we issued read requests - let them complete.
     */
    while(wait_bh > wait) {
        wait_on_buffer(*--wait_bh);
        if (!buffer_uptodate(*wait_bh))
            err = -EIO;
    }
    if (!err) {
        bh = head;
        do {
            if (buffer_new(bh))
                clear_buffer_new(bh);
        } while ((bh = bh->b_this_page) != head);
        return 0;
    }
    /* Error case: */
    /*
     * Zero out any newly allocated blocks to avoid exposing stale
     * data.  If BH_New is set, we know that the block was newly
     * allocated in the above loop.
     */
    bh = head;
    block_start = 0;
    do {
        block_end = block_start+blocksize;
        if (block_end <= from)
            goto next_bh;
        if (block_start >= to)
            break;
        if (buffer_new(bh)) {
            void *kaddr;

            clear_buffer_new(bh);
            kaddr = kmap_atomic(page, KM_USER0);
            memset(kaddr+block_start, 0, bh->b_size);
            flush_dcache_page(page);
            kunmap_atomic(kaddr, KM_USER0);
            set_buffer_uptodate(bh);
            mark_buffer_dirty(bh);
        }
next_bh:
        block_start = block_end;
        bh = bh->b_this_page;
    } while (bh != head);
    return err;
}
static int __block_commit_write(struct inode *inode, struct page *page,
        unsigned from, unsigned to)
{
    unsigned block_start, block_end;
    int partial = 0;
    unsigned blocksize;
    struct buffer_head *bh, *head;

    blocksize = 1 << inode->i_blkbits;

    for(bh = head = page_buffers(page), block_start = 0;
        bh != head || !block_start;
        block_start=block_end, bh = bh->b_this_page) {
        block_end = block_start + blocksize;
        if (block_end <= from || block_start >= to) {
            if (!buffer_uptodate(bh))
                partial = 1;
        } else {
            set_buffer_uptodate(bh);
            mark_buffer_dirty(bh);
        }
    }

    /*
     * If this is a partial write which happened to make all buffers
     * uptodate then we can optimize away a bogus readpage() for
     * the next read(). Here we 'discover' whether the page went
     * uptodate as a result of this (potentially partial) write.
     */
    if (!partial)
        SetPageUptodate(page);
    return 0;
}
/*
 * Generic "read page" function for block devices that have the normal
 * get_block functionality. This is most of the block device filesystems.
 * Reads the page asynchronously --- the unlock_buffer() and
 * set/clear_buffer_uptodate() functions propagate buffer state into the
 * page struct once IO has completed.
 */
int block_read_full_page(struct page *page, get_block_t *get_block)
{
    struct inode *inode = page->mapping->host;
    sector_t iblock, lblock;
    struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
    unsigned int blocksize;
    int nr, i;
    int fully_mapped = 1;

    BUG_ON(!PageLocked(page));
    blocksize = 1 << inode->i_blkbits;
    if (!page_has_buffers(page))
        create_empty_buffers(page, blocksize, 0);
    head = page_buffers(page);

    iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
    lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
    bh = head;
    nr = 0;
    i = 0;

    do {
        if (buffer_uptodate(bh))
            continue;

        if (!buffer_mapped(bh)) {
            int err = 0;

            fully_mapped = 0;
            if (iblock < lblock) {
                WARN_ON(bh->b_size != blocksize);
                err = get_block(inode, iblock, bh, 0);
                if (err)
                    SetPageError(page);
            }
            if (!buffer_mapped(bh)) {
                void *kaddr = kmap_atomic(page, KM_USER0);
                memset(kaddr + i * blocksize, 0, blocksize);
                flush_dcache_page(page);
                kunmap_atomic(kaddr, KM_USER0);
                if (!err)
                    set_buffer_uptodate(bh);
                continue;
            }
            /*
             * get_block() might have updated the buffer
             * synchronously
             */
            if (buffer_uptodate(bh))
                continue;
        }
        arr[nr++] = bh;
    } while (i++, iblock++, (bh = bh->b_this_page) != head);

    if (fully_mapped)
        SetPageMappedToDisk(page);

    if (!nr) {
        /*
         * All buffers are uptodate - we can set the page uptodate
         * as well. But not if get_block() returned an error.
         */
        if (!PageError(page))
            SetPageUptodate(page);
        unlock_page(page);
        return 0;
    }

    /* Stage two: lock the buffers */
    for (i = 0; i < nr; i++) {
        bh = arr[i];
        lock_buffer(bh);
        mark_buffer_async_read(bh);
    }

    /*
     * Stage 3: start the IO.  Check for uptodateness
     * inside the buffer lock in case another process reading
     * the underlying blockdev brought it uptodate (the sct fix).
     */
    for (i = 0; i < nr; i++) {
        bh = arr[i];
        if (buffer_uptodate(bh))
            end_buffer_async_read(bh, 1);
        else
            submit_bh(READ, bh);
    }
    return 0;
}
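/*
 * Illustrative sketch (not built, not part of the original file): a minimal
 * ->readpage() built on block_read_full_page().  example_get_block is the
 * filesystem's own hypothetical block-mapping routine with the get_block_t
 * signature.
 */
#if 0
static int example_get_block(struct inode *inode, sector_t iblock,
                struct buffer_head *bh_result, int create)
{
    /* map iblock to a disk block and set bh_result state accordingly */
    return 0;
}

static int example_readpage(struct file *file, struct page *page)
{
    return block_read_full_page(page, example_get_block);
}
#endif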
/* utility function for filesystems that need to do work on expanding
 * truncates.  Uses prepare/commit_write to allow the filesystem to
 * deal with the hole.
 */
static int __generic_cont_expand(struct inode *inode, loff_t size,
                pgoff_t index, unsigned int offset)
{
    struct address_space *mapping = inode->i_mapping;
    struct page *page;
    unsigned long limit;
    int err;

    err = -EFBIG;
    limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
    if (limit != RLIM_INFINITY && size > (loff_t)limit) {
        send_sig(SIGXFSZ, current, 0);
        goto out;
    }
    if (size > inode->i_sb->s_maxbytes)
        goto out;

    err = -ENOMEM;
    page = grab_cache_page(mapping, index);
    if (!page)
        goto out;
    err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
    if (err) {
        /*
         * ->prepare_write() may have instantiated a few blocks
         * outside i_size.  Trim these off again.
         */
        unlock_page(page);
        page_cache_release(page);
        vmtruncate(inode, inode->i_size);
        goto out;
    }

    err = mapping->a_ops->commit_write(NULL, page, offset, offset);

    unlock_page(page);
    page_cache_release(page);
    if (err > 0)
        err = 0;
out:
    return err;
}
int generic_cont_expand(struct inode *inode, loff_t size)
{
    pgoff_t index;
    unsigned int offset;

    offset = (size & (PAGE_CACHE_SIZE - 1)); /* Within page */

    /* ugh.  in prepare/commit_write, if from==to==start of block, we
    ** skip the prepare.  make sure we never send an offset for the start
    ** of a block
    */
    if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
        /* caller must handle this extra byte. */
        offset++;
    }
    index = size >> PAGE_CACHE_SHIFT;

    return __generic_cont_expand(inode, size, index, offset);
}

int generic_cont_expand_simple(struct inode *inode, loff_t size)
{
    loff_t pos = size - 1;
    pgoff_t index = pos >> PAGE_CACHE_SHIFT;
    unsigned int offset = (pos & (PAGE_CACHE_SIZE - 1)) + 1;

    /* prepare/commit_write can handle even if from==to==start of block. */
    return __generic_cont_expand(inode, size, index, offset);
}
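/*
 * Illustrative sketch (not built, not part of the original file): an
 * extending truncate in a filesystem ->setattr() path can use
 * generic_cont_expand_simple() so the new tail gets zeroed through the
 * prepare/commit_write machinery above.  The surrounding name is
 * hypothetical.
 */
#if 0
static int example_expand(struct inode *inode, loff_t new_size)
{
    if (new_size > inode->i_size)
        return generic_cont_expand_simple(inode, new_size);
    return 0;
}
#endif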
/*
 * For moronic filesystems that do not allow holes in file.
 * We may have to extend the file.
 */
int cont_prepare_write(struct page *page, unsigned offset,
        unsigned to, get_block_t *get_block, loff_t *bytes)
{
    struct address_space *mapping = page->mapping;
    struct inode *inode = mapping->host;
    struct page *new_page;
    pgoff_t pgpos;
    long status;
    unsigned zerofrom;
    unsigned blocksize = 1 << inode->i_blkbits;
    void *kaddr;

    while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
        status = -ENOMEM;
        new_page = grab_cache_page(mapping, pgpos);
        if (!new_page)
            goto out;
        /* we might sleep */
        if (*bytes>>PAGE_CACHE_SHIFT != pgpos) {
            unlock_page(new_page);
            page_cache_release(new_page);
            continue;
        }
        zerofrom = *bytes & ~PAGE_CACHE_MASK;
        if (zerofrom & (blocksize-1)) {
            *bytes |= (blocksize-1);
            (*bytes)++;
        }
        status = __block_prepare_write(inode, new_page, zerofrom,
                        PAGE_CACHE_SIZE, get_block);
        if (status)
            goto out_unmap;
        kaddr = kmap_atomic(new_page, KM_USER0);
        memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
        flush_dcache_page(new_page);
        kunmap_atomic(kaddr, KM_USER0);
        generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
        unlock_page(new_page);
        page_cache_release(new_page);
    }

    if (page->index < pgpos) {
        /* completely inside the area */
        zerofrom = offset;
    } else {
        /* page covers the boundary, find the boundary offset */
        zerofrom = *bytes & ~PAGE_CACHE_MASK;

        /* if we will expand the thing last block will be filled */
        if (to > zerofrom && (zerofrom & (blocksize-1))) {
            *bytes |= (blocksize-1);
            (*bytes)++;
        }

        /* starting below the boundary? Nothing to zero out */
        if (offset <= zerofrom)
            zerofrom = offset;
    }
    status = __block_prepare_write(inode, page, zerofrom, to, get_block);
    if (status)
        goto out1;
    if (zerofrom < offset) {
        kaddr = kmap_atomic(page, KM_USER0);
        memset(kaddr+zerofrom, 0, offset-zerofrom);
        flush_dcache_page(page);
        kunmap_atomic(kaddr, KM_USER0);
        __block_commit_write(inode, page, zerofrom, offset);
    }
    return 0;
out1:
    ClearPageUptodate(page);
    return status;

out_unmap:
    ClearPageUptodate(new_page);
    unlock_page(new_page);
    page_cache_release(new_page);
out:
    return status;
}
int block_prepare_write(struct page *page, unsigned from, unsigned to,
            get_block_t *get_block)
{
    struct inode *inode = page->mapping->host;
    int err = __block_prepare_write(inode, page, from, to, get_block);
    if (err)
        ClearPageUptodate(page);
    return err;
}
int block_commit_write(struct page *page, unsigned from, unsigned to)
{
    struct inode *inode = page->mapping->host;
    __block_commit_write(inode, page, from, to);
    return 0;
}
int generic_commit_write(struct file *file, struct page *page,
        unsigned from, unsigned to)
{
    struct inode *inode = page->mapping->host;
    loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;

    __block_commit_write(inode, page, from, to);
    /*
     * No need to use i_size_read() here, the i_size
     * cannot change under us because we hold i_mutex.
     */
    if (pos > inode->i_size) {
        i_size_write(inode, pos);
        mark_inode_dirty(inode);
    }
    return 0;
}
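/*
 * Illustrative sketch (not built, not part of the original file): how a
 * classic buffer-backed filesystem wires these helpers into its
 * address_space_operations (compare ext2).  example_readpage,
 * example_writepage and example_prepare_write are hypothetical wrappers
 * around block_read_full_page(), block_write_full_page() and
 * block_prepare_write().
 */
#if 0
static const struct address_space_operations example_aops = {
    .readpage      = example_readpage,
    .writepage     = example_writepage,
    .sync_page     = block_sync_page,
    .prepare_write = example_prepare_write,
    .commit_write  = generic_commit_write,
};
#endif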
/*
 * nobh_prepare_write()'s prereads are special: the buffer_heads are freed
 * immediately, while under the page lock.  So it needs a special end_io
 * handler which does not touch the bh after unlocking it.
 *
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
{
    if (uptodate) {
        set_buffer_uptodate(bh);
    } else {
        /* This happens, due to failed READA attempts. */
        clear_buffer_uptodate(bh);
    }
    unlock_buffer(bh);
}
/*
 * On entry, the page is fully not uptodate.
 * On exit the page is fully uptodate in the areas outside (from,to)
 */
int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
            get_block_t *get_block)
{
    struct inode *inode = page->mapping->host;
    const unsigned blkbits = inode->i_blkbits;
    const unsigned blocksize = 1 << blkbits;
    struct buffer_head map_bh;
    struct buffer_head *read_bh[MAX_BUF_PER_PAGE];
    unsigned block_in_page;
    unsigned block_start;
    sector_t block_in_file;
    char *kaddr;
    int nr_reads = 0;
    int i;
    int ret = 0;
    int is_mapped_to_disk = 1;

    if (PageMappedToDisk(page))
        return 0;

    block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
    map_bh.b_page = page;

    /*
     * We loop across all blocks in the page, whether or not they are
     * part of the affected region.  This is so we can discover if the
     * page is fully mapped-to-disk.
     */
    for (block_start = 0, block_in_page = 0;
          block_start < PAGE_CACHE_SIZE;
          block_in_page++, block_start += blocksize) {
        unsigned block_end = block_start + blocksize;

        if (block_start >= to)
            break;
        map_bh.b_size = blocksize;
        ret = get_block(inode, block_in_file + block_in_page,
                    &map_bh, 1);
        if (ret)
            goto failed;
        if (!buffer_mapped(&map_bh))
            is_mapped_to_disk = 0;
        if (buffer_new(&map_bh))
            unmap_underlying_metadata(map_bh.b_bdev,
                            map_bh.b_blocknr);
        if (PageUptodate(page))
            continue;
        if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) {
            kaddr = kmap_atomic(page, KM_USER0);
            if (block_start < from)
                memset(kaddr+block_start, 0, from-block_start);
            if (block_end > to)
                memset(kaddr + to, 0, block_end - to);
            flush_dcache_page(page);
            kunmap_atomic(kaddr, KM_USER0);
            continue;
        }
        if (buffer_uptodate(&map_bh))
            continue;	/* reiserfs does this */
        if (block_start < from || block_end > to) {
            struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);

            if (!bh) {
                ret = -ENOMEM;
                goto failed;
            }
            bh->b_state = map_bh.b_state;
            atomic_set(&bh->b_count, 0);
            bh->b_this_page = NULL;
            bh->b_page = page;
            bh->b_blocknr = map_bh.b_blocknr;
            bh->b_size = blocksize;
            bh->b_data = (char *)(long)block_start;
            bh->b_bdev = map_bh.b_bdev;
            bh->b_private = NULL;
            read_bh[nr_reads++] = bh;
        }
    }

    if (nr_reads) {
        struct buffer_head *bh;

        /*
         * The page is locked, so these buffers are protected from
         * any VM or truncate activity.  Hence we don't need to care
         * for the buffer_head refcounts.
         */
        for (i = 0; i < nr_reads; i++) {
            bh = read_bh[i];
            lock_buffer(bh);
            bh->b_end_io = end_buffer_read_nobh;
            submit_bh(READ, bh);
        }
        for (i = 0; i < nr_reads; i++) {
            bh = read_bh[i];
            wait_on_buffer(bh);
            if (!buffer_uptodate(bh))
                ret = -EIO;
            free_buffer_head(bh);
            read_bh[i] = NULL;
        }
        if (ret)
            goto failed;
    }

    if (is_mapped_to_disk)
        SetPageMappedToDisk(page);

    return 0;

failed:
    for (i = 0; i < nr_reads; i++) {
        if (read_bh[i])
            free_buffer_head(read_bh[i]);
    }

    /*
     * Error recovery is pretty slack.  Clear the page and mark it dirty
     * so we'll later zero out any blocks which _were_ allocated.
     */
    kaddr = kmap_atomic(page, KM_USER0);
    memset(kaddr, 0, PAGE_CACHE_SIZE);
    flush_dcache_page(page);
    kunmap_atomic(kaddr, KM_USER0);
    SetPageUptodate(page);
    set_page_dirty(page);

    return ret;
}
EXPORT_SYMBOL(nobh_prepare_write);
/*
 * Make sure any changes to nobh_commit_write() are reflected in
 * nobh_truncate_page(), since it doesn't call commit_write().
 */
int nobh_commit_write(struct file *file, struct page *page,
		unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;

	SetPageUptodate(page);
	set_page_dirty(page);
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}
	return 0;
}
EXPORT_SYMBOL(nobh_commit_write);
/*
 * nobh_writepage() - based on block_write_full_page() except
 * that it tries to operate without attaching bufferheads to
 * the page.
 */
int nobh_writepage(struct page *page, get_block_t *get_block,
			struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset;
	void *kaddr;
	int ret;

	/* Is the page fully inside i_size? */
	if (page->index < end_index)
		goto out;

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_CACHE_SIZE-1);
	if (page->index >= end_index+1 || !offset) {
		/*
		 * The page may have dirty, unmapped buffers.  For example,
		 * they may have been added in ext3_writepage().  Make them
		 * freeable here, so the page does not leak.
		 */
#if 0
		/* Not really sure about this  - do we need this ? */
		if (page->mapping->a_ops->invalidatepage)
			page->mapping->a_ops->invalidatepage(page, offset);
#endif
		unlock_page(page);
		return 0; /* don't care */
	}

	/*
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	kaddr = kmap_atomic(page, KM_USER0);
	memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);
out:
	ret = mpage_writepage(page, get_block, wbc);
	if (ret == -EAGAIN)
		ret = __block_write_full_page(inode, page, get_block, wbc);
	return ret;
}
EXPORT_SYMBOL(nobh_writepage);
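
/*
 * A minimal sketch (not from the original file) of wiring the nobh
 * variants: a filesystem that wants to keep bufferheads off its data
 * pages can pair nobh_prepare_write()/nobh_commit_write() with
 * nobh_writepage().  example_get_block() and example_readpage() are the
 * hypothetical helpers introduced in the earlier sketch.
 */
static int example_nobh_prepare_write(struct file *file, struct page *page,
		unsigned from, unsigned to)
{
	return nobh_prepare_write(page, from, to, example_get_block);
}

static int example_nobh_writepage(struct page *page,
		struct writeback_control *wbc)
{
	return nobh_writepage(page, example_get_block, wbc);
}

static const struct address_space_operations example_nobh_aops = {
	.readpage	= example_readpage,
	.writepage	= example_nobh_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= example_nobh_prepare_write,
	.commit_write	= nobh_commit_write,
};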
/*
 * This function assumes that ->prepare_write() uses nobh_prepare_write().
 */
int nobh_truncate_page(struct address_space *mapping, loff_t from)
{
	struct inode *inode = mapping->host;
	unsigned blocksize = 1 << inode->i_blkbits;
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned to;
	struct page *page;
	const struct address_space_operations *a_ops = mapping->a_ops;
	char *kaddr;
	int ret = 0;

	if ((offset & (blocksize - 1)) == 0)
		goto out;

	ret = -ENOMEM;
	page = grab_cache_page(mapping, index);
	if (!page)
		goto out;

	to = (offset + blocksize) & ~(blocksize - 1);
	ret = a_ops->prepare_write(NULL, page, offset, to);
	if (ret == 0) {
		kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
		flush_dcache_page(page);
		kunmap_atomic(kaddr, KM_USER0);
		/*
		 * It would be more correct to call aops->commit_write()
		 * here, but this is more efficient.
		 */
		SetPageUptodate(page);
		set_page_dirty(page);
	}
	unlock_page(page);
	page_cache_release(page);
out:
	return ret;
}
EXPORT_SYMBOL(nobh_truncate_page);
int block_truncate_page(struct address_space *mapping,
			loff_t from, get_block_t *get_block)
{
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize;
	sector_t iblock;
	unsigned length, pos;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head *bh;
	void *kaddr;
	int err;

	blocksize = 1 << inode->i_blkbits;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;
	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);

	page = grab_cache_page(mapping, index);
	err = -ENOMEM;
	if (!page)
		goto out;

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	/* Find the buffer that contains "offset" */
	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	err = 0;
	if (!buffer_mapped(bh)) {
		WARN_ON(bh->b_size != blocksize);
		err = get_block(inode, iblock, bh, 0);
		if (err)
			goto unlock;
		/* unmapped? It's a hole - nothing to do */
		if (!buffer_mapped(bh))
			goto unlock;
	}

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
		err = -EIO;
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		/* Uhhuh. Read error. Complain and punt. */
		if (!buffer_uptodate(bh))
			goto unlock;
	}

	kaddr = kmap_atomic(page, KM_USER0);
	memset(kaddr + offset, 0, length);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);

	mark_buffer_dirty(bh);
	err = 0;

unlock:
	unlock_page(page);
	page_cache_release(page);
out:
	return err;
}
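
/*
 * Sketch (an assumption, not code from this file): a filesystem's
 * truncate path typically zeroes the partial block at the new EOF with
 * block_truncate_page() before releasing the blocks beyond i_size.
 * example_get_block() is the hypothetical mapping from the earlier sketch.
 */
static void example_truncate(struct inode *inode)
{
	/* Zero the tail of the (possibly partial) block containing EOF. */
	block_truncate_page(inode->i_mapping, inode->i_size,
				example_get_block);

	/* A real filesystem would now free the on-disk blocks past i_size. */
	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
}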
/*
 * The generic ->writepage function for buffer-backed address_spaces
 */
int block_write_full_page(struct page *page, get_block_t *get_block,
			struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset;
	void *kaddr;

	/* Is the page fully inside i_size? */
	if (page->index < end_index)
		return __block_write_full_page(inode, page, get_block, wbc);

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_CACHE_SIZE-1);
	if (page->index >= end_index+1 || !offset) {
		/*
		 * The page may have dirty, unmapped buffers.  For example,
		 * they may have been added in ext3_writepage().  Make them
		 * freeable here, so the page does not leak.
		 */
		do_invalidatepage(page, 0);
		unlock_page(page);
		return 0; /* don't care */
	}

	/*
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	kaddr = kmap_atomic(page, KM_USER0);
	memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);
	return __block_write_full_page(inode, page, get_block, wbc);
}
sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
			    get_block_t *get_block)
{
	struct buffer_head tmp;
	struct inode *inode = mapping->host;
	tmp.b_state = 0;
	tmp.b_blocknr = 0;
	tmp.b_size = 1 << inode->i_blkbits;
	get_block(inode, block, &tmp, 0);
	return tmp.b_blocknr;
}
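
/*
 * Sketch, not in the original file: a filesystem's ->bmap method usually
 * just forwards to generic_block_bmap() with its own get_block (here the
 * hypothetical example_get_block() from the sketch above).
 */
static sector_t example_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, example_get_block);
}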
static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
{
	struct buffer_head *bh = bio->bi_private;

	if (bio->bi_size)
		return 1;

	if (err == -EOPNOTSUPP) {
		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
		set_bit(BH_Eopnotsupp, &bh->b_state);
	}

	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
	bio_put(bio);
	return 0;
}
int submit_bh(int rw, struct buffer_head * bh)
{
	struct bio *bio;
	int ret = 0;

	BUG_ON(!buffer_locked(bh));
	BUG_ON(!buffer_mapped(bh));
	BUG_ON(!bh->b_end_io);

	if (buffer_ordered(bh) && (rw == WRITE))
		rw = WRITE_BARRIER;

	/*
	 * Only clear out a write error when rewriting, should this
	 * include WRITE_SYNC as well?
	 */
	if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
		clear_buffer_write_io_error(bh);

	/*
	 * from here on down, it's all bio -- do the initial mapping,
	 * submit_bio -> generic_make_request may further map this bio around
	 */
	bio = bio_alloc(GFP_NOIO, 1);

	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio->bi_io_vec[0].bv_page = bh->b_page;
	bio->bi_io_vec[0].bv_len = bh->b_size;
	bio->bi_io_vec[0].bv_offset = bh_offset(bh);

	bio->bi_vcnt = 1;
	bio->bi_idx = 0;
	bio->bi_size = bh->b_size;

	bio->bi_end_io = end_bio_bh_io_sync;
	bio->bi_private = bh;

	bio_get(bio);
	submit_bio(rw, bio);

	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;

	bio_put(bio);
	return ret;
}
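
/*
 * Sketch (not part of the original source): reading a single mapped
 * buffer synchronously through submit_bh().  The extra get_bh()
 * reference is dropped by end_buffer_read_sync() on completion.
 */
static int example_read_bh_sync(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}
	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}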
/**
 * ll_rw_block: low-level access to block devices (DEPRECATED)
 * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
 * @nr: number of &struct buffer_heads in the array
 * @bhs: array of pointers to &struct buffer_head
 *
 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
 * requests an I/O operation on them, either a %READ or a %WRITE.  The third
 * %SWRITE is like %WRITE only we make sure that the *current* data in the
 * buffers is sent to disk.  The fourth %READA option is described in the
 * documentation for generic_make_request() which ll_rw_block() calls.
 *
 * This function drops any buffer that it cannot get a lock on (with the
 * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
 * clean when doing a write request, and any buffer that appears to be
 * up-to-date when doing a read request.  Further it marks as clean buffers
 * that are processed for writing (the buffer cache won't assume that they are
 * actually clean until the buffer gets unlocked).
 *
 * ll_rw_block sets b_end_io to a simple completion handler that marks
 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
 * any waiters.
 *
 * All of the buffers must be for the same device, and must also be a
 * multiple of the current approved size for the device.
 */
void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
{
	int i;

	for (i = 0; i < nr; i++) {
		struct buffer_head *bh = bhs[i];

		if (rw == SWRITE)
			lock_buffer(bh);
		else if (test_set_buffer_locked(bh))
			continue;

		if (rw == WRITE || rw == SWRITE) {
			if (test_clear_buffer_dirty(bh)) {
				bh->b_end_io = end_buffer_write_sync;
				get_bh(bh);
				submit_bh(WRITE, bh);
				continue;
			}
		} else {
			if (!buffer_uptodate(bh)) {
				bh->b_end_io = end_buffer_read_sync;
				get_bh(bh);
				submit_bh(rw, bh);
				continue;
			}
		}
		unlock_buffer(bh);
	}
}
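
/*
 * Sketch, not from the original file: the classic ll_rw_block() pattern
 * used by older filesystems to read a pair of metadata blocks and wait
 * for them.  The superblock and block numbers are hypothetical parameters.
 */
static int example_read_two_blocks(struct super_block *sb,
		sector_t blk1, sector_t blk2)
{
	struct buffer_head *bhs[2];
	int err = 0;

	bhs[0] = sb_getblk(sb, blk1);
	bhs[1] = sb_getblk(sb, blk2);
	if (!bhs[0] || !bhs[1]) {
		brelse(bhs[0]);
		brelse(bhs[1]);
		return -ENOMEM;
	}
	ll_rw_block(READ, 2, bhs);
	wait_on_buffer(bhs[0]);
	wait_on_buffer(bhs[1]);
	if (!buffer_uptodate(bhs[0]) || !buffer_uptodate(bhs[1]))
		err = -EIO;
	brelse(bhs[0]);
	brelse(bhs[1]);
	return err;
}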
/*
 * For a data-integrity writeout, we need to wait upon any in-progress I/O
 * and then start new I/O and then wait upon it.  The caller must have a ref on
 * the buffer_head.
 */
int sync_dirty_buffer(struct buffer_head *bh)
{
	int ret = 0;

	WARN_ON(atomic_read(&bh->b_count) < 1);
	lock_buffer(bh);
	if (test_clear_buffer_dirty(bh)) {
		get_bh(bh);
		bh->b_end_io = end_buffer_write_sync;
		ret = submit_bh(WRITE, bh);
		wait_on_buffer(bh);
		if (buffer_eopnotsupp(bh)) {
			clear_buffer_eopnotsupp(bh);
			ret = -EOPNOTSUPP;
		}
		if (!ret && !buffer_uptodate(bh))
			ret = -EIO;
	} else {
		unlock_buffer(bh);
	}
	return ret;
}
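
/*
 * Sketch (not in the original source): the common way a filesystem pushes
 * out a modified metadata block, e.g. its superblock buffer, and waits for
 * the write to reach the device.
 */
static int example_commit_super(struct buffer_head *sb_bh)
{
	mark_buffer_dirty(sb_bh);
	return sync_dirty_buffer(sb_bh);	/* waits for the write */
}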
/*
 * try_to_free_buffers() checks if all the buffers on this particular page
 * are unused, and releases them if so.
 *
 * Exclusion against try_to_free_buffers may be obtained by either
 * locking the page or by holding its mapping's private_lock.
 *
 * If the page is dirty but all the buffers are clean then we need to
 * be sure to mark the page clean as well.  This is because the page
 * may be against a block device, and a later reattachment of buffers
 * to a dirty page will set *all* buffers dirty.  Which would corrupt
 * filesystem data on the same device.
 *
 * The same applies to regular filesystem pages: if all the buffers are
 * clean then we set the page clean and proceed.  To do that, we require
 * total exclusion from __set_page_dirty_buffers().  That is obtained with
 * private_lock.
 *
 * try_to_free_buffers() is non-blocking.
 */
static inline int buffer_busy(struct buffer_head *bh)
{
	return atomic_read(&bh->b_count) |
		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
}
static int
drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh;

	bh = head;
	do {
		if (buffer_write_io_error(bh) && page->mapping)
			set_bit(AS_EIO, &page->mapping->flags);
		if (buffer_busy(bh))
			goto failed;
		bh = bh->b_this_page;
	} while (bh != head);

	do {
		struct buffer_head *next = bh->b_this_page;

		if (!list_empty(&bh->b_assoc_buffers))
			__remove_assoc_queue(bh);
		bh = next;
	} while (bh != head);
	*buffers_to_free = head;
	__clear_page_buffers(page);
	return 1;
failed:
	return 0;
}
int try_to_free_buffers(struct page *page)
{
	struct address_space * const mapping = page->mapping;
	struct buffer_head *buffers_to_free = NULL;
	int ret = 0;

	BUG_ON(!PageLocked(page));
	if (PageWriteback(page))
		return 0;

	if (mapping == NULL) {		/* can this still happen? */
		ret = drop_buffers(page, &buffers_to_free);
		goto out;
	}

	spin_lock(&mapping->private_lock);
	ret = drop_buffers(page, &buffers_to_free);

	/*
	 * If the filesystem writes its buffers by hand (eg ext3)
	 * then we can have clean buffers against a dirty page.  We
	 * clean the page here; otherwise the VM will never notice
	 * that the filesystem did any IO at all.
	 *
	 * Also, during truncate, discard_buffer will have marked all
	 * the page's buffers clean.  We discover that here and clean
	 * the page also.
	 *
	 * private_lock must be held over this entire operation in order
	 * to synchronise against __set_page_dirty_buffers and prevent the
	 * dirty bit from being lost.
	 */
	if (ret)
		cancel_dirty_page(page, PAGE_CACHE_SIZE);
	spin_unlock(&mapping->private_lock);
out:
	if (buffers_to_free) {
		struct buffer_head *bh = buffers_to_free;

		do {
			struct buffer_head *next = bh->b_this_page;
			free_buffer_head(bh);
			bh = next;
		} while (bh != buffers_to_free);
	}
	return ret;
}
EXPORT_SYMBOL(try_to_free_buffers);
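
/*
 * Sketch, not part of this file: a buffer-backed filesystem's
 * ->releasepage usually just asks try_to_free_buffers() to drop the
 * page's buffers; the gfp mask is ignored here because the call does
 * not block.
 */
static int example_releasepage(struct page *page, gfp_t gfp_mask)
{
	return try_to_free_buffers(page);
}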
void block_sync_page(struct page *page)
{
	struct address_space *mapping;

	smp_mb();
	mapping = page_mapping(page);
	if (mapping)
		blk_run_backing_dev(mapping->backing_dev_info, page);
}
/*
 * There are no bdflush tunables left.  But distributions are
 * still running obsolete flush daemons, so we terminate them here.
 *
 * Use of bdflush() is deprecated and will be removed in a future kernel.
 * The `pdflush' kernel threads fully replace bdflush daemons and this call.
 */
asmlinkage long sys_bdflush(int func, long data)
{
	static int msg_count;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (msg_count < 5) {
		msg_count++;
		printk(KERN_INFO
			"warning: process `%s' used the obsolete bdflush"
			" system call\n", current->comm);
		printk(KERN_INFO "Fix your initscripts?\n");
	}

	if (func == 1)
		do_exit(0);
	return 0;
}
/*
 * Buffer-head allocation
 */
static struct kmem_cache *bh_cachep;

/*
 * Once the number of bh's in the machine exceeds this level, we start
 * stripping them in writeback.
 */
static int max_buffer_heads;

int buffer_heads_over_limit;

struct bh_accounting {
	int nr;			/* Number of live bh's */
	int ratelimit;		/* Limit cacheline bouncing */
};

static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};

static void recalc_bh_state(void)
{
	int i;
	int tot = 0;

	if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
		return;
	__get_cpu_var(bh_accounting).ratelimit = 0;
	for_each_online_cpu(i)
		tot += per_cpu(bh_accounting, i).nr;
	buffer_heads_over_limit = (tot > max_buffer_heads);
}
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
{
	struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
	if (ret) {
		get_cpu_var(bh_accounting).nr++;
		recalc_bh_state();
		put_cpu_var(bh_accounting);
	}
	return ret;
}
EXPORT_SYMBOL(alloc_buffer_head);

void free_buffer_head(struct buffer_head *bh)
{
	BUG_ON(!list_empty(&bh->b_assoc_buffers));
	kmem_cache_free(bh_cachep, bh);
	get_cpu_var(bh_accounting).nr--;
	recalc_bh_state();
	put_cpu_var(bh_accounting);
}
EXPORT_SYMBOL(free_buffer_head);
static void
init_buffer_head(void *data, struct kmem_cache *cachep, unsigned long flags)
{
	if (flags & SLAB_CTOR_CONSTRUCTOR) {
		struct buffer_head * bh = (struct buffer_head *)data;

		memset(bh, 0, sizeof(*bh));
		INIT_LIST_HEAD(&bh->b_assoc_buffers);
	}
}
static void buffer_exit_cpu(int cpu)
{
	int i;
	struct bh_lru *b = &per_cpu(bh_lrus, cpu);

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
	get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
	per_cpu(bh_accounting, cpu).nr = 0;
	put_cpu_var(bh_accounting);
}

static int buffer_cpu_notify(struct notifier_block *self,
			      unsigned long action, void *hcpu)
{
	if (action == CPU_DEAD)
		buffer_exit_cpu((unsigned long)hcpu);
	return NOTIFY_OK;
}
void __init buffer_init(void)
{
	int nrpages;

	bh_cachep = kmem_cache_create("buffer_head",
					sizeof(struct buffer_head), 0,
					(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
					SLAB_MEM_SPREAD),
					init_buffer_head,
					NULL);

	/*
	 * Limit the bh occupancy to 10% of ZONE_NORMAL
	 */
	nrpages = (nr_free_buffer_pages() * 10) / 100;
	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
	hotcpu_notifier(buffer_cpu_notify, 0);
}
EXPORT_SYMBOL(__bforget);
EXPORT_SYMBOL(__brelse);
EXPORT_SYMBOL(__wait_on_buffer);
EXPORT_SYMBOL(block_commit_write);
EXPORT_SYMBOL(block_prepare_write);
EXPORT_SYMBOL(block_read_full_page);
EXPORT_SYMBOL(block_sync_page);
EXPORT_SYMBOL(block_truncate_page);
EXPORT_SYMBOL(block_write_full_page);
EXPORT_SYMBOL(cont_prepare_write);
EXPORT_SYMBOL(end_buffer_read_sync);
EXPORT_SYMBOL(end_buffer_write_sync);
EXPORT_SYMBOL(file_fsync);
EXPORT_SYMBOL(fsync_bdev);
EXPORT_SYMBOL(generic_block_bmap);
EXPORT_SYMBOL(generic_commit_write);
EXPORT_SYMBOL(generic_cont_expand);
EXPORT_SYMBOL(generic_cont_expand_simple);
EXPORT_SYMBOL(init_buffer);
EXPORT_SYMBOL(invalidate_bdev);
EXPORT_SYMBOL(ll_rw_block);
EXPORT_SYMBOL(mark_buffer_dirty);
EXPORT_SYMBOL(submit_bh);
EXPORT_SYMBOL(sync_dirty_buffer);
EXPORT_SYMBOL(unlock_buffer);
);