/*
 * linux/fs/jbd2/transaction.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Generic filesystem transaction handling code; part of the ext2fs
 * journaling system.
 *
 * This file manages transactions (compound commits managed by the
 * journaling code) and handles (individual atomic operations by the
 * filesystem).
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/highmem.h>

static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh);
/*
 * jbd2_get_transaction: obtain a new transaction_t object.
 *
 * Simply allocate and initialise a new transaction.  Create it in
 * RUNNING state and add it to the current journal (which should not
 * have an existing running transaction: we only make a new transaction
 * once we have started to commit the old one).
 *
 * The journal MUST be locked.  We don't perform atomic mallocs on the
 * new transaction and we can't block without protecting against other
 * processes trying to touch the journal while it is in transition.
 */
static transaction_t *
jbd2_get_transaction(journal_t *journal, transaction_t *transaction)
{
	transaction->t_journal = journal;
	transaction->t_state = T_RUNNING;
	transaction->t_tid = journal->j_transaction_sequence++;
	transaction->t_expires = jiffies + journal->j_commit_interval;
	spin_lock_init(&transaction->t_handle_lock);
	INIT_LIST_HEAD(&transaction->t_inode_list);
	INIT_LIST_HEAD(&transaction->t_private_list);

	/* Set up the commit timer for the new transaction. */
	journal->j_commit_timer.expires = round_jiffies(transaction->t_expires);
	add_timer(&journal->j_commit_timer);

	J_ASSERT(journal->j_running_transaction == NULL);
	journal->j_running_transaction = transaction;
	transaction->t_max_wait = 0;
	transaction->t_start = jiffies;

	return transaction;
}
/*
 * A handle_t is an object which represents a single atomic update to a
 * filesystem, and which tracks all of the modifications which form part
 * of that one update.
 */

/*
 * start_this_handle: Given a handle, deal with any locking or stalling
 * needed to make sure that there is enough journal space for the handle
 * to begin.  Attach the handle to a transaction and set up the
 * transaction's buffer credits.
 */
static int start_this_handle(journal_t *journal, handle_t *handle)
{
	transaction_t *transaction;
	int needed;
	int nblocks = handle->h_buffer_credits;
	transaction_t *new_transaction = NULL;
	int ret = 0;
	unsigned long ts = jiffies;

	if (nblocks > journal->j_max_transaction_buffers) {
		printk(KERN_ERR "JBD: %s wants too many credits (%d > %d)\n",
		       current->comm, nblocks,
		       journal->j_max_transaction_buffers);
		ret = -ENOSPC;
		goto out;
	}

alloc_transaction:
	if (!journal->j_running_transaction) {
		new_transaction = kzalloc(sizeof(*new_transaction),
					  GFP_NOFS|__GFP_NOFAIL);
		if (!new_transaction) {
			ret = -ENOMEM;
			goto out;
		}
	}

	jbd_debug(3, "New handle %p going live.\n", handle);

repeat:

	/*
	 * We need to hold j_state_lock until t_updates has been incremented,
	 * for proper journal barrier handling
	 */
	spin_lock(&journal->j_state_lock);
repeat_locked:
	if (is_journal_aborted(journal) ||
	    (journal->j_errno != 0 && !(journal->j_flags & JBD2_ACK_ERR))) {
		spin_unlock(&journal->j_state_lock);
		ret = -EROFS;
		goto out;
	}

	/* Wait on the journal's transaction barrier if necessary */
	if (journal->j_barrier_count) {
		spin_unlock(&journal->j_state_lock);
		wait_event(journal->j_wait_transaction_locked,
				journal->j_barrier_count == 0);
		goto repeat;
	}

	if (!journal->j_running_transaction) {
		if (!new_transaction) {
			spin_unlock(&journal->j_state_lock);
			goto alloc_transaction;
		}
		jbd2_get_transaction(journal, new_transaction);
		new_transaction = NULL;
	}

	transaction = journal->j_running_transaction;

	/*
	 * If the current transaction is locked down for commit, wait for the
	 * lock to be released.
	 */
	if (transaction->t_state == T_LOCKED) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&journal->j_wait_transaction_locked,
					&wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&journal->j_state_lock);
		schedule();
		finish_wait(&journal->j_wait_transaction_locked, &wait);
		goto repeat;
	}

	/*
	 * If there is not enough space left in the log to write all potential
	 * buffers requested by this operation, we need to stall pending a log
	 * checkpoint to free some more log space.
	 */
	spin_lock(&transaction->t_handle_lock);
	needed = transaction->t_outstanding_credits + nblocks;

	if (needed > journal->j_max_transaction_buffers) {
		/*
		 * If the current transaction is already too large, then start
		 * to commit it: we can then go back and attach this handle to
		 * a new transaction.
		 */
		DEFINE_WAIT(wait);

		jbd_debug(2, "Handle %p starting new commit...\n", handle);
		spin_unlock(&transaction->t_handle_lock);
		prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
				TASK_UNINTERRUPTIBLE);
		__jbd2_log_start_commit(journal, transaction->t_tid);
		spin_unlock(&journal->j_state_lock);
		schedule();
		finish_wait(&journal->j_wait_transaction_locked, &wait);
		goto repeat;
	}

	/*
	 * The commit code assumes that it can get enough log space
	 * without forcing a checkpoint.  This is *critical* for
	 * correctness: a checkpoint of a buffer which is also
	 * associated with a committing transaction creates a deadlock,
	 * so commit simply cannot force through checkpoints.
	 *
	 * We must therefore ensure the necessary space in the journal
	 * *before* starting to dirty potentially checkpointed buffers
	 * in the new transaction.
	 *
	 * The worst part is, any transaction currently committing can
	 * reduce the free space arbitrarily.  Be careful to account for
	 * those buffers when checkpointing.
	 */

	/*
	 * @@@ AKPM: This seems rather over-defensive.  We're giving commit
	 * a _lot_ of headroom: 1/4 of the journal plus the size of
	 * the committing transaction.  Really, we only need to give it
	 * committing_transaction->t_outstanding_credits plus "enough" for
	 * the log control blocks.
	 * Also, this test is inconsistent with the matching one in
	 * jbd2_journal_extend().
	 */
	if (__jbd2_log_space_left(journal) < jbd_space_needed(journal)) {
		jbd_debug(2, "Handle %p waiting for checkpoint...\n", handle);
		spin_unlock(&transaction->t_handle_lock);
		__jbd2_log_wait_for_space(journal);
		goto repeat_locked;
	}

	/* OK, account for the buffers that this operation expects to
	 * use and add the handle to the running transaction. */

	if (time_after(transaction->t_start, ts)) {
		ts = jbd2_time_diff(ts, transaction->t_start);
		if (ts > transaction->t_max_wait)
			transaction->t_max_wait = ts;
	}

	handle->h_transaction = transaction;
	transaction->t_outstanding_credits += nblocks;
	transaction->t_updates++;
	transaction->t_handle_count++;
	jbd_debug(4, "Handle %p given %d credits (total %d, free %d)\n",
		  handle, nblocks, transaction->t_outstanding_credits,
		  __jbd2_log_space_left(journal));
	spin_unlock(&transaction->t_handle_lock);
	spin_unlock(&journal->j_state_lock);
out:
	if (unlikely(new_transaction))		/* It's usually NULL */
		kfree(new_transaction);
	return ret;
}
static struct lock_class_key jbd2_handle_key;

/* Allocate a new handle.  This should probably be in a slab... */
static handle_t *new_handle(int nblocks)
{
	handle_t *handle = jbd2_alloc_handle(GFP_NOFS);
	if (!handle)
		return NULL;
	memset(handle, 0, sizeof(*handle));
	handle->h_buffer_credits = nblocks;
	handle->h_ref = 1;

	lockdep_init_map(&handle->h_lockdep_map, "jbd2_handle",
						&jbd2_handle_key, 0);

	return handle;
}
/**
 * handle_t *jbd2_journal_start() - Obtain a new handle.
 * @journal: Journal to start transaction on.
 * @nblocks: number of block buffers we might modify
 *
 * We make sure that the transaction can guarantee at least nblocks of
 * modified buffers in the log.  We block until the log can guarantee
 * that much space.
 *
 * This function is visible to journal users (like ext3fs), so is not
 * called with the journal already locked.
 *
 * Return a pointer to a newly allocated handle, or an ERR_PTR() value
 * on failure.
 */
handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
{
	handle_t *handle = journal_current_handle();
	int err;

	if (!journal)
		return ERR_PTR(-EROFS);

	if (handle) {
		J_ASSERT(handle->h_transaction->t_journal == journal);
		handle->h_ref++;
		return handle;
	}

	handle = new_handle(nblocks);
	if (!handle)
		return ERR_PTR(-ENOMEM);

	current->journal_info = handle;

	err = start_this_handle(journal, handle);
	if (err < 0) {
		jbd2_free_handle(handle);
		current->journal_info = NULL;
		handle = ERR_PTR(err);
		goto out;
	}

	lock_map_acquire(&handle->h_lockdep_map);
out:
	return handle;
}
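
/*
 * Illustrative usage (a sketch, not part of the original file): a typical
 * filesystem-side sequence wraps a metadata update in a handle.  The
 * credit count of 2 and the "modify bh->b_data" step are placeholders;
 * the jbd2 calls themselves are the API defined in this file.
 *
 *	handle_t *handle = jbd2_journal_start(journal, 2);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	err = jbd2_journal_get_write_access(handle, bh);
 *	if (!err) {
 *		... modify bh->b_data ...
 *		err = jbd2_journal_dirty_metadata(handle, bh);
 *	}
 *	jbd2_journal_stop(handle);
 *	return err;
 */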
/**
 * int jbd2_journal_extend() - extend buffer credits.
 * @handle:  handle to 'extend'
 * @nblocks: nr blocks to try to extend by.
 *
 * Some transactions, such as large extends and truncates, can be done
 * atomically all at once or in several stages.  The operation requests
 * a credit for a number of buffer modifications in advance, but can
 * extend its credit if it needs more.
 *
 * jbd2_journal_extend tries to give the running handle more buffer credits.
 * It does not guarantee that allocation - this is a best-effort only.
 * The calling process MUST be able to deal cleanly with a failure to
 * extend here.
 *
 * Return 0 on success, non-zero on failure.
 *
 * return code < 0 implies an error
 * return code > 0 implies normal transaction-full status.
 */
int jbd2_journal_extend(handle_t *handle, int nblocks)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;
	int result;
	int wanted;

	result = -EIO;
	if (is_handle_aborted(handle))
		goto out;

	result = 1;

	spin_lock(&journal->j_state_lock);

	/* Don't extend a locked-down transaction! */
	if (handle->h_transaction->t_state != T_RUNNING) {
		jbd_debug(3, "denied handle %p %d blocks: "
			  "transaction not running\n", handle, nblocks);
		goto error_out;
	}

	spin_lock(&transaction->t_handle_lock);
	wanted = transaction->t_outstanding_credits + nblocks;

	if (wanted > journal->j_max_transaction_buffers) {
		jbd_debug(3, "denied handle %p %d blocks: "
			  "transaction too large\n", handle, nblocks);
		goto unlock;
	}

	if (wanted > __jbd2_log_space_left(journal)) {
		jbd_debug(3, "denied handle %p %d blocks: "
			  "insufficient log space\n", handle, nblocks);
		goto unlock;
	}

	handle->h_buffer_credits += nblocks;
	transaction->t_outstanding_credits += nblocks;
	result = 0;

	jbd_debug(3, "extended handle %p by %d\n", handle, nblocks);
unlock:
	spin_unlock(&transaction->t_handle_lock);
error_out:
	spin_unlock(&journal->j_state_lock);
out:
	return result;
}
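
/*
 * Illustrative pattern (a sketch, not part of the original file): callers
 * that discover they need more credits part-way through an operation
 * usually try jbd2_journal_extend() first and fall back to
 * jbd2_journal_restart() when the extend is denied (positive return).
 *
 *	err = jbd2_journal_extend(handle, needed);
 *	if (err > 0)
 *		err = jbd2_journal_restart(handle, needed);
 *	if (err)
 *		... give up: the handle could not be grown ...
 */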
/**
 * int jbd2_journal_restart() - restart a handle.
 * @handle:  handle to restart
 * @nblocks: nr credits requested
 *
 * Restart a handle for a multi-transaction filesystem
 * operation.
 *
 * If the jbd2_journal_extend() call above fails to grant new buffer credits
 * to a running handle, a call to jbd2_journal_restart will commit the
 * handle's transaction so far and reattach the handle to a new
 * transaction capable of guaranteeing the requested number of
 * credits.
 */
int jbd2_journal_restart(handle_t *handle, int nblocks)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;
	int ret;

	/* If we've had an abort of any type, don't even think about
	 * actually doing the restart! */
	if (is_handle_aborted(handle))
		return 0;

	/*
	 * First unlink the handle from its current transaction, and start the
	 * commit on that.
	 */
	J_ASSERT(transaction->t_updates > 0);
	J_ASSERT(journal_current_handle() == handle);

	spin_lock(&journal->j_state_lock);
	spin_lock(&transaction->t_handle_lock);
	transaction->t_outstanding_credits -= handle->h_buffer_credits;
	transaction->t_updates--;

	if (!transaction->t_updates)
		wake_up(&journal->j_wait_updates);
	spin_unlock(&transaction->t_handle_lock);

	jbd_debug(2, "restarting handle %p\n", handle);
	__jbd2_log_start_commit(journal, transaction->t_tid);
	spin_unlock(&journal->j_state_lock);

	handle->h_buffer_credits = nblocks;
	ret = start_this_handle(journal, handle);
	return ret;
}
/**
 * void jbd2_journal_lock_updates () - establish a transaction barrier.
 * @journal:  Journal to establish a barrier on.
 *
 * This locks out any further updates from being started, and blocks
 * until all existing updates have completed, returning only once the
 * journal is in a quiescent state with no updates running.
 *
 * The journal lock should not be held on entry.
 */
void jbd2_journal_lock_updates(journal_t *journal)
{
	spin_lock(&journal->j_state_lock);
	++journal->j_barrier_count;

	/* Wait until there are no running updates */
	while (1) {
		transaction_t *transaction = journal->j_running_transaction;
		DEFINE_WAIT(wait);

		if (!transaction)
			break;

		spin_lock(&transaction->t_handle_lock);
		if (!transaction->t_updates) {
			spin_unlock(&transaction->t_handle_lock);
			break;
		}
		prepare_to_wait(&journal->j_wait_updates, &wait,
				TASK_UNINTERRUPTIBLE);
		spin_unlock(&transaction->t_handle_lock);
		spin_unlock(&journal->j_state_lock);
		schedule();
		finish_wait(&journal->j_wait_updates, &wait);
		spin_lock(&journal->j_state_lock);
	}
	spin_unlock(&journal->j_state_lock);

	/*
	 * We have now established a barrier against other normal updates, but
	 * we also need to barrier against other jbd2_journal_lock_updates() calls
	 * to make sure that we serialise special journal-locked operations
	 * too.
	 */
	mutex_lock(&journal->j_barrier);
}
/**
 * void jbd2_journal_unlock_updates (journal_t* journal) - release barrier
 * @journal:  Journal to release the barrier on.
 *
 * Release a transaction barrier obtained with jbd2_journal_lock_updates().
 *
 * Should be called without the journal lock held.
 */
void jbd2_journal_unlock_updates (journal_t *journal)
{
	J_ASSERT(journal->j_barrier_count != 0);

	mutex_unlock(&journal->j_barrier);
	spin_lock(&journal->j_state_lock);
	--journal->j_barrier_count;
	spin_unlock(&journal->j_state_lock);
	wake_up(&journal->j_wait_transaction_locked);
}
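
/*
 * Illustrative pattern (a sketch, not part of the original file): callers
 * that need the journal quiesced - for example around a special
 * journal-locked operation - bracket the work with the barrier pair.
 * my_fs_do_quiesced_work() is a made-up placeholder.
 *
 *	jbd2_journal_lock_updates(journal);
 *	my_fs_do_quiesced_work(journal);
 *	jbd2_journal_unlock_updates(journal);
 */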
/*
 * Report any unexpected dirty buffers which turn up.  Normally those
 * indicate an error, but they can occur if the user is running (say)
 * tune2fs to modify the live filesystem, so we need the option of
 * continuing as gracefully as possible.
 *
 * The caller should already hold the journal lock and
 * j_list_lock spinlock: most callers will need those anyway
 * in order to probe the buffer's journaling state safely.
 */
static void jbd_unexpected_dirty_buffer(struct journal_head *jh)
{
	int jlist;

	/* If this buffer is one which might reasonably be dirty
	 * --- ie. data, or not part of this journal --- then
	 * we're OK to leave it alone, but otherwise we need to
	 * move the dirty bit to the journal's own internal
	 * JBDDirty bit. */
	jlist = jh->b_jlist;

	if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
	    jlist == BJ_Shadow || jlist == BJ_Forget) {
		struct buffer_head *bh = jh2bh(jh);

		if (test_clear_buffer_dirty(bh))
			set_buffer_jbddirty(bh);
	}
}
/*
 * If the buffer is already part of the current transaction, then there
 * is nothing we need to do.  If it is already part of a prior
 * transaction which we are still committing to disk, then we need to
 * make sure that we do not overwrite the old copy: we do copy-out to
 * preserve the copy going to disk.  We also account the buffer against
 * the handle's metadata buffer credits (unless the buffer is already
 * part of the transaction, that is).
 */
static int
do_get_write_access(handle_t *handle, struct journal_head *jh,
			int force_copy)
{
	struct buffer_head *bh;
	transaction_t *transaction;
	journal_t *journal;
	int error;
	char *frozen_buffer = NULL;
	int need_copy = 0;

	if (is_handle_aborted(handle))
		return -EROFS;

	transaction = handle->h_transaction;
	journal = transaction->t_journal;

	jbd_debug(5, "buffer_head %p, force_copy %d\n", jh, force_copy);

	JBUFFER_TRACE(jh, "entry");
repeat:
	bh = jh2bh(jh);

	/* @@@ Need to check for errors here at some point. */

	lock_buffer(bh);
	jbd_lock_bh_state(bh);

	/* We now hold the buffer lock so it is safe to query the buffer
	 * state.  Is the buffer dirty?
	 *
	 * If so, there are two possibilities.  The buffer may be
	 * non-journaled, and undergoing a quite legitimate writeback.
	 * Otherwise, it is journaled, and we don't expect dirty buffers
	 * in that state (the buffers should be marked JBD_Dirty
	 * instead.)  So either the IO is being done under our own
	 * control and this is a bug, or it's a third party IO such as
	 * dump(8) (which may leave the buffer scheduled for read ---
	 * ie. locked but not dirty) or tune2fs (which may actually have
	 * the buffer dirtied, ugh.)  */

	if (buffer_dirty(bh)) {
		/*
		 * First question: is this buffer already part of the current
		 * transaction or the existing committing transaction?
		 */
		if (jh->b_transaction) {
			J_ASSERT_JH(jh,
				jh->b_transaction == transaction ||
				jh->b_transaction ==
					journal->j_committing_transaction);
			if (jh->b_next_transaction)
				J_ASSERT_JH(jh, jh->b_next_transaction ==
							transaction);
		}
		/*
		 * In any case we need to clean the dirty flag and we must
		 * do it under the buffer lock to be sure we don't race
		 * with running write-out.
		 */
		JBUFFER_TRACE(jh, "Unexpected dirty buffer");
		jbd_unexpected_dirty_buffer(jh);
	}

	unlock_buffer(bh);

	error = -EROFS;
	if (is_handle_aborted(handle)) {
		jbd_unlock_bh_state(bh);
		goto out;
	}
	error = 0;

	/*
	 * The buffer is already part of this transaction if b_transaction or
	 * b_next_transaction points to it
	 */
	if (jh->b_transaction == transaction ||
	    jh->b_next_transaction == transaction)
		goto done;

	/*
	 * this is the first time this transaction is touching this buffer,
	 * reset the modified flag
	 */
	jh->b_modified = 0;

	/*
	 * If there is already a copy-out version of this buffer, then we don't
	 * need to make another one
	 */
	if (jh->b_frozen_data) {
		JBUFFER_TRACE(jh, "has frozen data");
		J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
		jh->b_next_transaction = transaction;
		goto done;
	}

	/* Is there data here we need to preserve? */

	if (jh->b_transaction && jh->b_transaction != transaction) {
		JBUFFER_TRACE(jh, "owned by older transaction");
		J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
		J_ASSERT_JH(jh, jh->b_transaction ==
					journal->j_committing_transaction);

		/* There is one case we have to be very careful about.
		 * If the committing transaction is currently writing
		 * this buffer out to disk and has NOT made a copy-out,
		 * then we cannot modify the buffer contents at all
		 * right now.  The essence of copy-out is that it is the
		 * extra copy, not the primary copy, which gets
		 * journaled.  If the primary copy is already going to
		 * disk then we cannot do copy-out here. */

		if (jh->b_jlist == BJ_Shadow) {
			DEFINE_WAIT_BIT(wait, &bh->b_state, BH_Unshadow);
			wait_queue_head_t *wqh;

			wqh = bit_waitqueue(&bh->b_state, BH_Unshadow);

			JBUFFER_TRACE(jh, "on shadow: sleep");
			jbd_unlock_bh_state(bh);
			/* commit wakes up all shadow buffers after IO */
			for ( ; ; ) {
				prepare_to_wait(wqh, &wait.wait,
						TASK_UNINTERRUPTIBLE);
				if (jh->b_jlist != BJ_Shadow)
					break;
				schedule();
			}
			finish_wait(wqh, &wait.wait);
			goto repeat;
		}

		/* Only do the copy if the currently-owning transaction
		 * still needs it.  If it is on the Forget list, the
		 * committing transaction is past that stage.  The
		 * buffer had better remain locked during the kmalloc,
		 * but that should be true --- we hold the journal lock
		 * still and the buffer is already on the BUF_JOURNAL
		 * list so won't be flushed.
		 *
		 * Subtle point, though: if this is a get_undo_access,
		 * then we will be relying on the frozen_data to contain
		 * the new value of the committed_data record after the
		 * transaction, so we HAVE to force the frozen_data copy
		 * in that case. */

		if (jh->b_jlist != BJ_Forget || force_copy) {
			JBUFFER_TRACE(jh, "generate frozen data");
			if (!frozen_buffer) {
				JBUFFER_TRACE(jh, "allocate memory for buffer");
				jbd_unlock_bh_state(bh);
				frozen_buffer =
					jbd2_alloc(jh2bh(jh)->b_size,
							 GFP_NOFS);
				if (!frozen_buffer) {
					printk(KERN_EMERG
					       "%s: OOM for frozen_buffer\n",
					       __func__);
					JBUFFER_TRACE(jh, "oom!");
					error = -ENOMEM;
					jbd_lock_bh_state(bh);
					goto done;
				}
				goto repeat;
			}
			jh->b_frozen_data = frozen_buffer;
			frozen_buffer = NULL;
			need_copy = 1;
		}
		jh->b_next_transaction = transaction;
	}


	/*
	 * Finally, if the buffer is not journaled right now, we need to make
	 * sure it doesn't get written to disk before the caller actually
	 * commits the new data
	 */
	if (!jh->b_transaction) {
		JBUFFER_TRACE(jh, "no transaction");
		J_ASSERT_JH(jh, !jh->b_next_transaction);
		jh->b_transaction = transaction;
		JBUFFER_TRACE(jh, "file as BJ_Reserved");
		spin_lock(&journal->j_list_lock);
		__jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
		spin_unlock(&journal->j_list_lock);
	}

done:
	if (need_copy) {
		struct page *page;
		int offset;
		char *source;

		J_EXPECT_JH(jh, buffer_uptodate(jh2bh(jh)),
			    "Possible IO failure.\n");
		page = jh2bh(jh)->b_page;
		offset = ((unsigned long) jh2bh(jh)->b_data) & ~PAGE_MASK;
		source = kmap_atomic(page, KM_USER0);
		memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size);
		kunmap_atomic(source, KM_USER0);

		/*
		 * Now that the frozen data is saved off, we need to store
		 * any matching triggers.
		 */
		jh->b_frozen_triggers = jh->b_triggers;
	}
	jbd_unlock_bh_state(bh);

	/*
	 * If we are about to journal a buffer, then any revoke pending on it is
	 * no longer valid
	 */
	jbd2_journal_cancel_revoke(handle, jh);

out:
	if (unlikely(frozen_buffer))	/* It's usually NULL */
		jbd2_free(frozen_buffer, bh->b_size);

	JBUFFER_TRACE(jh, "exit");
	return error;
}
/**
 * int jbd2_journal_get_write_access() - notify intent to modify a buffer for metadata (not data) update.
 * @handle: transaction to add buffer modifications to
 * @bh:     bh to be used for metadata writes
 * @credits: variable that will receive credits for the buffer
 *
 * Returns an error code or 0 on success.
 *
 * In full data journalling mode the buffer may be of type BJ_AsyncData,
 * because we're write()ing a buffer which is also part of a shared mapping.
 */
int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
{
	struct journal_head *jh = jbd2_journal_add_journal_head(bh);
	int rc;

	/* We do not want to get caught playing with fields which the
	 * log thread also manipulates.  Make sure that the buffer
	 * completes any outstanding IO before proceeding. */
	rc = do_get_write_access(handle, jh, 0);
	jbd2_journal_put_journal_head(jh);
	return rc;
}
/*
 * When the user wants to journal a newly created buffer_head
 * (ie. getblk() returned a new buffer and we are going to populate it
 * manually rather than reading off disk), then we need to keep the
 * buffer_head locked until it has been completely filled with new
 * data.  In this case, we should be able to make the assertion that
 * the bh is not already part of an existing transaction.
 *
 * The buffer should already be locked by the caller by this point.
 * There is no lock ranking violation: it was a newly created,
 * unlocked buffer beforehand. */
/**
 * int jbd2_journal_get_create_access () - notify intent to use newly created bh
 * @handle: transaction to add the new buffer to
 * @bh: new buffer.
 *
 * Call this if you create a new bh.
 */
int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;
	struct journal_head *jh = jbd2_journal_add_journal_head(bh);
	int err;

	jbd_debug(5, "journal_head %p\n", jh);
	err = -EROFS;
	if (is_handle_aborted(handle))
		goto out;
	err = 0;

	JBUFFER_TRACE(jh, "entry");
	/*
	 * The buffer may already belong to this transaction due to pre-zeroing
	 * in the filesystem's new_block code.  It may also be on the previous,
	 * committing transaction's lists, but it HAS to be in Forget state in
	 * that case: the transaction must have deleted the buffer for it to be
	 * reused here.
	 */
	jbd_lock_bh_state(bh);
	spin_lock(&journal->j_list_lock);
	J_ASSERT_JH(jh, (jh->b_transaction == transaction ||
		jh->b_transaction == NULL ||
		(jh->b_transaction == journal->j_committing_transaction &&
			  jh->b_jlist == BJ_Forget)));

	J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
	J_ASSERT_JH(jh, buffer_locked(jh2bh(jh)));

	if (jh->b_transaction == NULL) {
		jh->b_transaction = transaction;

		/* first access by this transaction */
		jh->b_modified = 0;

		JBUFFER_TRACE(jh, "file as BJ_Reserved");
		__jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
	} else if (jh->b_transaction == journal->j_committing_transaction) {
		/* first access by this transaction */
		jh->b_modified = 0;

		JBUFFER_TRACE(jh, "set next transaction");
		jh->b_next_transaction = transaction;
	}
	spin_unlock(&journal->j_list_lock);
	jbd_unlock_bh_state(bh);

	/*
	 * akpm: I added this.  ext3_alloc_branch can pick up new indirect
	 * blocks which contain freed but then revoked metadata.  We need
	 * to cancel the revoke in case we end up freeing it yet again
	 * and the reallocating as data - this would cause a second revoke,
	 * which hits an assertion error.
	 */
	JBUFFER_TRACE(jh, "cancelling revoke");
	jbd2_journal_cancel_revoke(handle, jh);
	jbd2_journal_put_journal_head(jh);
out:
	return err;
}
/**
 * int jbd2_journal_get_undo_access() -  Notify intent to modify metadata with
 *     non-rewindable consequences
 * @handle: transaction
 * @bh: buffer to undo
 * @credits: store the number of taken credits here (if not NULL)
 *
 * Sometimes there is a need to distinguish between metadata which has
 * been committed to disk and that which has not.  The ext3fs code uses
 * this for freeing and allocating space, we have to make sure that we
 * do not reuse freed space until the deallocation has been committed,
 * since if we overwrote that space we would make the delete
 * un-rewindable in case of a crash.
 *
 * To deal with that, jbd2_journal_get_undo_access requests write access to a
 * buffer for parts of non-rewindable operations such as delete
 * operations on the bitmaps.  The journaling code must keep a copy of
 * the buffer's contents prior to the undo_access call until such time
 * as we know that the buffer has definitely been committed to disk.
 *
 * We never need to know which transaction the committed data is part
 * of, buffers touched here are guaranteed to be dirtied later and so
 * will be committed to a new transaction in due course, at which point
 * we can discard the old committed data pointer.
 *
 * Returns error number or 0 on success.
 */
int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
{
	int err;
	struct journal_head *jh = jbd2_journal_add_journal_head(bh);
	char *committed_data = NULL;

	JBUFFER_TRACE(jh, "entry");

	/*
	 * Do this first --- it can drop the journal lock, so we want to
	 * make sure that obtaining the committed_data is done
	 * atomically wrt. completion of any outstanding commits.
	 */
	err = do_get_write_access(handle, jh, 1);
	if (err)
		goto out;

repeat:
	if (!jh->b_committed_data) {
		committed_data = jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS);
		if (!committed_data) {
			printk(KERN_EMERG "%s: No memory for committed data\n",
				__func__);
			err = -ENOMEM;
			goto out;
		}
	}

	jbd_lock_bh_state(bh);
	if (!jh->b_committed_data) {
		/* Copy out the current buffer contents into the
		 * preserved, committed copy. */
		JBUFFER_TRACE(jh, "generate b_committed data");
		if (!committed_data) {
			jbd_unlock_bh_state(bh);
			goto repeat;
		}

		jh->b_committed_data = committed_data;
		committed_data = NULL;
		memcpy(jh->b_committed_data, bh->b_data, bh->b_size);
	}
	jbd_unlock_bh_state(bh);
out:
	jbd2_journal_put_journal_head(jh);
	if (unlikely(committed_data))
		jbd2_free(committed_data, bh->b_size);
	return err;
}
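
/*
 * Illustrative pattern (a sketch, not part of the original file): a
 * bitmap-style free path takes undo access before clearing bits, so the
 * committed copy of the bitmap is preserved until the deallocation
 * commits.  bitmap_bh and the bit-clearing step are placeholders.
 *
 *	err = jbd2_journal_get_undo_access(handle, bitmap_bh);
 *	if (err)
 *		return err;
 *	... clear the relevant bits in bitmap_bh->b_data ...
 *	err = jbd2_journal_dirty_metadata(handle, bitmap_bh);
 */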
/**
 * void jbd2_journal_set_triggers() - Add triggers for commit writeout
 * @bh: buffer to trigger on
 * @type: struct jbd2_buffer_trigger_type containing the trigger(s).
 *
 * Set any triggers on this journal_head.  This is always safe, because
 * triggers for a committing buffer will be saved off, and triggers for
 * a running transaction will match the buffer in that transaction.
 *
 * Call with NULL to clear the triggers.
 */
void jbd2_journal_set_triggers(struct buffer_head *bh,
			       struct jbd2_buffer_trigger_type *type)
{
	struct journal_head *jh = bh2jh(bh);

	jh->b_triggers = type;
}
void jbd2_buffer_commit_trigger(struct journal_head *jh, void *mapped_data,
				struct jbd2_buffer_trigger_type *triggers)
{
	struct buffer_head *bh = jh2bh(jh);

	if (!triggers || !triggers->t_commit)
		return;

	triggers->t_commit(triggers, bh, mapped_data, bh->b_size);
}
void jbd2_buffer_abort_trigger(struct journal_head *jh,
			       struct jbd2_buffer_trigger_type *triggers)
{
	if (!triggers || !triggers->t_abort)
		return;

	triggers->t_abort(triggers, jh2bh(jh));
}
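
/*
 * Illustrative sketch (not part of the original file): a filesystem that
 * wants to recompute a checksum just before metadata is written can hook
 * the commit trigger.  The callback signature below mirrors how t_commit
 * is invoked above; my_fs_commit_trigger and my_fs_recalc_csum are
 * made-up names.
 *
 *	static void my_fs_commit_trigger(struct jbd2_buffer_trigger_type *t,
 *					 struct buffer_head *bh,
 *					 void *mapped_data, size_t size)
 *	{
 *		my_fs_recalc_csum(mapped_data, size);
 *	}
 *
 *	static struct jbd2_buffer_trigger_type my_fs_csum_trigger = {
 *		.t_commit = my_fs_commit_trigger,
 *	};
 *
 *	jbd2_journal_set_triggers(bh, &my_fs_csum_trigger);
 */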
/**
 * int jbd2_journal_dirty_metadata() -  mark a buffer as containing dirty metadata
 * @handle: transaction to add buffer to.
 * @bh: buffer to mark
 *
 * mark dirty metadata which needs to be journaled as part of the current
 * transaction.
 *
 * The buffer is placed on the transaction's metadata list and is marked
 * as belonging to the transaction.
 *
 * Returns error number or 0 on success.
 *
 * Special care needs to be taken if the buffer already belongs to the
 * current committing transaction (in which case we should have frozen
 * data present for that commit).  In that case, we don't relink the
 * buffer: that only gets done when the old transaction finally
 * completes its commit.
 */
int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;
	struct journal_head *jh = bh2jh(bh);

	jbd_debug(5, "journal_head %p\n", jh);
	JBUFFER_TRACE(jh, "entry");
	if (is_handle_aborted(handle))
		goto out;

	jbd_lock_bh_state(bh);

	if (jh->b_modified == 0) {
		/*
		 * This buffer's got modified and is becoming part
		 * of the transaction.  This needs to be done
		 * once per transaction -bzzz
		 */
		jh->b_modified = 1;
		J_ASSERT_JH(jh, handle->h_buffer_credits > 0);
		handle->h_buffer_credits--;
	}

	/*
	 * fastpath, to avoid expensive locking.  If this buffer is already
	 * on the running transaction's metadata list there is nothing to do.
	 * Nobody can take it off again because there is a handle open.
	 * I _think_ we're OK here with SMP barriers - a mistaken decision will
	 * result in this test being false, so we go in and take the locks.
	 */
	if (jh->b_transaction == transaction && jh->b_jlist == BJ_Metadata) {
		JBUFFER_TRACE(jh, "fastpath");
		J_ASSERT_JH(jh, jh->b_transaction ==
					journal->j_running_transaction);
		goto out_unlock_bh;
	}

	set_buffer_jbddirty(bh);

	/*
	 * Metadata already on the current transaction list doesn't
	 * need to be filed.  Metadata on another transaction's list must
	 * be committing, and will be refiled once the commit completes:
	 * leave it alone for now.
	 */
	if (jh->b_transaction != transaction) {
		JBUFFER_TRACE(jh, "already on other transaction");
		J_ASSERT_JH(jh, jh->b_transaction ==
					journal->j_committing_transaction);
		J_ASSERT_JH(jh, jh->b_next_transaction == transaction);
		/* And this case is illegal: we can't reuse another
		 * transaction's data buffer, ever. */
		goto out_unlock_bh;
	}

	/* That test should have eliminated the following case: */
	J_ASSERT_JH(jh, jh->b_frozen_data == NULL);

	JBUFFER_TRACE(jh, "file as BJ_Metadata");
	spin_lock(&journal->j_list_lock);
	__jbd2_journal_file_buffer(jh, handle->h_transaction, BJ_Metadata);
	spin_unlock(&journal->j_list_lock);
out_unlock_bh:
	jbd_unlock_bh_state(bh);
out:
	JBUFFER_TRACE(jh, "exit");
	return 0;
}
/**
 * jbd2_journal_release_buffer: undo a get_write_access without any buffer
 * updates, if the update decided in the end that it didn't need access.
 */
void
jbd2_journal_release_buffer(handle_t *handle, struct buffer_head *bh)
{
	BUFFER_TRACE(bh, "entry");
}
/**
 * void jbd2_journal_forget() - bforget() for potentially-journaled buffers.
 * @handle: transaction handle
 * @bh:     bh to 'forget'
 *
 * We can only do the bforget if there are no commits pending against the
 * buffer.  If the buffer is dirty in the current running transaction we
 * can safely unlink it.
 *
 * bh may not be a journalled buffer at all - it may be a non-JBD
 * buffer which came off the hashtable.  Check for this.
 *
 * Decrements bh->b_count by one.
 *
 * Allow this call even if the handle has aborted --- it may be part of
 * the caller's cleanup after an abort.
 */
int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;
	struct journal_head *jh;
	int drop_reserve = 0;
	int err = 0;
	int was_modified = 0;

	BUFFER_TRACE(bh, "entry");

	jbd_lock_bh_state(bh);
	spin_lock(&journal->j_list_lock);

	if (!buffer_jbd(bh))
		goto not_jbd;
	jh = bh2jh(bh);

	/* Critical error: attempting to delete a bitmap buffer, maybe?
	 * Don't do any jbd operations, and return an error. */
	if (!J_EXPECT_JH(jh, !jh->b_committed_data,
			 "inconsistent data on disk")) {
		err = -EIO;
		goto not_jbd;
	}

	/* keep track of whether or not this transaction modified us */
	was_modified = jh->b_modified;

	/*
	 * The buffer's going from the transaction, we must drop
	 * all references -bzzz
	 */
	jh->b_modified = 0;

	if (jh->b_transaction == handle->h_transaction) {
		J_ASSERT_JH(jh, !jh->b_frozen_data);

		/* If we are forgetting a buffer which is already part
		 * of this transaction, then we can just drop it from
		 * the transaction immediately. */
		clear_buffer_dirty(bh);
		clear_buffer_jbddirty(bh);

		JBUFFER_TRACE(jh, "belongs to current transaction: unfile");

		/*
		 * we only want to drop a reference if this transaction
		 * modified the buffer
		 */
		if (was_modified)
			drop_reserve = 1;

		/*
		 * We are no longer going to journal this buffer.
		 * However, the commit of this transaction is still
		 * important to the buffer: the delete that we are now
		 * processing might obsolete an old log entry, so by
		 * committing, we can satisfy the buffer's checkpoint.
		 *
		 * So, if we have a checkpoint on the buffer, we should
		 * now refile the buffer on our BJ_Forget list so that
		 * we know to remove the checkpoint after we commit.
		 */

		if (jh->b_cp_transaction) {
			__jbd2_journal_temp_unlink_buffer(jh);
			__jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
		} else {
			__jbd2_journal_unfile_buffer(jh);
			jbd2_journal_remove_journal_head(bh);
			__brelse(bh);
			if (!buffer_jbd(bh)) {
				spin_unlock(&journal->j_list_lock);
				jbd_unlock_bh_state(bh);
				__bforget(bh);
				goto drop;
			}
		}
	} else if (jh->b_transaction) {
		J_ASSERT_JH(jh, (jh->b_transaction ==
				 journal->j_committing_transaction));
		/* However, if the buffer is still owned by a prior
		 * (committing) transaction, we can't drop it yet... */
		JBUFFER_TRACE(jh, "belongs to older transaction");
		/* ... but we CAN drop it from the new transaction if we
		 * have also modified it since the original commit. */

		if (jh->b_next_transaction) {
			J_ASSERT(jh->b_next_transaction == transaction);
			jh->b_next_transaction = NULL;

			/*
			 * only drop a reference if this transaction modified
			 * the buffer
			 */
			if (was_modified)
				drop_reserve = 1;
		}
	}

not_jbd:
	spin_unlock(&journal->j_list_lock);
	jbd_unlock_bh_state(bh);
	__brelse(bh);
drop:
	if (drop_reserve) {
		/* no need to reserve log space for this block -bzzz */
		handle->h_buffer_credits++;
	}
	return err;
}
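
/*
 * Illustrative pattern (a sketch, not part of the original file): a
 * delete path that has just freed a metadata block calls forget rather
 * than brelse, so the block is unlinked from the running transaction.
 * Note that jbd2_journal_forget() consumes one reference to bh.
 *
 *	err = jbd2_journal_forget(handle, bh);
 *	if (err)
 *		... the buffer could not be forgotten cleanly ...
 */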
/**
 * int jbd2_journal_stop() - complete a transaction
 * @handle: transaction to complete.
 *
 * All done for a particular handle.
 *
 * There is not much action needed here.  We just return any remaining
 * buffer credits to the transaction and remove the handle.  The only
 * complication is that we need to start a commit operation if the
 * filesystem is marked for synchronous update.
 *
 * jbd2_journal_stop itself will not usually return an error, but it may
 * do so in unusual circumstances.  In particular, expect it to
 * return -EIO if a jbd2_journal_abort has been executed since the
 * transaction began.
 */
int jbd2_journal_stop(handle_t *handle)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;
	int old_handle_count, err;
	pid_t pid;

	J_ASSERT(journal_current_handle() == handle);

	if (is_handle_aborted(handle))
		err = -EIO;
	else {
		J_ASSERT(transaction->t_updates > 0);
		err = 0;
	}

	if (--handle->h_ref > 0) {
		jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
			  handle->h_ref);
		return err;
	}

	jbd_debug(4, "Handle %p going down\n", handle);

	/*
	 * Implement synchronous transaction batching.  If the handle
	 * was synchronous, don't force a commit immediately.  Let's
	 * yield and let another thread piggyback onto this transaction.
	 * Keep doing that while new threads continue to arrive.
	 * It doesn't cost much - we're about to run a commit and sleep
	 * on IO anyway.  Speeds up many-threaded, many-dir operations
	 * by 30x or more...
	 *
	 * But don't do this if this process was the most recent one to
	 * perform a synchronous write.  We do this to detect the case where a
	 * single process is doing a stream of sync writes.  No point in waiting
	 * for joiners in that case.
	 */
	pid = current->pid;
	if (handle->h_sync && journal->j_last_sync_writer != pid) {
		journal->j_last_sync_writer = pid;
		do {
			old_handle_count = transaction->t_handle_count;
			schedule_timeout_uninterruptible(1);
		} while (old_handle_count != transaction->t_handle_count);
	}

	current->journal_info = NULL;
	spin_lock(&journal->j_state_lock);
	spin_lock(&transaction->t_handle_lock);
	transaction->t_outstanding_credits -= handle->h_buffer_credits;
	transaction->t_updates--;
	if (!transaction->t_updates) {
		wake_up(&journal->j_wait_updates);
		if (journal->j_barrier_count)
			wake_up(&journal->j_wait_transaction_locked);
	}

	/*
	 * If the handle is marked SYNC, we need to set another commit
	 * going!  We also want to force a commit if the current
	 * transaction is occupying too much of the log, or if the
	 * transaction is too old now.
	 */
	if (handle->h_sync ||
			transaction->t_outstanding_credits >
				journal->j_max_transaction_buffers ||
			time_after_eq(jiffies, transaction->t_expires)) {
		/* Do this even for aborted journals: an abort still
		 * completes the commit thread, it just doesn't write
		 * anything to disk. */
		tid_t tid = transaction->t_tid;

		spin_unlock(&transaction->t_handle_lock);
		jbd_debug(2, "transaction too old, requesting commit for "
					"handle %p\n", handle);
		/* This is non-blocking */
		__jbd2_log_start_commit(journal, transaction->t_tid);
		spin_unlock(&journal->j_state_lock);

		/*
		 * Special case: JBD2_SYNC synchronous updates require us
		 * to wait for the commit to complete.
		 */
		if (handle->h_sync && !(current->flags & PF_MEMALLOC))
			err = jbd2_log_wait_commit(journal, tid);
	} else {
		spin_unlock(&transaction->t_handle_lock);
		spin_unlock(&journal->j_state_lock);
	}

	lock_map_release(&handle->h_lockdep_map);

	jbd2_free_handle(handle);
	return err;
}
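
/*
 * Illustrative note (a sketch, not part of the original file): a caller
 * that needs the update durable before returning marks the handle
 * synchronous, which makes jbd2_journal_stop() above kick a commit and
 * wait for it.
 *
 *	handle->h_sync = 1;
 *	err = jbd2_journal_stop(handle);
 */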
/**
 * int jbd2_journal_force_commit() - force any uncommitted transactions
 * @journal: journal to force
 *
 * For synchronous operations: force any uncommitted transactions
 * to disk.  May seem kludgy, but it reuses all the handle batching
 * code in a very simple manner.
 */
int jbd2_journal_force_commit(journal_t *journal)
{
	handle_t *handle;
	int ret;

	handle = jbd2_journal_start(journal, 1);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
	} else {
		handle->h_sync = 1;
		ret = jbd2_journal_stop(handle);
	}
	return ret;
}
/*
 *
 * List management code snippets: various functions for manipulating the
 * transaction buffer lists.
 *
 */

/*
 * Append a buffer to a transaction list, given the transaction's list head
 * pointer.
 *
 * j_list_lock is held.
 *
 * jbd_lock_bh_state(jh2bh(jh)) is held.
 */
static inline void
__blist_add_buffer(struct journal_head **list, struct journal_head *jh)
{
	if (!*list) {
		jh->b_tnext = jh->b_tprev = jh;
		*list = jh;
	} else {
		/* Insert at the tail of the list to preserve order */
		struct journal_head *first = *list, *last = first->b_tprev;
		jh->b_tprev = last;
		jh->b_tnext = first;
		last->b_tnext = first->b_tprev = jh;
	}
}
/*
 * Remove a buffer from a transaction list, given the transaction's list
 * head pointer.
 *
 * Called with j_list_lock held, and the journal may not be locked.
 *
 * jbd_lock_bh_state(jh2bh(jh)) is held.
 */
static inline void
__blist_del_buffer(struct journal_head **list, struct journal_head *jh)
{
	if (*list == jh) {
		*list = jh->b_tnext;
		if (*list == jh)
			*list = NULL;
	}
	jh->b_tprev->b_tnext = jh->b_tnext;
	jh->b_tnext->b_tprev = jh->b_tprev;
}
/*
 * Remove a buffer from the appropriate transaction list.
 *
 * Note that this function can *change* the value of
 * bh->b_transaction->t_buffers, t_forget, t_iobuf_list, t_shadow_list,
 * t_log_list or t_reserved_list.  If the caller is holding onto a copy of one
 * of these pointers, it could go bad.  Generally the caller needs to re-read
 * the pointer from the transaction_t.
 *
 * Called under j_list_lock.  The journal may not be locked.
 */
void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh)
{
	struct journal_head **list = NULL;
	transaction_t *transaction;
	struct buffer_head *bh = jh2bh(jh);

	J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
	transaction = jh->b_transaction;
	if (transaction)
		assert_spin_locked(&transaction->t_journal->j_list_lock);

	J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
	if (jh->b_jlist != BJ_None)
		J_ASSERT_JH(jh, transaction != NULL);

	switch (jh->b_jlist) {
	case BJ_None:
		return;
	case BJ_Metadata:
		transaction->t_nr_buffers--;
		J_ASSERT_JH(jh, transaction->t_nr_buffers >= 0);
		list = &transaction->t_buffers;
		break;
	case BJ_Forget:
		list = &transaction->t_forget;
		break;
	case BJ_IO:
		list = &transaction->t_iobuf_list;
		break;
	case BJ_Shadow:
		list = &transaction->t_shadow_list;
		break;
	case BJ_LogCtl:
		list = &transaction->t_log_list;
		break;
	case BJ_Reserved:
		list = &transaction->t_reserved_list;
		break;
	}

	__blist_del_buffer(list, jh);
	jh->b_jlist = BJ_None;
	if (test_clear_buffer_jbddirty(bh))
		mark_buffer_dirty(bh);	/* Expose it to the VM */
}
void __jbd2_journal_unfile_buffer(struct journal_head *jh)
{
	__jbd2_journal_temp_unlink_buffer(jh);
	jh->b_transaction = NULL;
}
void jbd2_journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
{
	jbd_lock_bh_state(jh2bh(jh));
	spin_lock(&journal->j_list_lock);
	__jbd2_journal_unfile_buffer(jh);
	spin_unlock(&journal->j_list_lock);
	jbd_unlock_bh_state(jh2bh(jh));
}
/*
 * Called from jbd2_journal_try_to_free_buffers().
 *
 * Called under jbd_lock_bh_state(bh)
 */
*journal
, struct buffer_head
*bh
)
1491 struct journal_head
*jh
;
1495 if (buffer_locked(bh
) || buffer_dirty(bh
))
1498 if (jh
->b_next_transaction
!= NULL
)
1501 spin_lock(&journal
->j_list_lock
);
1502 if (jh
->b_cp_transaction
!= NULL
&& jh
->b_transaction
== NULL
) {
1503 /* written-back checkpointed metadata buffer */
1504 if (jh
->b_jlist
== BJ_None
) {
1505 JBUFFER_TRACE(jh
, "remove from checkpoint list");
1506 __jbd2_journal_remove_checkpoint(jh
);
1507 jbd2_journal_remove_journal_head(bh
);
1511 spin_unlock(&journal
->j_list_lock
);
/*
 * jbd2_journal_try_to_free_buffers() could race with
 * jbd2_journal_commit_transaction(). The latter might still hold the
 * reference count to the buffers when inspecting them on
 * t_syncdata_list or t_locked_list.
 *
 * jbd2_journal_try_to_free_buffers() will call this function to
 * wait for the current transaction to finish syncing data buffers, before
 * trying to free that buffer.
 *
 * Called with journal->j_state_lock held.
 */
static void jbd2_journal_wait_for_transaction_sync_data(journal_t *journal)
{
	transaction_t *transaction;
	tid_t tid;

	spin_lock(&journal->j_state_lock);
	transaction = journal->j_committing_transaction;

	if (!transaction) {
		spin_unlock(&journal->j_state_lock);
		return;
	}

	tid = transaction->t_tid;
	spin_unlock(&journal->j_state_lock);
	jbd2_log_wait_commit(journal, tid);
}
/**
 * int jbd2_journal_try_to_free_buffers() - try to free page buffers.
 * @journal: journal for operation
 * @page: to try and free
 * @gfp_mask: we use the mask to detect how hard we should try to release
 * buffers. If __GFP_WAIT and __GFP_FS are set, we wait for the commit code
 * to release the buffers.
 *
 * For all the buffers on this page,
 * if they are fully written out ordered data, move them onto BUF_CLEAN
 * so try_to_free_buffers() can reap them.
 *
 * This function returns non-zero if we wish try_to_free_buffers()
 * to be called. We do this if the page is releasable by try_to_free_buffers().
 * We also do it if the page has locked or dirty buffers and the caller wants
 * us to perform sync or async writeout.
 *
 * This complicates JBD locking somewhat.  We aren't protected by the
 * BKL here.  We wish to remove the buffer from its committing or
 * running transaction's ->t_datalist via __jbd2_journal_unfile_buffer.
 *
 * This may *change* the value of transaction_t->t_datalist, so anyone
 * who looks at t_datalist needs to lock against this function.
 *
 * Even worse, someone may be doing a jbd2_journal_dirty_data on this
 * buffer.  So we need to lock against that.  jbd2_journal_dirty_data()
 * will come out of the lock with the buffer dirty, which makes it
 * ineligible for release here.
 *
 * Who else is affected by this?  hmm...  Really the only contender
 * is do_get_write_access() - it could be looking at the buffer while
 * journal_try_to_free_buffer() is changing its state.  But that
 * cannot happen because we never reallocate freed data as metadata
 * while the data is part of a transaction.  Yes?
 *
 * Return 0 on failure, 1 on success
 */
int jbd2_journal_try_to_free_buffers(journal_t *journal,
				struct page *page, gfp_t gfp_mask)
{
	struct buffer_head *head;
	struct buffer_head *bh;
	int ret = 0;

	J_ASSERT(PageLocked(page));

	head = page_buffers(page);
	bh = head;
	do {
		struct journal_head *jh;

		/*
		 * We take our own ref against the journal_head here to avoid
		 * having to add tons of locking around each instance of
		 * jbd2_journal_remove_journal_head() and
		 * jbd2_journal_put_journal_head().
		 */
		jh = jbd2_journal_grab_journal_head(bh);
		if (!jh)
			continue;

		jbd_lock_bh_state(bh);
		__journal_try_to_free_buffer(journal, bh);
		jbd2_journal_put_journal_head(jh);
		jbd_unlock_bh_state(bh);
		if (buffer_jbd(bh))
			goto busy;
	} while ((bh = bh->b_this_page) != head);

	ret = try_to_free_buffers(page);

	/*
	 * There are a number of places where jbd2_journal_try_to_free_buffers()
	 * could race with jbd2_journal_commit_transaction(); the latter still
	 * holds the reference to the buffers to free while processing them.
	 * try_to_free_buffers() then fails to free those buffers.  Some callers
	 * of releasepage() request that page buffers be dropped, and otherwise
	 * treat the failure to free as an error (such as generic_file_direct_IO()).
	 *
	 * So, if the caller of try_to_release_page() wants the synchronous
	 * behaviour (i.e. make sure buffers are dropped upon return),
	 * let's wait for the current transaction to finish flushing
	 * dirty data buffers, then try to free those buffers again,
	 * with the journal locked.
	 */
	if (ret == 0 && (gfp_mask & __GFP_WAIT) && (gfp_mask & __GFP_FS)) {
		jbd2_journal_wait_for_transaction_sync_data(journal);
		ret = try_to_free_buffers(page);
	}

busy:
	return ret;
}
/*
 * This buffer is no longer needed.  If it is on an older transaction's
 * checkpoint list we need to record it on this transaction's forget list
 * to pin this buffer (and hence its checkpointing transaction) down until
 * this transaction commits.  If the buffer isn't on a checkpoint list, we
 * release it.
 * Returns non-zero if JBD no longer has an interest in the buffer.
 *
 * Called under j_list_lock.
 *
 * Called under jbd_lock_bh_state(bh).
 */
static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
{
	int may_free = 1;
	struct buffer_head *bh = jh2bh(jh);

	__jbd2_journal_unfile_buffer(jh);

	if (jh->b_cp_transaction) {
		JBUFFER_TRACE(jh, "on running+cp transaction");
		__jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
		clear_buffer_jbddirty(bh);
		may_free = 0;
	} else {
		JBUFFER_TRACE(jh, "on running transaction");
		jbd2_journal_remove_journal_head(bh);
		__brelse(bh);
	}
	return may_free;
}
/*
 * jbd2_journal_invalidatepage
 *
 * This code is tricky.  It has a number of cases to deal with.
 *
 * There are two invariants which this code relies on:
 *
 * i_size must be updated on disk before we start calling invalidatepage on the
 * data.
 *
 *  This is done in ext3 by defining an ext3_setattr method which
 *  updates i_size before truncate gets going.  By maintaining this
 *  invariant, we can be sure that it is safe to throw away any buffers
 *  attached to the current transaction: once the transaction commits,
 *  we know that the data will not be needed.
 *
 *  Note however that we can *not* throw away data belonging to the
 *  previous, committing transaction!
 *
 * Any disk blocks which *are* part of the previous, committing
 * transaction (and which therefore cannot be discarded immediately) are
 * not going to be reused in the new running transaction
 *
 *  The bitmap committed_data images guarantee this: any block which is
 *  allocated in one transaction and removed in the next will be marked
 *  as in-use in the committed_data bitmap, so cannot be reused until
 *  the next transaction to delete the block commits.  This means that
 *  leaving committing buffers dirty is quite safe: the disk blocks
 *  cannot be reallocated to a different file and so buffer aliasing is
 *  not possible.
 *
 *
 * The above applies mainly to ordered data mode.  In writeback mode we
 * don't make guarantees about the order in which data hits disk --- in
 * particular we don't guarantee that new dirty data is flushed before
 * transaction commit --- so it is always safe just to discard data
 * immediately in that mode.  --sct
 */

/*
 * The journal_unmap_buffer helper function returns zero if the buffer
 * concerned remains pinned as an anonymous buffer belonging to an older
 * transaction.
 *
 * We're outside-transaction here.  Either or both of j_running_transaction
 * and j_committing_transaction may be NULL.
 */
static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
{
	transaction_t *transaction;
	struct journal_head *jh;
	int may_free = 1;
	int ret;

	BUFFER_TRACE(bh, "entry");

	/*
	 * It is safe to proceed here without the j_list_lock because the
	 * buffers cannot be stolen by try_to_free_buffers as long as we are
	 * holding the page lock. --sct
	 */

	if (!buffer_jbd(bh))
		goto zap_buffer_unlocked;

	/* OK, we have data buffer in journaled mode */
	spin_lock(&journal->j_state_lock);
	jbd_lock_bh_state(bh);
	spin_lock(&journal->j_list_lock);

	jh = jbd2_journal_grab_journal_head(bh);
	if (!jh)
		goto zap_buffer_no_jh;

	transaction = jh->b_transaction;
	if (transaction == NULL) {
		/* First case: not on any transaction.  If it
		 * has no checkpoint link, then we can zap it:
		 * it's a writeback-mode buffer so we don't care
		 * if it hits disk safely. */
		if (!jh->b_cp_transaction) {
			JBUFFER_TRACE(jh, "not on any transaction: zap");
			goto zap_buffer;
		}

		if (!buffer_dirty(bh)) {
			/* bdflush has written it.  We can drop it now */
			goto zap_buffer;
		}

		/* OK, it must be in the journal but still not
		 * written fully to disk: it's metadata or
		 * journaled data... */

		if (journal->j_running_transaction) {
			/* ... and once the current transaction has
			 * committed, the buffer won't be needed any
			 * longer. */
			JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget");
			ret = __dispose_buffer(jh,
					journal->j_running_transaction);
			jbd2_journal_put_journal_head(jh);
			spin_unlock(&journal->j_list_lock);
			jbd_unlock_bh_state(bh);
			spin_unlock(&journal->j_state_lock);
			return ret;
		} else {
			/* There is no currently-running transaction. So the
			 * orphan record which we wrote for this file must have
			 * passed into commit.  We must attach this buffer to
			 * the committing transaction, if it exists. */
			if (journal->j_committing_transaction) {
				JBUFFER_TRACE(jh, "give to committing trans");
				ret = __dispose_buffer(jh,
					journal->j_committing_transaction);
				jbd2_journal_put_journal_head(jh);
				spin_unlock(&journal->j_list_lock);
				jbd_unlock_bh_state(bh);
				spin_unlock(&journal->j_state_lock);
				return ret;
			} else {
				/* The orphan record's transaction has
				 * committed.  We can cleanse this buffer */
				clear_buffer_jbddirty(bh);
				goto zap_buffer;
			}
		}
	} else if (transaction == journal->j_committing_transaction) {
		JBUFFER_TRACE(jh, "on committing transaction");
		/*
		 * If it is committing, we simply cannot touch it.  We
		 * can remove its next_transaction pointer from the
		 * running transaction if that is set, but nothing
		 * else. */
		set_buffer_freed(bh);
		if (jh->b_next_transaction) {
			J_ASSERT(jh->b_next_transaction ==
					journal->j_running_transaction);
			jh->b_next_transaction = NULL;
		}
		jbd2_journal_put_journal_head(jh);
		spin_unlock(&journal->j_list_lock);
		jbd_unlock_bh_state(bh);
		spin_unlock(&journal->j_state_lock);
		return 0;
	} else {
		/* Good, the buffer belongs to the running transaction.
		 * We are writing our own transaction's data, not any
		 * previous one's, so it is safe to throw it away
		 * (remember that we expect the filesystem to have set
		 * i_size already for this truncate so recovery will not
		 * expose the disk blocks we are discarding here.) */
		J_ASSERT_JH(jh, transaction == journal->j_running_transaction);
		JBUFFER_TRACE(jh, "on running transaction");
		may_free = __dispose_buffer(jh, transaction);
	}

zap_buffer:
	jbd2_journal_put_journal_head(jh);
zap_buffer_no_jh:
	spin_unlock(&journal->j_list_lock);
	jbd_unlock_bh_state(bh);
	spin_unlock(&journal->j_state_lock);
zap_buffer_unlocked:
	clear_buffer_dirty(bh);
	J_ASSERT_BH(bh, !buffer_jbddirty(bh));
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	bh->b_bdev = NULL;
	return may_free;
}
/**
 * void jbd2_journal_invalidatepage()
 * @journal: journal to use for flush...
 * @page:    page to flush
 * @offset:  length of page to invalidate.
 *
 * Reap page buffers containing data after offset in page.
 *
 */
void jbd2_journal_invalidatepage(journal_t *journal,
		      struct page *page,
		      unsigned long offset)
{
	struct buffer_head *head, *bh, *next;
	unsigned int curr_off = 0;
	int may_free = 1;

	if (!PageLocked(page))
		BUG();
	if (!page_has_buffers(page))
		return;

	/* We will potentially be playing with lists other than just the
	 * data lists (especially for journaled data mode), so be
	 * cautious in our locking. */

	head = bh = page_buffers(page);
	do {
		unsigned int next_off = curr_off + bh->b_size;
		next = bh->b_this_page;

		if (offset <= curr_off) {
			/* This block is wholly outside the truncation point */
			lock_buffer(bh);
			may_free &= journal_unmap_buffer(journal, bh);
			unlock_buffer(bh);
		}
		curr_off = next_off;
		bh = next;

	} while (bh != head);

	if (offset == 0) {
		if (may_free && try_to_free_buffers(page))
			J_ASSERT(!page_has_buffers(page));
	}
}
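
/*
 * Illustrative wiring (a sketch, not part of the original file): a
 * filesystem's address_space invalidatepage hook typically forwards to
 * this helper.  my_fs_invalidatepage and my_fs_get_journal are made-up
 * placeholders for the filesystem's own functions.
 *
 *	static void my_fs_invalidatepage(struct page *page, unsigned long offset)
 *	{
 *		journal_t *journal = my_fs_get_journal(page->mapping->host);
 *
 *		jbd2_journal_invalidatepage(journal, page, offset);
 *	}
 */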
/*
 * File a buffer on the given transaction list.
 */
void __jbd2_journal_file_buffer(struct journal_head *jh,
			transaction_t *transaction, int jlist)
{
	struct journal_head **list = NULL;
	int was_dirty = 0;
	struct buffer_head *bh = jh2bh(jh);

	J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
	assert_spin_locked(&transaction->t_journal->j_list_lock);

	J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
	J_ASSERT_JH(jh, jh->b_transaction == transaction ||
				jh->b_transaction == NULL);

	if (jh->b_transaction && jh->b_jlist == jlist)
		return;

	/* The following list of buffer states needs to be consistent
	 * with __jbd_unexpected_dirty_buffer()'s handling of dirty
	 * state. */

	if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
	    jlist == BJ_Shadow || jlist == BJ_Forget) {
		if (test_clear_buffer_dirty(bh) ||
		    test_clear_buffer_jbddirty(bh))
			was_dirty = 1;
	}

	if (jh->b_transaction)
		__jbd2_journal_temp_unlink_buffer(jh);
	jh->b_transaction = transaction;

	switch (jlist) {
	case BJ_None:
		J_ASSERT_JH(jh, !jh->b_committed_data);
		J_ASSERT_JH(jh, !jh->b_frozen_data);
		return;
	case BJ_Metadata:
		transaction->t_nr_buffers++;
		list = &transaction->t_buffers;
		break;
	case BJ_Forget:
		list = &transaction->t_forget;
		break;
	case BJ_IO:
		list = &transaction->t_iobuf_list;
		break;
	case BJ_Shadow:
		list = &transaction->t_shadow_list;
		break;
	case BJ_LogCtl:
		list = &transaction->t_log_list;
		break;
	case BJ_Reserved:
		list = &transaction->t_reserved_list;
		break;
	}

	__blist_add_buffer(list, jh);
	jh->b_jlist = jlist;

	if (was_dirty)
		set_buffer_jbddirty(bh);
}
void jbd2_journal_file_buffer(struct journal_head *jh,
				transaction_t *transaction, int jlist)
{
	jbd_lock_bh_state(jh2bh(jh));
	spin_lock(&transaction->t_journal->j_list_lock);
	__jbd2_journal_file_buffer(jh, transaction, jlist);
	spin_unlock(&transaction->t_journal->j_list_lock);
	jbd_unlock_bh_state(jh2bh(jh));
}
/*
 * Remove a buffer from its current buffer list in preparation for
 * dropping it from its current transaction entirely.  If the buffer has
 * already started to be used by a subsequent transaction, refile the
 * buffer on that transaction's metadata list.
 *
 * Called under journal->j_list_lock
 *
 * Called under jbd_lock_bh_state(jh2bh(jh))
 */
void __jbd2_journal_refile_buffer(struct journal_head *jh)
{
	int was_dirty;
	struct buffer_head *bh = jh2bh(jh);

	J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
	if (jh->b_transaction)
		assert_spin_locked(&jh->b_transaction->t_journal->j_list_lock);

	/* If the buffer is now unused, just drop it. */
	if (jh->b_next_transaction == NULL) {
		__jbd2_journal_unfile_buffer(jh);
		return;
	}

	/*
	 * It has been modified by a later transaction: add it to the new
	 * transaction's metadata list.
	 */

	was_dirty = test_clear_buffer_jbddirty(bh);
	__jbd2_journal_temp_unlink_buffer(jh);
	jh->b_transaction = jh->b_next_transaction;
	jh->b_next_transaction = NULL;
	__jbd2_journal_file_buffer(jh, jh->b_transaction,
				jh->b_modified ? BJ_Metadata : BJ_Reserved);
	J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING);

	if (was_dirty)
		set_buffer_jbddirty(bh);
}
/*
 * For the unlocked version of this call, also make sure that any
 * hanging journal_head is cleaned up if necessary.
 *
 * __jbd2_journal_refile_buffer is usually called as part of a single locked
 * operation on a buffer_head, in which the caller is probably going to
 * be hooking the journal_head onto other lists.  In that case it is up
 * to the caller to remove the journal_head if necessary.  For the
 * unlocked jbd2_journal_refile_buffer call, the caller isn't going to be
 * doing anything else to the buffer so we need to do the cleanup
 * ourselves to avoid a jh leak.
 *
 * *** The journal_head may be freed by this call! ***
 */
void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh)
{
	struct buffer_head *bh = jh2bh(jh);

	jbd_lock_bh_state(bh);
	spin_lock(&journal->j_list_lock);

	__jbd2_journal_refile_buffer(jh);
	jbd_unlock_bh_state(bh);
	jbd2_journal_remove_journal_head(bh);

	spin_unlock(&journal->j_list_lock);
	__brelse(bh);
}
/*
 * File inode in the inode list of the handle's transaction
 */
int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;

	if (is_handle_aborted(handle))
		return -EIO;

	jbd_debug(4, "Adding inode %lu, tid:%d\n", jinode->i_vfs_inode->i_ino,
			transaction->t_tid);

	/*
	 * First check whether inode isn't already on the transaction's
	 * lists without taking the lock. Note that this check is safe
	 * without the lock as we cannot race with somebody removing inode
	 * from the transaction. The reason is that we remove inode from the
	 * transaction only in journal_release_jbd_inode() and when we commit
	 * the transaction. We are guarded from the first case by holding
	 * a reference to the inode. We are safe against the second case
	 * because if jinode->i_transaction == transaction, commit code
	 * cannot touch the transaction because we hold reference to it,
	 * and if jinode->i_next_transaction == transaction, commit code
	 * will only file the inode where we want it.
	 */
	if (jinode->i_transaction == transaction ||
	    jinode->i_next_transaction == transaction)
		return 0;

	spin_lock(&journal->j_list_lock);

	if (jinode->i_transaction == transaction ||
	    jinode->i_next_transaction == transaction)
		goto done;

	/* On some different transaction's list - should be
	 * the committing one */
	if (jinode->i_transaction) {
		J_ASSERT(jinode->i_next_transaction == NULL);
		J_ASSERT(jinode->i_transaction ==
					journal->j_committing_transaction);
		jinode->i_next_transaction = transaction;
		goto done;
	}
	/* Not on any transaction list... */
	J_ASSERT(!jinode->i_next_transaction);
	jinode->i_transaction = transaction;
	list_add(&jinode->i_list, &transaction->t_inode_list);
done:
	spin_unlock(&journal->j_list_lock);

	return 0;
}
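
/*
 * Illustrative pattern (a sketch, not part of the original file): an
 * ordered-mode write path files the inode on the running transaction so
 * its data is written out before the transaction commits.  ei->jinode is
 * a made-up field standing in for the filesystem's struct jbd2_inode.
 *
 *	err = jbd2_journal_file_inode(handle, &ei->jinode);
 */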
/*
 * This function must be called when inode is journaled in ordered mode
 * before truncation happens. It starts writeout of the truncated part in
 * case it is in the committing transaction, so that we honour the ordered
 * mode consistency guarantees.
 */
int jbd2_journal_begin_ordered_truncate(struct jbd2_inode *inode,
					loff_t new_size)
{
	journal_t *journal;
	transaction_t *commit_trans;
	int ret = 0;

	if (!inode->i_transaction && !inode->i_next_transaction)
		goto out;
	journal = inode->i_transaction->t_journal;
	spin_lock(&journal->j_state_lock);
	commit_trans = journal->j_committing_transaction;
	spin_unlock(&journal->j_state_lock);
	if (inode->i_transaction == commit_trans) {
		ret = filemap_fdatawrite_range(inode->i_vfs_inode->i_mapping,
			new_size, LLONG_MAX);
		if (ret)
			jbd2_journal_abort(journal, ret);
	}
out:
	return ret;
}