/*
 * linux/fs/jbd2/transaction.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Generic filesystem transaction handling code; part of the ext2fs
 * journaling system.
 *
 * This file manages transactions (compound commits managed by the
 * journaling code) and handles (individual atomic operations by the
 * filesystem).
 */
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hrtimer.h>
#include <linux/backing-dev.h>
#include <linux/bug.h>
#include <linux/module.h>

#include <trace/events/jbd2.h>
static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh);
static void __jbd2_journal_unfile_buffer(struct journal_head *jh);

static struct kmem_cache *transaction_cache;
int __init jbd2_journal_init_transaction_cache(void)
{
	J_ASSERT(!transaction_cache);
	transaction_cache = kmem_cache_create("jbd2_transaction_s",
					sizeof(transaction_t),
					0,
					SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY,
					NULL);
	if (transaction_cache)
		return 0;
	return -ENOMEM;
}
void jbd2_journal_destroy_transaction_cache(void)
{
	if (transaction_cache) {
		kmem_cache_destroy(transaction_cache);
		transaction_cache = NULL;
	}
}
void jbd2_journal_free_transaction(transaction_t *transaction)
{
	if (unlikely(ZERO_OR_NULL_PTR(transaction)))
		return;
	kmem_cache_free(transaction_cache, transaction);
}
/*
 * jbd2_get_transaction: obtain a new transaction_t object.
 *
 * Simply allocate and initialise a new transaction.  Create it in
 * RUNNING state and add it to the current journal (which should not
 * have an existing running transaction: we only make a new transaction
 * once we have started to commit the old one).
 *
 * Preconditions:
 *	The journal MUST be locked.  We don't perform atomic mallocs on the
 *	new transaction	and we can't block without protecting against other
 *	processes trying to touch the journal while it is in transition.
 */
static transaction_t *
jbd2_get_transaction(journal_t *journal, transaction_t *transaction)
{
	transaction->t_journal = journal;
	transaction->t_state = T_RUNNING;
	transaction->t_start_time = ktime_get();
	transaction->t_tid = journal->j_transaction_sequence++;
	transaction->t_expires = jiffies + journal->j_commit_interval;
	spin_lock_init(&transaction->t_handle_lock);
	atomic_set(&transaction->t_updates, 0);
	atomic_set(&transaction->t_outstanding_credits, 0);
	atomic_set(&transaction->t_handle_count, 0);
	INIT_LIST_HEAD(&transaction->t_inode_list);
	INIT_LIST_HEAD(&transaction->t_private_list);

	/* Set up the commit timer for the new transaction. */
	journal->j_commit_timer.expires = round_jiffies_up(transaction->t_expires);
	add_timer(&journal->j_commit_timer);

	J_ASSERT(journal->j_running_transaction == NULL);
	journal->j_running_transaction = transaction;
	transaction->t_max_wait = 0;
	transaction->t_start = jiffies;
	transaction->t_requested = 0;

	return transaction;
}
/*
 * Handle management.
 *
 * A handle_t is an object which represents a single atomic update to a
 * filesystem, and which tracks all of the modifications which form part
 * of that one update.
 */

/*
 * Update transaction's maximum wait time, if debugging is enabled.
 *
 * In order for t_max_wait to be reliable, it must be protected by a
 * lock.  But doing so will mean that start_this_handle() can not be
 * run in parallel on SMP systems, which limits our scalability.  So
 * unless debugging is enabled, we no longer update t_max_wait, which
 * means that maximum wait time reported by the jbd2_run_stats
 * tracepoint will always be zero.
 */
static inline void update_t_max_wait(transaction_t *transaction,
				     unsigned long ts)
{
#ifdef CONFIG_JBD2_DEBUG
	if (jbd2_journal_enable_debug &&
	    time_after(transaction->t_start, ts)) {
		ts = jbd2_time_diff(ts, transaction->t_start);
		spin_lock(&transaction->t_handle_lock);
		if (ts > transaction->t_max_wait)
			transaction->t_max_wait = ts;
		spin_unlock(&transaction->t_handle_lock);
	}
#endif
}
/*
 * start_this_handle: Given a handle, deal with any locking or stalling
 * needed to make sure that there is enough journal space for the handle
 * to begin.  Attach the handle to a transaction and set up the
 * transaction's buffer credits.
 */
static int start_this_handle(journal_t *journal, handle_t *handle,
			     gfp_t gfp_mask)
{
	transaction_t	*transaction, *new_transaction = NULL;
	tid_t		tid;
	int		needed, need_to_start;
	int		nblocks = handle->h_buffer_credits;
	unsigned long ts = jiffies;

	if (nblocks > journal->j_max_transaction_buffers) {
		printk(KERN_ERR "JBD2: %s wants too many credits (%d > %d)\n",
		       current->comm, nblocks,
		       journal->j_max_transaction_buffers);
		return -ENOSPC;
	}

alloc_transaction:
	if (!journal->j_running_transaction) {
		new_transaction = kmem_cache_zalloc(transaction_cache,
						    gfp_mask);
		if (!new_transaction) {
			/*
			 * If __GFP_FS is not present, then we may be
			 * being called from inside the fs writeback
			 * layer, so we MUST NOT fail.  Since
			 * __GFP_NOFAIL is going away, we will arrange
			 * to retry the allocation ourselves.
			 */
			if ((gfp_mask & __GFP_FS) == 0) {
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				goto alloc_transaction;
			}
			return -ENOMEM;
		}
	}

	jbd_debug(3, "New handle %p going live.\n", handle);

	/*
	 * We need to hold j_state_lock until t_updates has been incremented,
	 * for proper journal barrier handling
	 */
repeat:
	read_lock(&journal->j_state_lock);
	BUG_ON(journal->j_flags & JBD2_UNMOUNT);
	if (is_journal_aborted(journal) ||
	    (journal->j_errno != 0 && !(journal->j_flags & JBD2_ACK_ERR))) {
		read_unlock(&journal->j_state_lock);
		jbd2_journal_free_transaction(new_transaction);
		return -EROFS;
	}

	/* Wait on the journal's transaction barrier if necessary */
	if (journal->j_barrier_count) {
		read_unlock(&journal->j_state_lock);
		wait_event(journal->j_wait_transaction_locked,
				journal->j_barrier_count == 0);
		goto repeat;
	}

	if (!journal->j_running_transaction) {
		read_unlock(&journal->j_state_lock);
		if (!new_transaction)
			goto alloc_transaction;
		write_lock(&journal->j_state_lock);
		if (!journal->j_running_transaction &&
		    !journal->j_barrier_count) {
			jbd2_get_transaction(journal, new_transaction);
			new_transaction = NULL;
		}
		write_unlock(&journal->j_state_lock);
		goto repeat;
	}

	transaction = journal->j_running_transaction;

	/*
	 * If the current transaction is locked down for commit, wait for the
	 * lock to be released.
	 */
	if (transaction->t_state == T_LOCKED) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&journal->j_wait_transaction_locked,
					&wait, TASK_UNINTERRUPTIBLE);
		read_unlock(&journal->j_state_lock);
		schedule();
		finish_wait(&journal->j_wait_transaction_locked, &wait);
		goto repeat;
	}

	/*
	 * If there is not enough space left in the log to write all potential
	 * buffers requested by this operation, we need to stall pending a log
	 * checkpoint to free some more log space.
	 */
	needed = atomic_add_return(nblocks,
				   &transaction->t_outstanding_credits);

	if (needed > journal->j_max_transaction_buffers) {
		/*
		 * If the current transaction is already too large, then start
		 * to commit it: we can then go back and attach this handle to
		 * a new transaction.
		 */
		DEFINE_WAIT(wait);

		jbd_debug(2, "Handle %p starting new commit...\n", handle);
		atomic_sub(nblocks, &transaction->t_outstanding_credits);
		prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
				TASK_UNINTERRUPTIBLE);
		tid = transaction->t_tid;
		need_to_start = !tid_geq(journal->j_commit_request, tid);
		read_unlock(&journal->j_state_lock);
		if (need_to_start)
			jbd2_log_start_commit(journal, tid);
		schedule();
		finish_wait(&journal->j_wait_transaction_locked, &wait);
		goto repeat;
	}

	/*
	 * The commit code assumes that it can get enough log space
	 * without forcing a checkpoint.  This is *critical* for
	 * correctness: a checkpoint of a buffer which is also
	 * associated with a committing transaction creates a deadlock,
	 * so commit simply cannot force through checkpoints.
	 *
	 * We must therefore ensure the necessary space in the journal
	 * *before* starting to dirty potentially checkpointed buffers
	 * in the new transaction.
	 *
	 * The worst part is, any transaction currently committing can
	 * reduce the free space arbitrarily.  Be careful to account for
	 * those buffers when checkpointing.
	 */
	if (jbd2_log_space_left(journal) < jbd2_space_needed(journal)) {
		jbd_debug(2, "Handle %p waiting for checkpoint...\n", handle);
		atomic_sub(nblocks, &transaction->t_outstanding_credits);
		read_unlock(&journal->j_state_lock);
		write_lock(&journal->j_state_lock);
		if (jbd2_log_space_left(journal) < jbd2_space_needed(journal))
			__jbd2_log_wait_for_space(journal);
		write_unlock(&journal->j_state_lock);
		goto repeat;
	}

	/* OK, account for the buffers that this operation expects to
	 * use and add the handle to the running transaction.
	 */
	update_t_max_wait(transaction, ts);
	handle->h_transaction = transaction;
	handle->h_requested_credits = nblocks;
	handle->h_start_jiffies = jiffies;
	atomic_inc(&transaction->t_updates);
	atomic_inc(&transaction->t_handle_count);
	jbd_debug(4, "Handle %p given %d credits (total %d, free %d)\n",
		  handle, nblocks,
		  atomic_read(&transaction->t_outstanding_credits),
		  jbd2_log_space_left(journal));
	read_unlock(&journal->j_state_lock);

	lock_map_acquire(&handle->h_lockdep_map);
	jbd2_journal_free_transaction(new_transaction);
	return 0;
}
static struct lock_class_key jbd2_handle_key;

/* Allocate a new handle.  This should probably be in a slab... */
static handle_t *new_handle(int nblocks)
{
	handle_t *handle = jbd2_alloc_handle(GFP_NOFS);
	if (!handle)
		return NULL;
	handle->h_buffer_credits = nblocks;
	handle->h_ref = 1;

	lockdep_init_map(&handle->h_lockdep_map, "jbd2_handle",
						&jbd2_handle_key, 0);

	return handle;
}
/**
 * handle_t *jbd2_journal_start() - Obtain a new handle.
 * @journal: Journal to start transaction on.
 * @nblocks: number of block buffer we might modify
 *
 * We make sure that the transaction can guarantee at least nblocks of
 * modified buffers in the log.  We block until the log can guarantee
 * that much space.
 *
 * This function is visible to journal users (like ext3fs), so is not
 * called with the journal already locked.
 *
 * Return a pointer to a newly allocated handle, or an ERR_PTR() value
 * on failure.
 */
handle_t *jbd2__journal_start(journal_t *journal, int nblocks, gfp_t gfp_mask,
			      unsigned int type, unsigned int line_no)
{
	handle_t *handle = journal_current_handle();
	int err;

	if (!journal)
		return ERR_PTR(-EROFS);

	if (handle) {
		J_ASSERT(handle->h_transaction->t_journal == journal);
		handle->h_ref++;
		return handle;
	}

	handle = new_handle(nblocks);
	if (!handle)
		return ERR_PTR(-ENOMEM);

	current->journal_info = handle;

	err = start_this_handle(journal, handle, gfp_mask);
	if (err < 0) {
		jbd2_free_handle(handle);
		current->journal_info = NULL;
		return ERR_PTR(err);
	}
	handle->h_type = type;
	handle->h_line_no = line_no;
	trace_jbd2_handle_start(journal->j_fs_dev->bd_dev,
				handle->h_transaction->t_tid, type,
				line_no, nblocks);
	return handle;
}
EXPORT_SYMBOL(jbd2__journal_start);
handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
{
	return jbd2__journal_start(journal, nblocks, GFP_NOFS, 0, 0);
}
EXPORT_SYMBOL(jbd2_journal_start);
/**
 * int jbd2_journal_extend() - extend buffer credits.
 * @handle:  handle to 'extend'
 * @nblocks: nr blocks to try to extend by.
 *
 * Some transactions, such as large extends and truncates, can be done
 * atomically all at once or in several stages.  The operation requests
 * a credit for a number of buffer modifications in advance, but can
 * extend its credit if it needs more.
 *
 * jbd2_journal_extend tries to give the running handle more buffer credits.
 * It does not guarantee that allocation - this is a best-effort only.
 * The calling process MUST be able to deal cleanly with a failure to
 * extend here.
 *
 * Return 0 on success, non-zero on failure.
 *
 * return code < 0 implies an error
 * return code > 0 implies normal transaction-full status.
 */
int jbd2_journal_extend(handle_t *handle, int nblocks)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;
	int result;
	int wanted;

	result = -EIO;
	if (is_handle_aborted(handle))
		goto out;

	result = 1;

	read_lock(&journal->j_state_lock);

	/* Don't extend a locked-down transaction! */
	if (handle->h_transaction->t_state != T_RUNNING) {
		jbd_debug(3, "denied handle %p %d blocks: "
			  "transaction not running\n", handle, nblocks);
		goto error_out;
	}

	spin_lock(&transaction->t_handle_lock);
	wanted = atomic_read(&transaction->t_outstanding_credits) + nblocks;

	if (wanted > journal->j_max_transaction_buffers) {
		jbd_debug(3, "denied handle %p %d blocks: "
			  "transaction too large\n", handle, nblocks);
		goto unlock;
	}

	if (wanted + (wanted >> JBD2_CONTROL_BLOCKS_SHIFT) >
	    jbd2_log_space_left(journal)) {
		jbd_debug(3, "denied handle %p %d blocks: "
			  "insufficient log space\n", handle, nblocks);
		goto unlock;
	}

	trace_jbd2_handle_extend(journal->j_fs_dev->bd_dev,
				 handle->h_transaction->t_tid,
				 handle->h_type, handle->h_line_no,
				 handle->h_buffer_credits,
				 nblocks);

	handle->h_buffer_credits += nblocks;
	handle->h_requested_credits += nblocks;
	atomic_add(nblocks, &transaction->t_outstanding_credits);
	result = 0;

	jbd_debug(3, "extended handle %p by %d\n", handle, nblocks);
unlock:
	spin_unlock(&transaction->t_handle_lock);
error_out:
	read_unlock(&journal->j_state_lock);
out:
	return result;
}
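
/*
 * Illustrative sketch (editor's addition): interpreting the tri-state
 * return convention documented above.  "needed" is a hypothetical
 * credit count computed by the caller.
 *
 *	err = jbd2_journal_extend(handle, needed);
 *	if (err < 0)
 *		goto fail;	journal error
 *	if (err > 0)
 *		... transaction full: fall back to jbd2_journal_restart() ...
 *	... on 0 the handle now owns the extra credits ...
 */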
/**
 * int jbd2_journal_restart() - restart a handle.
 * @handle:  handle to restart
 * @nblocks: nr credits requested
 *
 * Restart a handle for a multi-transaction filesystem
 * operation.
 *
 * If the jbd2_journal_extend() call above fails to grant new buffer credits
 * to a running handle, a call to jbd2_journal_restart will commit the
 * handle's transaction so far and reattach the handle to a new
 * transaction capable of guaranteeing the requested number of
 * credits.
 */
int jbd2__journal_restart(handle_t *handle, int nblocks, gfp_t gfp_mask)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;
	tid_t		tid;
	int		need_to_start, ret;

	/* If we've had an abort of any type, don't even think about
	 * actually doing the restart! */
	if (is_handle_aborted(handle))
		return 0;

	/*
	 * First unlink the handle from its current transaction, and start the
	 * commit on that.
	 */
	J_ASSERT(atomic_read(&transaction->t_updates) > 0);
	J_ASSERT(journal_current_handle() == handle);

	read_lock(&journal->j_state_lock);
	spin_lock(&transaction->t_handle_lock);
	atomic_sub(handle->h_buffer_credits,
		   &transaction->t_outstanding_credits);
	if (atomic_dec_and_test(&transaction->t_updates))
		wake_up(&journal->j_wait_updates);
	spin_unlock(&transaction->t_handle_lock);

	jbd_debug(2, "restarting handle %p\n", handle);
	tid = transaction->t_tid;
	need_to_start = !tid_geq(journal->j_commit_request, tid);
	read_unlock(&journal->j_state_lock);
	if (need_to_start)
		jbd2_log_start_commit(journal, tid);

	lock_map_release(&handle->h_lockdep_map);
	handle->h_buffer_credits = nblocks;
	ret = start_this_handle(journal, handle, gfp_mask);
	return ret;
}
EXPORT_SYMBOL(jbd2__journal_restart);
int jbd2_journal_restart(handle_t *handle, int nblocks)
{
	return jbd2__journal_restart(handle, nblocks, GFP_NOFS);
}
EXPORT_SYMBOL(jbd2_journal_restart);
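
/*
 * Illustrative sketch (editor's addition): the common extend-or-restart
 * idiom built from the two exported calls above.  Note that a restart
 * commits the handle's current transaction, so the caller must have its
 * already-journaled buffers in a consistent state first.
 *
 *	err = jbd2_journal_extend(handle, nblocks);
 *	if (err > 0)
 *		err = jbd2_journal_restart(handle, nblocks);
 *	if (err)
 *		... fail the operation ...
 */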
/**
 * void jbd2_journal_lock_updates () - establish a transaction barrier.
 * @journal:  Journal to establish a barrier on.
 *
 * This locks out any further updates from being started, and blocks
 * until all existing updates have completed, returning only once the
 * journal is in a quiescent state with no updates running.
 *
 * The journal lock should not be held on entry.
 */
void jbd2_journal_lock_updates(journal_t *journal)
{
	DEFINE_WAIT(wait);

	write_lock(&journal->j_state_lock);
	++journal->j_barrier_count;

	/* Wait until there are no running updates */
	while (1) {
		transaction_t *transaction = journal->j_running_transaction;

		if (!transaction)
			break;

		spin_lock(&transaction->t_handle_lock);
		prepare_to_wait(&journal->j_wait_updates, &wait,
				TASK_UNINTERRUPTIBLE);
		if (!atomic_read(&transaction->t_updates)) {
			spin_unlock(&transaction->t_handle_lock);
			finish_wait(&journal->j_wait_updates, &wait);
			break;
		}
		spin_unlock(&transaction->t_handle_lock);
		write_unlock(&journal->j_state_lock);
		schedule();
		finish_wait(&journal->j_wait_updates, &wait);
		write_lock(&journal->j_state_lock);
	}
	write_unlock(&journal->j_state_lock);

	/*
	 * We have now established a barrier against other normal updates, but
	 * we also need to barrier against other jbd2_journal_lock_updates() calls
	 * to make sure that we serialise special journal-locked operations
	 * too.
	 */
	mutex_lock(&journal->j_barrier);
}
/**
 * void jbd2_journal_unlock_updates (journal_t* journal) - release barrier
 * @journal:  Journal to release the barrier on.
 *
 * Release a transaction barrier obtained with jbd2_journal_lock_updates().
 *
 * Should be called without the journal lock held.
 */
void jbd2_journal_unlock_updates (journal_t *journal)
{
	J_ASSERT(journal->j_barrier_count != 0);

	mutex_unlock(&journal->j_barrier);
	write_lock(&journal->j_state_lock);
	--journal->j_barrier_count;
	write_unlock(&journal->j_state_lock);
	wake_up(&journal->j_wait_transaction_locked);
}
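
/*
 * Illustrative sketch (editor's addition): the barrier pair is used to
 * quiesce the journal around operations that must not race with any
 * update (for example a journal flush or an online resize):
 *
 *	jbd2_journal_lock_updates(journal);
 *	... no handle is running and none can start here ...
 *	jbd2_journal_unlock_updates(journal);
 *
 * The calls must be strictly paired; the j_barrier mutex taken above
 * also serialises concurrent lockers against each other.
 */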
static void warn_dirty_buffer(struct buffer_head *bh)
{
	char b[BDEVNAME_SIZE];

	printk(KERN_WARNING
	       "JBD2: Spotted dirty metadata buffer (dev = %s, blocknr = %llu). "
	       "There's a risk of filesystem corruption in case of system "
	       "crash.\n",
	       bdevname(bh->b_bdev, b), (unsigned long long)bh->b_blocknr);
}
static int sleep_on_shadow_bh(void *word)
{
	io_schedule();
	return 0;
}
/*
 * If the buffer is already part of the current transaction, then there
 * is nothing we need to do.  If it is already part of a prior
 * transaction which we are still committing to disk, then we need to
 * make sure that we do not overwrite the old copy: we do copy-out to
 * preserve the copy going to disk.  We also account the buffer against
 * the handle's metadata buffer credits (unless the buffer is already
 * part of the transaction, that is).
 */
static int
do_get_write_access(handle_t *handle, struct journal_head *jh,
			int force_copy)
{
	struct buffer_head *bh;
	transaction_t *transaction;
	journal_t *journal;
	int error;
	char *frozen_buffer = NULL;
	int need_copy = 0;
	unsigned long start_lock, time_lock;

	if (is_handle_aborted(handle))
		return -EROFS;

	transaction = handle->h_transaction;
	journal = transaction->t_journal;

	jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);

	JBUFFER_TRACE(jh, "entry");
repeat:
	bh = jh2bh(jh);

	/* @@@ Need to check for errors here at some point. */
	start_lock = jiffies;
	lock_buffer(bh);
	jbd_lock_bh_state(bh);

	/* If it takes too long to lock the buffer, trace it */
	time_lock = jbd2_time_diff(start_lock, jiffies);
	if (time_lock > HZ/10)
		trace_jbd2_lock_buffer_stall(bh->b_bdev->bd_dev,
			jiffies_to_msecs(time_lock));

	/* We now hold the buffer lock so it is safe to query the buffer
	 * state.  Is the buffer dirty?
	 *
	 * If so, there are two possibilities.  The buffer may be
	 * non-journaled, and undergoing a quite legitimate writeback.
	 * Otherwise, it is journaled, and we don't expect dirty buffers
	 * in that state (the buffers should be marked JBD_Dirty
	 * instead.)  So either the IO is being done under our own
	 * control and this is a bug, or it's a third party IO such as
	 * dump(8) (which may leave the buffer scheduled for read ---
	 * ie. locked but not dirty) or tune2fs (which may actually have
	 * the buffer dirtied, ugh.)  */

	if (buffer_dirty(bh)) {
		/*
		 * First question: is this buffer already part of the current
		 * transaction or the existing committing transaction?
		 */
		if (jh->b_transaction) {
			J_ASSERT_JH(jh,
				jh->b_transaction == transaction ||
				jh->b_transaction ==
					journal->j_committing_transaction);
			if (jh->b_next_transaction)
				J_ASSERT_JH(jh, jh->b_next_transaction ==
							transaction);
			warn_dirty_buffer(bh);
		}
		/*
		 * In any case we need to clean the dirty flag and we must
		 * do it under the buffer lock to be sure we don't race
		 * with running write-out.
		 */
		JBUFFER_TRACE(jh, "Journalling dirty buffer");
		clear_buffer_dirty(bh);
		set_buffer_jbddirty(bh);
	}

	unlock_buffer(bh);

	error = -EROFS;
	if (is_handle_aborted(handle)) {
		jbd_unlock_bh_state(bh);
		goto out;
	}
	error = 0;

	/*
	 * The buffer is already part of this transaction if b_transaction or
	 * b_next_transaction points to it
	 */
	if (jh->b_transaction == transaction ||
	    jh->b_next_transaction == transaction)
		goto done;

	/*
	 * this is the first time this transaction is touching this buffer,
	 * reset the modified flag
	 */
	jh->b_modified = 0;

	/*
	 * If there is already a copy-out version of this buffer, then we don't
	 * need to make another one
	 */
	if (jh->b_frozen_data) {
		JBUFFER_TRACE(jh, "has frozen data");
		J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
		jh->b_next_transaction = transaction;
		goto done;
	}

	/* Is there data here we need to preserve? */

	if (jh->b_transaction && jh->b_transaction != transaction) {
		JBUFFER_TRACE(jh, "owned by older transaction");
		J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
		J_ASSERT_JH(jh, jh->b_transaction ==
					journal->j_committing_transaction);

		/* There is one case we have to be very careful about.
		 * If the committing transaction is currently writing
		 * this buffer out to disk and has NOT made a copy-out,
		 * then we cannot modify the buffer contents at all
		 * right now.  The essence of copy-out is that it is the
		 * extra copy, not the primary copy, which gets
		 * journaled.  If the primary copy is already going to
		 * disk then we cannot do copy-out here. */

		if (buffer_shadow(bh)) {
			JBUFFER_TRACE(jh, "on shadow: sleep");
			jbd_unlock_bh_state(bh);
			wait_on_bit(&bh->b_state, BH_Shadow,
				    sleep_on_shadow_bh, TASK_UNINTERRUPTIBLE);
			goto repeat;
		}

		/*
		 * Only do the copy if the currently-owning transaction still
		 * needs it.  If buffer isn't on BJ_Metadata list, the
		 * committing transaction is past that stage (here we use the
		 * fact that BH_Shadow is set under bh_state lock together with
		 * refiling to BJ_Shadow list and at this point we know the
		 * buffer doesn't have BH_Shadow set).
		 *
		 * Subtle point, though: if this is a get_undo_access,
		 * then we will be relying on the frozen_data to contain
		 * the new value of the committed_data record after the
		 * transaction, so we HAVE to force the frozen_data copy
		 * in that case.
		 */
		if (jh->b_jlist == BJ_Metadata || force_copy) {
			JBUFFER_TRACE(jh, "generate frozen data");
			if (!frozen_buffer) {
				JBUFFER_TRACE(jh, "allocate memory for buffer");
				jbd_unlock_bh_state(bh);
				frozen_buffer =
					jbd2_alloc(jh2bh(jh)->b_size,
						   GFP_NOFS);
				if (!frozen_buffer) {
					printk(KERN_EMERG
					       "%s: OOM for frozen_buffer\n",
					       __func__);
					JBUFFER_TRACE(jh, "oom!");
					error = -ENOMEM;
					jbd_lock_bh_state(bh);
					goto done;
				}
				goto repeat;
			}
			jh->b_frozen_data = frozen_buffer;
			frozen_buffer = NULL;
			need_copy = 1;
		}
		jh->b_next_transaction = transaction;
	}

	/*
	 * Finally, if the buffer is not journaled right now, we need to make
	 * sure it doesn't get written to disk before the caller actually
	 * commits the new data
	 */
	if (!jh->b_transaction) {
		JBUFFER_TRACE(jh, "no transaction");
		J_ASSERT_JH(jh, !jh->b_next_transaction);
		JBUFFER_TRACE(jh, "file as BJ_Reserved");
		spin_lock(&journal->j_list_lock);
		__jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
		spin_unlock(&journal->j_list_lock);
	}

done:
	if (need_copy) {
		struct page *page;
		int offset;
		char *source;

		J_EXPECT_JH(jh, buffer_uptodate(jh2bh(jh)),
			    "Possible IO failure.\n");
		page = jh2bh(jh)->b_page;
		offset = offset_in_page(jh2bh(jh)->b_data);
		source = kmap_atomic(page);
		/* Fire data frozen trigger just before we copy the data */
		jbd2_buffer_frozen_trigger(jh, source + offset,
					   jh->b_triggers);
		memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size);
		kunmap_atomic(source);

		/*
		 * Now that the frozen data is saved off, we need to store
		 * any matching triggers.
		 */
		jh->b_frozen_triggers = jh->b_triggers;
	}
	jbd_unlock_bh_state(bh);

	/*
	 * If we are about to journal a buffer, then any revoke pending on it is
	 * a relevant record.
	 */
	jbd2_journal_cancel_revoke(handle, jh);

out:
	if (unlikely(frozen_buffer))	/* It's usually NULL */
		jbd2_free(frozen_buffer, bh->b_size);

	JBUFFER_TRACE(jh, "exit");
	return error;
}
/**
 * int jbd2_journal_get_write_access() - notify intent to modify a buffer for metadata (not data) update.
 * @handle: transaction to add buffer modifications to
 * @bh:     bh to be used for metadata writes
 *
 * Returns an error code or 0 on success.
 *
 * In full data journalling mode the buffer may be of type BJ_AsyncData,
 * because we're write()ing a buffer which is also part of a shared mapping.
 */
int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
{
	struct journal_head *jh = jbd2_journal_add_journal_head(bh);
	int rc;

	/* We do not want to get caught playing with fields which the
	 * log thread also manipulates.  Make sure that the buffer
	 * completes any outstanding IO before proceeding. */
	rc = do_get_write_access(handle, jh, 0);
	jbd2_journal_put_journal_head(jh);
	return rc;
}
/*
 * When the user wants to journal a newly created buffer_head
 * (ie. getblk() returned a new buffer and we are going to populate it
 * manually rather than reading off disk), then we need to keep the
 * buffer_head locked until it has been completely filled with new
 * data.  In this case, we should be able to make the assertion that
 * the bh is not already part of an existing transaction.
 *
 * The buffer should already be locked by the caller by this point.
 * There is no lock ranking violation: it was a newly created,
 * unlocked buffer beforehand. */

/**
 * int jbd2_journal_get_create_access () - notify intent to use newly created bh
 * @handle: transaction to new buffer to
 * @bh: new buffer.
 *
 * Call this if you create a new bh.
 */
int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;
	struct journal_head *jh = jbd2_journal_add_journal_head(bh);
	int err;

	jbd_debug(5, "journal_head %p\n", jh);
	err = -EROFS;
	if (is_handle_aborted(handle))
		goto out;
	err = 0;

	JBUFFER_TRACE(jh, "entry");
	/*
	 * The buffer may already belong to this transaction due to pre-zeroing
	 * in the filesystem's new_block code.  It may also be on the previous,
	 * committing transaction's lists, but it HAS to be in Forget state in
	 * that case: the transaction must have deleted the buffer for it to be
	 * reused here.
	 */
	jbd_lock_bh_state(bh);
	spin_lock(&journal->j_list_lock);
	J_ASSERT_JH(jh, (jh->b_transaction == transaction ||
		jh->b_transaction == NULL ||
		(jh->b_transaction == journal->j_committing_transaction &&
			  jh->b_jlist == BJ_Forget)));

	J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
	J_ASSERT_JH(jh, buffer_locked(jh2bh(jh)));

	if (jh->b_transaction == NULL) {
		/*
		 * Previous jbd2_journal_forget() could have left the buffer
		 * with jbddirty bit set because it was being committed. When
		 * the commit finished, we've filed the buffer for
		 * checkpointing and marked it dirty. Now we are reallocating
		 * the buffer so the transaction freeing it must have
		 * committed and so it's safe to clear the dirty bit.
		 */
		clear_buffer_dirty(jh2bh(jh));
		/* first access by this transaction */
		jh->b_modified = 0;

		JBUFFER_TRACE(jh, "file as BJ_Reserved");
		__jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
	} else if (jh->b_transaction == journal->j_committing_transaction) {
		/* first access by this transaction */
		jh->b_modified = 0;

		JBUFFER_TRACE(jh, "set next transaction");
		jh->b_next_transaction = transaction;
	}
	spin_unlock(&journal->j_list_lock);
	jbd_unlock_bh_state(bh);

	/*
	 * akpm: I added this.  ext3_alloc_branch can pick up new indirect
	 * blocks which contain freed but then revoked metadata.  We need
	 * to cancel the revoke in case we end up freeing it yet again
	 * and the reallocating as data - this would cause a second revoke,
	 * which hits an assertion error.
	 */
	JBUFFER_TRACE(jh, "cancelling revoke");
	jbd2_journal_cancel_revoke(handle, jh);
out:
	jbd2_journal_put_journal_head(jh);
	return err;
}
/**
 * int jbd2_journal_get_undo_access() - Notify intent to modify metadata with
 *     non-rewindable consequences
 * @handle: transaction
 * @bh: buffer to undo
 *
 * Sometimes there is a need to distinguish between metadata which has
 * been committed to disk and that which has not.  The ext3fs code uses
 * this for freeing and allocating space, we have to make sure that we
 * do not reuse freed space until the deallocation has been committed,
 * since if we overwrote that space we would make the delete
 * un-rewindable in case of a crash.
 *
 * To deal with that, jbd2_journal_get_undo_access requests write access to a
 * buffer for parts of non-rewindable operations such as delete
 * operations on the bitmaps.  The journaling code must keep a copy of
 * the buffer's contents prior to the undo_access call until such time
 * as we know that the buffer has definitely been committed to disk.
 *
 * We never need to know which transaction the committed data is part
 * of, buffers touched here are guaranteed to be dirtied later and so
 * will be committed to a new transaction in due course, at which point
 * we can discard the old committed data pointer.
 *
 * Returns error number or 0 on success.
 */
int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
{
	int err;
	struct journal_head *jh = jbd2_journal_add_journal_head(bh);
	char *committed_data = NULL;

	JBUFFER_TRACE(jh, "entry");

	/*
	 * Do this first --- it can drop the journal lock, so we want to
	 * make sure that obtaining the committed_data is done
	 * atomically wrt. completion of any outstanding commits.
	 */
	err = do_get_write_access(handle, jh, 1);
	if (err)
		goto out;

repeat:
	if (!jh->b_committed_data) {
		committed_data = jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS);
		if (!committed_data) {
			printk(KERN_EMERG "%s: No memory for committed data\n",
				__func__);
			err = -ENOMEM;
			goto out;
		}
	}

	jbd_lock_bh_state(bh);
	if (!jh->b_committed_data) {
		/* Copy out the current buffer contents into the
		 * preserved, committed copy. */
		JBUFFER_TRACE(jh, "generate b_committed data");
		if (!committed_data) {
			jbd_unlock_bh_state(bh);
			goto repeat;
		}

		jh->b_committed_data = committed_data;
		committed_data = NULL;
		memcpy(jh->b_committed_data, bh->b_data, bh->b_size);
	}
	jbd_unlock_bh_state(bh);
out:
	jbd2_journal_put_journal_head(jh);
	if (unlikely(committed_data))
		jbd2_free(committed_data, bh->b_size);
	return err;
}
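
/*
 * Illustrative sketch (editor's addition): the bitmap-deallocation
 * pattern described above.  Undo access preserves the committed image
 * of the bitmap so the allocator can refuse blocks whose freeing has
 * not yet reached disk.  bitmap_bh and the bit manipulation are
 * hypothetical.
 *
 *	err = jbd2_journal_get_undo_access(handle, bitmap_bh);
 *	if (err)
 *		goto fail;
 *	... clear the allocation bit in bitmap_bh->b_data ...
 *	err = jbd2_journal_dirty_metadata(handle, bitmap_bh);
 */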
/**
 * void jbd2_journal_set_triggers() - Add triggers for commit writeout
 * @bh: buffer to trigger on
 * @type: struct jbd2_buffer_trigger_type containing the trigger(s).
 *
 * Set any triggers on this journal_head.  This is always safe, because
 * triggers for a committing buffer will be saved off, and triggers for
 * a running transaction will match the buffer in that transaction.
 *
 * Call with NULL to clear the triggers.
 */
void jbd2_journal_set_triggers(struct buffer_head *bh,
			       struct jbd2_buffer_trigger_type *type)
{
	struct journal_head *jh = jbd2_journal_grab_journal_head(bh);

	if (WARN_ON(!jh))
		return;
	jh->b_triggers = type;
	jbd2_journal_put_journal_head(jh);
}
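
/*
 * Illustrative sketch (editor's addition): a client supplies a
 * jbd2_buffer_trigger_type whose t_frozen callback runs just before the
 * buffer image is written to the journal (e.g. to recompute a block
 * checksum).  The names my_frozen/my_triggers are hypothetical.
 *
 *	static void my_frozen(struct jbd2_buffer_trigger_type *triggers,
 *			      struct buffer_head *bh, void *data, size_t size)
 *	{
 *		... recompute the checksum over data[0..size) ...
 *	}
 *
 *	static struct jbd2_buffer_trigger_type my_triggers = {
 *		.t_frozen = my_frozen,
 *	};
 *
 *	jbd2_journal_set_triggers(bh, &my_triggers);
 */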
void jbd2_buffer_frozen_trigger(struct journal_head *jh, void *mapped_data,
				struct jbd2_buffer_trigger_type *triggers)
{
	struct buffer_head *bh = jh2bh(jh);

	if (!triggers || !triggers->t_frozen)
		return;

	triggers->t_frozen(triggers, bh, mapped_data, bh->b_size);
}
void jbd2_buffer_abort_trigger(struct journal_head *jh,
			       struct jbd2_buffer_trigger_type *triggers)
{
	if (!triggers || !triggers->t_abort)
		return;

	triggers->t_abort(triggers, jh2bh(jh));
}
/**
 * int jbd2_journal_dirty_metadata() - mark a buffer as containing dirty metadata
 * @handle: transaction to add buffer to.
 * @bh: buffer to mark
 *
 * mark dirty metadata which needs to be journaled as part of the current
 * transaction.
 *
 * The buffer must have previously had jbd2_journal_get_write_access()
 * called so that it has a valid journal_head attached to the buffer
 * head.
 *
 * The buffer is placed on the transaction's metadata list and is marked
 * as belonging to the transaction.
 *
 * Returns error number or 0 on success.
 *
 * Special care needs to be taken if the buffer already belongs to the
 * current committing transaction (in which case we should have frozen
 * data present for that commit).  In that case, we don't relink the
 * buffer: that only gets done when the old transaction finally
 * completes its commit.
 */
int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;
	struct journal_head *jh;
	int ret = 0;

	if (is_handle_aborted(handle))
		goto out;
	jh = jbd2_journal_grab_journal_head(bh);
	if (!jh) {
		ret = -EUCLEAN;
		goto out;
	}
	jbd_debug(5, "journal_head %p\n", jh);
	JBUFFER_TRACE(jh, "entry");

	jbd_lock_bh_state(bh);

	if (jh->b_modified == 0) {
		/*
		 * This buffer's got modified and becoming part
		 * of the transaction. This needs to be done
		 * once a transaction -bzzz
		 */
		jh->b_modified = 1;
		J_ASSERT_JH(jh, handle->h_buffer_credits > 0);
		handle->h_buffer_credits--;
	}

	/*
	 * fastpath, to avoid expensive locking.  If this buffer is already
	 * on the running transaction's metadata list there is nothing to do.
	 * Nobody can take it off again because there is a handle open.
	 * I _think_ we're OK here with SMP barriers - a mistaken decision will
	 * result in this test being false, so we go in and take the locks.
	 */
	if (jh->b_transaction == transaction && jh->b_jlist == BJ_Metadata) {
		JBUFFER_TRACE(jh, "fastpath");
		if (unlikely(jh->b_transaction !=
			     journal->j_running_transaction)) {
			printk(KERN_EMERG "JBD: %s: "
			       "jh->b_transaction (%llu, %p, %u) != "
			       "journal->j_running_transaction (%p, %u)",
			       journal->j_devname,
			       (unsigned long long) bh->b_blocknr,
			       jh->b_transaction,
			       jh->b_transaction ? jh->b_transaction->t_tid : 0,
			       journal->j_running_transaction,
			       journal->j_running_transaction ?
			       journal->j_running_transaction->t_tid : 0);
			ret = -EINVAL;
		}
		goto out_unlock_bh;
	}

	set_buffer_jbddirty(bh);

	/*
	 * Metadata already on the current transaction list doesn't
	 * need to be filed.  Metadata on another transaction's list must
	 * be committing, and will be refiled once the commit completes:
	 * leave it alone for now.
	 */
	if (jh->b_transaction != transaction) {
		JBUFFER_TRACE(jh, "already on other transaction");
		if (unlikely(jh->b_transaction !=
			     journal->j_committing_transaction)) {
			printk(KERN_EMERG "JBD: %s: "
			       "jh->b_transaction (%llu, %p, %u) != "
			       "journal->j_committing_transaction (%p, %u)",
			       journal->j_devname,
			       (unsigned long long) bh->b_blocknr,
			       jh->b_transaction,
			       jh->b_transaction ? jh->b_transaction->t_tid : 0,
			       journal->j_committing_transaction,
			       journal->j_committing_transaction ?
			       journal->j_committing_transaction->t_tid : 0);
			ret = -EINVAL;
		}
		if (unlikely(jh->b_next_transaction != transaction)) {
			printk(KERN_EMERG "JBD: %s: "
			       "jh->b_next_transaction (%llu, %p, %u) != "
			       "transaction (%p, %u)",
			       journal->j_devname,
			       (unsigned long long) bh->b_blocknr,
			       jh->b_next_transaction,
			       jh->b_next_transaction ?
			       jh->b_next_transaction->t_tid : 0,
			       transaction, transaction->t_tid);
			ret = -EINVAL;
		}
		/* And this case is illegal: we can't reuse another
		 * transaction's data buffer, ever. */
		goto out_unlock_bh;
	}

	/* That test should have eliminated the following case: */
	J_ASSERT_JH(jh, jh->b_frozen_data == NULL);

	JBUFFER_TRACE(jh, "file as BJ_Metadata");
	spin_lock(&journal->j_list_lock);
	__jbd2_journal_file_buffer(jh, handle->h_transaction, BJ_Metadata);
	spin_unlock(&journal->j_list_lock);
out_unlock_bh:
	jbd_unlock_bh_state(bh);
	jbd2_journal_put_journal_head(jh);
out:
	JBUFFER_TRACE(jh, "exit");
	WARN_ON(ret);	/* All errors are bugs, so dump the stack */
	return ret;
}
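
/*
 * Illustrative sketch (editor's addition): the canonical sequence for
 * modifying a metadata block.  Write access must be requested *before*
 * the buffer contents change, and jbd2_journal_dirty_metadata() must be
 * called *after*, while the handle is still open.
 *
 *	err = jbd2_journal_get_write_access(handle, bh);
 *	if (err)
 *		goto fail;
 *	... modify bh->b_data ...
 *	err = jbd2_journal_dirty_metadata(handle, bh);
 *	if (err)
 *		goto fail;
 */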
/**
 * void jbd2_journal_forget() - bforget() for potentially-journaled buffers.
 * @handle: transaction handle
 * @bh:     bh to 'forget'
 *
 * We can only do the bforget if there are no commits pending against the
 * buffer.  If the buffer is dirty in the current running transaction we
 * can safely unlink it.
 *
 * bh may not be a journalled buffer at all - it may be a non-JBD
 * buffer which came off the hashtable.  Check for this.
 *
 * Decrements bh->b_count by one.
 *
 * Allow this call even if the handle has aborted --- it may be part of
 * the caller's cleanup after an abort.
 */
int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;
	struct journal_head *jh;
	int drop_reserve = 0;
	int err = 0;
	int was_modified = 0;

	BUFFER_TRACE(bh, "entry");

	jbd_lock_bh_state(bh);
	spin_lock(&journal->j_list_lock);

	if (!buffer_jbd(bh))
		goto not_jbd;
	jh = bh2jh(bh);

	/* Critical error: attempting to delete a bitmap buffer, maybe?
	 * Don't do any jbd operations, and return an error. */
	if (!J_EXPECT_JH(jh, !jh->b_committed_data,
			 "inconsistent data on disk")) {
		err = -EIO;
		goto not_jbd;
	}

	/* keep track of whether or not this transaction modified us */
	was_modified = jh->b_modified;

	/*
	 * The buffer's going from the transaction, we must drop
	 * all references -bzzz
	 */
	jh->b_modified = 0;

	if (jh->b_transaction == handle->h_transaction) {
		J_ASSERT_JH(jh, !jh->b_frozen_data);

		/* If we are forgetting a buffer which is already part
		 * of this transaction, then we can just drop it from
		 * the transaction immediately. */
		clear_buffer_dirty(bh);
		clear_buffer_jbddirty(bh);

		JBUFFER_TRACE(jh, "belongs to current transaction: unfile");

		/*
		 * we only want to drop a reference if this transaction
		 * modified the buffer
		 */
		if (was_modified)
			drop_reserve = 1;

		/*
		 * We are no longer going to journal this buffer.
		 * However, the commit of this transaction is still
		 * important to the buffer: the delete that we are now
		 * processing might obsolete an old log entry, so by
		 * committing, we can satisfy the buffer's checkpoint.
		 *
		 * So, if we have a checkpoint on the buffer, we should
		 * now refile the buffer on our BJ_Forget list so that
		 * we know to remove the checkpoint after we commit.
		 */
		if (jh->b_cp_transaction) {
			__jbd2_journal_temp_unlink_buffer(jh);
			__jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
		} else {
			__jbd2_journal_unfile_buffer(jh);
			if (!buffer_jbd(bh)) {
				spin_unlock(&journal->j_list_lock);
				jbd_unlock_bh_state(bh);
				__bforget(bh);
				goto drop;
			}
		}
	} else if (jh->b_transaction) {
		J_ASSERT_JH(jh, (jh->b_transaction ==
				 journal->j_committing_transaction));
		/* However, if the buffer is still owned by a prior
		 * (committing) transaction, we can't drop it yet... */
		JBUFFER_TRACE(jh, "belongs to older transaction");
		/* ... but we CAN drop it from the new transaction if we
		 * have also modified it since the original commit. */

		if (jh->b_next_transaction) {
			J_ASSERT(jh->b_next_transaction == transaction);
			jh->b_next_transaction = NULL;

			/*
			 * only drop a reference if this transaction modified
			 * the buffer
			 */
			if (was_modified)
				drop_reserve = 1;
		}
	}

not_jbd:
	spin_unlock(&journal->j_list_lock);
	jbd_unlock_bh_state(bh);
	__brelse(bh);
drop:
	if (drop_reserve) {
		/* no need to reserve log space for this block -bzzz */
		handle->h_buffer_credits++;
	}
	return err;
}
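
/*
 * Illustrative sketch (editor's addition): when a filesystem frees a
 * metadata block it modified earlier in the running transaction, it
 * forgets the buffer instead of journaling it; as noted above, the
 * credit is then handed back to the handle.  Remember that the call
 * consumes one reference to bh, like bforget().
 *
 *	... block is being deallocated in this transaction ...
 *	err = jbd2_journal_forget(handle, bh);
 */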
/**
 * int jbd2_journal_stop() - complete a transaction
 * @handle: transaction to complete.
 *
 * All done for a particular handle.
 *
 * There is not much action needed here.  We just return any remaining
 * buffer credits to the transaction and remove the handle.  The only
 * complication is that we need to start a commit operation if the
 * filesystem is marked for synchronous update.
 *
 * jbd2_journal_stop itself will not usually return an error, but it may
 * do so in unusual circumstances.  In particular, expect it to
 * return -EIO if a jbd2_journal_abort has been executed since the
 * transaction began.
 */
int jbd2_journal_stop(handle_t *handle)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;
	int err, wait_for_commit = 0;
	tid_t tid;
	pid_t pid;

	J_ASSERT(journal_current_handle() == handle);

	if (is_handle_aborted(handle))
		err = -EIO;
	else {
		J_ASSERT(atomic_read(&transaction->t_updates) > 0);
		err = 0;
	}

	if (--handle->h_ref > 0) {
		jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
			  handle->h_ref);
		return err;
	}

	jbd_debug(4, "Handle %p going down\n", handle);
	trace_jbd2_handle_stats(journal->j_fs_dev->bd_dev,
				handle->h_transaction->t_tid,
				handle->h_type, handle->h_line_no,
				jiffies - handle->h_start_jiffies,
				handle->h_sync, handle->h_requested_credits,
				(handle->h_requested_credits -
				 handle->h_buffer_credits));
	/*
	 * Implement synchronous transaction batching.  If the handle
	 * was synchronous, don't force a commit immediately.  Let's
	 * yield and let another thread piggyback onto this
	 * transaction.  Keep doing that while new threads continue to
	 * arrive.  It doesn't cost much - we're about to run a commit
	 * and sleep on IO anyway.  Speeds up many-threaded, many-dir
	 * operations by 30x or more...
	 *
	 * We try and optimize the sleep time against what the
	 * underlying disk can do, instead of having a static sleep
	 * time.  This is useful for the case where our storage is so
	 * fast that it is more optimal to go ahead and force a flush
	 * and wait for the transaction to be committed than it is to
	 * wait for an arbitrary amount of time for new writers to
	 * join the transaction.  We achieve this by measuring how
	 * long it takes to commit a transaction, and compare it with
	 * how long this transaction has been running, and if run time
	 * < commit time then we sleep for the delta and commit.  This
	 * greatly helps super fast disks that would see slowdowns as
	 * more threads started doing fsyncs.
	 *
	 * But don't do this if this process was the most recent one
	 * to perform a synchronous write.  We do this to detect the
	 * case where a single process is doing a stream of sync
	 * writes.  No point in waiting for joiners in that case.
	 */
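	/*
	 * Worked example (editor's note, figures hypothetical): with
	 * j_average_commit_time around 5 ms and a transaction that has
	 * only been running for 2 ms, trans_time < commit_time below, so
	 * the caller sleeps for roughly one average commit time (after
	 * commit_time is clamped to the range
	 * [1000 * j_min_batch_time, 1000 * j_max_batch_time] ns) to give
	 * other synchronous writers a window to join the transaction.
	 */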
	pid = current->pid;
	if (handle->h_sync && journal->j_last_sync_writer != pid) {
		u64 commit_time, trans_time;

		journal->j_last_sync_writer = pid;

		read_lock(&journal->j_state_lock);
		commit_time = journal->j_average_commit_time;
		read_unlock(&journal->j_state_lock);

		trans_time = ktime_to_ns(ktime_sub(ktime_get(),
						   transaction->t_start_time));

		commit_time = max_t(u64, commit_time,
				    1000*journal->j_min_batch_time);
		commit_time = min_t(u64, commit_time,
				    1000*journal->j_max_batch_time);

		if (trans_time < commit_time) {
			ktime_t expires = ktime_add_ns(ktime_get(),
						       commit_time);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);
		}
	}
	if (handle->h_sync)
		transaction->t_synchronous_commit = 1;
	current->journal_info = NULL;
	atomic_sub(handle->h_buffer_credits,
		   &transaction->t_outstanding_credits);

	/*
	 * If the handle is marked SYNC, we need to set another commit
	 * going!  We also want to force a commit if the current
	 * transaction is occupying too much of the log, or if the
	 * transaction is too old now.
	 */
	if (handle->h_sync ||
	    (atomic_read(&transaction->t_outstanding_credits) >
	     journal->j_max_transaction_buffers) ||
	    time_after_eq(jiffies, transaction->t_expires)) {
		/* Do this even for aborted journals: an abort still
		 * completes the commit thread, it just doesn't write
		 * anything to disk. */

		jbd_debug(2, "transaction too old, requesting commit for "
					"handle %p\n", handle);
		/* This is non-blocking */
		jbd2_log_start_commit(journal, transaction->t_tid);

		/*
		 * Special case: JBD2_SYNC synchronous updates require us
		 * to wait for the commit to complete.
		 */
		if (handle->h_sync && !(current->flags & PF_MEMALLOC))
			wait_for_commit = 1;
	}

	/*
	 * Once we drop t_updates, if it goes to zero the transaction
	 * could start committing on us and eventually disappear.  So
	 * once we do this, we must not dereference transaction
	 * pointer again.
	 */
	tid = transaction->t_tid;
	if (atomic_dec_and_test(&transaction->t_updates)) {
		wake_up(&journal->j_wait_updates);
		if (journal->j_barrier_count)
			wake_up(&journal->j_wait_transaction_locked);
	}

	if (wait_for_commit)
		err = jbd2_log_wait_commit(journal, tid);

	lock_map_release(&handle->h_lockdep_map);

	jbd2_free_handle(handle);
	return err;
}
/**
 * int jbd2_journal_force_commit() - force any uncommitted transactions
 * @journal: journal to force
 *
 * For synchronous operations: force any uncommitted transactions
 * to disk.  May seem kludgy, but it reuses all the handle batching
 * code in a very simple manner.
 */
int jbd2_journal_force_commit(journal_t *journal)
{
	handle_t *handle;
	int ret;

	handle = jbd2_journal_start(journal, 1);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
	} else {
		handle->h_sync = 1;
		ret = jbd2_journal_stop(handle);
	}
	return ret;
}
/*
 *
 * List management code snippets: various functions for manipulating the
 * transaction buffer lists.
 *
 */

/*
 * Append a buffer to a transaction list, given the transaction's list head
 * pointer.
 *
 * j_list_lock is held.
 *
 * jbd_lock_bh_state(jh2bh(jh)) is held.
 */
static inline void
__blist_add_buffer(struct journal_head **list, struct journal_head *jh)
{
	if (!*list) {
		jh->b_tnext = jh->b_tprev = jh;
		*list = jh;
	} else {
		/* Insert at the tail of the list to preserve order */
		struct journal_head *first = *list, *last = first->b_tprev;
		jh->b_tprev = last;
		jh->b_tnext = first;
		last->b_tnext = first->b_tprev = jh;
	}
}
/*
 * Remove a buffer from a transaction list, given the transaction's list
 * head pointer.
 *
 * Called with j_list_lock held, and the journal may not be locked.
 *
 * jbd_lock_bh_state(jh2bh(jh)) is held.
 */
static inline void
__blist_del_buffer(struct journal_head **list, struct journal_head *jh)
{
	if (*list == jh) {
		*list = jh->b_tnext;
		if (*list == jh)
			*list = NULL;
	}
	jh->b_tprev->b_tnext = jh->b_tnext;
	jh->b_tnext->b_tprev = jh->b_tprev;
}
/*
 * Remove a buffer from the appropriate transaction list.
 *
 * Note that this function can *change* the value of
 * bh->b_transaction->t_buffers, t_forget, t_shadow_list, t_log_list or
 * t_reserved_list.  If the caller is holding onto a copy of one of these
 * pointers, it could go bad.  Generally the caller needs to re-read the
 * pointer from the transaction_t.
 *
 * Called under j_list_lock.
 */
static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh)
{
	struct journal_head **list = NULL;
	transaction_t *transaction;
	struct buffer_head *bh = jh2bh(jh);

	J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
	transaction = jh->b_transaction;
	if (transaction)
		assert_spin_locked(&transaction->t_journal->j_list_lock);

	J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
	if (jh->b_jlist != BJ_None)
		J_ASSERT_JH(jh, transaction != NULL);

	switch (jh->b_jlist) {
	case BJ_None:
		return;
	case BJ_Metadata:
		transaction->t_nr_buffers--;
		J_ASSERT_JH(jh, transaction->t_nr_buffers >= 0);
		list = &transaction->t_buffers;
		break;
	case BJ_Forget:
		list = &transaction->t_forget;
		break;
	case BJ_Shadow:
		list = &transaction->t_shadow_list;
		break;
	case BJ_Reserved:
		list = &transaction->t_reserved_list;
		break;
	}

	__blist_del_buffer(list, jh);
	jh->b_jlist = BJ_None;
	if (test_clear_buffer_jbddirty(bh))
		mark_buffer_dirty(bh);	/* Expose it to the VM */
}
/*
 * Remove buffer from all transactions.
 *
 * Called with bh_state lock and j_list_lock
 *
 * jh and bh may be already freed when this function returns.
 */
static void __jbd2_journal_unfile_buffer(struct journal_head *jh)
{
	__jbd2_journal_temp_unlink_buffer(jh);
	jh->b_transaction = NULL;
	jbd2_journal_put_journal_head(jh);
}
void jbd2_journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
{
	struct buffer_head *bh = jh2bh(jh);

	/* Get reference so that buffer cannot be freed before we unlock it */
	get_bh(bh);
	jbd_lock_bh_state(bh);
	spin_lock(&journal->j_list_lock);
	__jbd2_journal_unfile_buffer(jh);
	spin_unlock(&journal->j_list_lock);
	jbd_unlock_bh_state(bh);
	__brelse(bh);
}
/*
 * Called from jbd2_journal_try_to_free_buffers().
 *
 * Called under jbd_lock_bh_state(bh)
 */
static void
__journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh)
{
	struct journal_head *jh;

	jh = bh2jh(bh);

	if (buffer_locked(bh) || buffer_dirty(bh))
		goto out;

	if (jh->b_next_transaction != NULL)
		goto out;

	spin_lock(&journal->j_list_lock);
	if (jh->b_cp_transaction != NULL && jh->b_transaction == NULL) {
		/* written-back checkpointed metadata buffer */
		JBUFFER_TRACE(jh, "remove from checkpoint list");
		__jbd2_journal_remove_checkpoint(jh);
	}
	spin_unlock(&journal->j_list_lock);
out:
	return;
}
/**
 * int jbd2_journal_try_to_free_buffers() - try to free page buffers.
 * @journal: journal for operation
 * @page: to try and free
 * @gfp_mask: we use the mask to detect how hard should we try to release
 * buffers. If __GFP_WAIT and __GFP_FS is set, we wait for commit code to
 * release the buffers.
 *
 * For all the buffers on this page,
 * if they are fully written out ordered data, move them onto BUF_CLEAN
 * so try_to_free_buffers() can reap them.
 *
 * This function returns non-zero if we wish try_to_free_buffers()
 * to be called. We do this if the page is releasable by try_to_free_buffers().
 * We also do it if the page has locked or dirty buffers and the caller wants
 * us to perform sync or async writeout.
 *
 * This complicates JBD locking somewhat.  We aren't protected by the
 * BKL here.  We wish to remove the buffer from its committing or
 * running transaction's ->t_datalist via __jbd2_journal_unfile_buffer.
 *
 * This may *change* the value of transaction_t->t_datalist, so anyone
 * who looks at t_datalist needs to lock against this function.
 *
 * Even worse, someone may be doing a jbd2_journal_dirty_data on this
 * buffer.  So we need to lock against that.  jbd2_journal_dirty_data()
 * will come out of the lock with the buffer dirty, which makes it
 * ineligible for release here.
 *
 * Who else is affected by this?  hmm...  Really the only contender
 * is do_get_write_access() - it could be looking at the buffer while
 * journal_try_to_free_buffer() is changing its state.  But that
 * cannot happen because we never reallocate freed data as metadata
 * while the data is part of a transaction.  Yes?
 *
 * Return 0 on failure, 1 on success
 */
int jbd2_journal_try_to_free_buffers(journal_t *journal,
				     struct page *page, gfp_t gfp_mask)
{
	struct buffer_head *head;
	struct buffer_head *bh;
	int ret = 0;

	J_ASSERT(PageLocked(page));

	head = page_buffers(page);
	bh = head;
	do {
		struct journal_head *jh;

		/*
		 * We take our own ref against the journal_head here to avoid
		 * having to add tons of locking around each instance of
		 * jbd2_journal_put_journal_head().
		 */
		jh = jbd2_journal_grab_journal_head(bh);
		if (!jh)
			continue;

		jbd_lock_bh_state(bh);
		__journal_try_to_free_buffer(journal, bh);
		jbd2_journal_put_journal_head(jh);
		jbd_unlock_bh_state(bh);
		if (buffer_jbd(bh))
			goto busy;
	} while ((bh = bh->b_this_page) != head);

	ret = try_to_free_buffers(page);

busy:
	return ret;
}
/*
 * This buffer is no longer needed.  If it is on an older transaction's
 * checkpoint list we need to record it on this transaction's forget list
 * to pin this buffer (and hence its checkpointing transaction) down until
 * this transaction commits.  If the buffer isn't on a checkpoint list, we
 * release it.
 * Returns non-zero if JBD no longer has an interest in the buffer.
 *
 * Called under j_list_lock.
 *
 * Called under jbd_lock_bh_state(bh).
 */
static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
{
	int may_free = 1;
	struct buffer_head *bh = jh2bh(jh);

	if (jh->b_cp_transaction) {
		JBUFFER_TRACE(jh, "on running+cp transaction");
		__jbd2_journal_temp_unlink_buffer(jh);
		/*
		 * We don't want to write the buffer anymore, clear the
		 * bit so that we don't confuse checks in
		 * __journal_file_buffer
		 */
		clear_buffer_dirty(bh);
		__jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
		may_free = 0;
	} else {
		JBUFFER_TRACE(jh, "on running transaction");
		__jbd2_journal_unfile_buffer(jh);
	}
	return may_free;
}
/*
 * jbd2_journal_invalidatepage
 *
 * This code is tricky.  It has a number of cases to deal with.
 *
 * There are two invariants which this code relies on:
 *
 * i_size must be updated on disk before we start calling invalidatepage on the
 * data.
 *
 *  This is done in ext3 by defining an ext3_setattr method which
 *  updates i_size before truncate gets going.  By maintaining this
 *  invariant, we can be sure that it is safe to throw away any buffers
 *  attached to the current transaction: once the transaction commits,
 *  we know that the data will not be needed.
 *
 *  Note however that we can *not* throw away data belonging to the
 *  previous, committing transaction!
 *
 * Any disk blocks which *are* part of the previous, committing
 * transaction (and which therefore cannot be discarded immediately) are
 * not going to be reused in the new running transaction
 *
 *  The bitmap committed_data images guarantee this: any block which is
 *  allocated in one transaction and removed in the next will be marked
 *  as in-use in the committed_data bitmap, so cannot be reused until
 *  the next transaction to delete the block commits.  This means that
 *  leaving committing buffers dirty is quite safe: the disk blocks
 *  cannot be reallocated to a different file and so buffer aliasing is
 *  not possible.
 *
 *
 * The above applies mainly to ordered data mode.  In writeback mode we
 * don't make guarantees about the order in which data hits disk --- in
 * particular we don't guarantee that new dirty data is flushed before
 * transaction commit --- so it is always safe just to discard data
 * immediately in that mode.  --sct
 */

/*
 * The journal_unmap_buffer helper function returns zero if the buffer
 * concerned remains pinned as an anonymous buffer belonging to an older
 * transaction.
 *
 * We're outside-transaction here.  Either or both of j_running_transaction
 * and j_committing_transaction may be NULL.
 */
static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
				int partial_page)
{
	transaction_t *transaction;
	struct journal_head *jh;
	int may_free = 1;

	BUFFER_TRACE(bh, "entry");

	/*
	 * It is safe to proceed here without the j_list_lock because the
	 * buffers cannot be stolen by try_to_free_buffers as long as we are
	 * holding the page lock. --sct
	 */

	if (!buffer_jbd(bh))
		goto zap_buffer_unlocked;

	/* OK, we have data buffer in journaled mode */
	write_lock(&journal->j_state_lock);
	jbd_lock_bh_state(bh);
	spin_lock(&journal->j_list_lock);

	jh = jbd2_journal_grab_journal_head(bh);
	if (!jh)
		goto zap_buffer_no_jh;

	/*
	 * We cannot remove the buffer from checkpoint lists until the
	 * transaction adding inode to orphan list (let's call it T)
	 * is committed.  Otherwise if the transaction changing the
	 * buffer would be cleaned from the journal before T is
	 * committed, a crash will cause that the correct contents of
	 * the buffer will be lost.  On the other hand we have to
	 * clear the buffer dirty bit at latest at the moment when the
	 * transaction marking the buffer as freed in the filesystem
	 * structures is committed because from that moment on the
	 * block can be reallocated and used by a different page.
	 * Since the block hasn't been freed yet but the inode has
	 * already been added to orphan list, it is safe for us to add
	 * the buffer to BJ_Forget list of the newest transaction.
	 *
	 * Also we have to clear buffer_mapped flag of a truncated buffer
	 * because the buffer_head may be attached to the page straddling
	 * i_size (can happen only when blocksize < pagesize) and thus the
	 * buffer_head can be reused when the file is extended again. So we end
	 * up keeping around invalidated buffers attached to transactions'
	 * BJ_Forget list just to stop checkpointing code from cleaning up
	 * the transaction this buffer was modified in.
	 */
	transaction = jh->b_transaction;
	if (transaction == NULL) {
		/* First case: not on any transaction.  If it
		 * has no checkpoint link, then we can zap it:
		 * it's a writeback-mode buffer so we don't care
		 * if it hits disk safely. */
		if (!jh->b_cp_transaction) {
			JBUFFER_TRACE(jh, "not on any transaction: zap");
			goto zap_buffer;
		}

		if (!buffer_dirty(bh)) {
			/* bdflush has written it.  We can drop it now */
			goto zap_buffer;
		}

		/* OK, it must be in the journal but still not
		 * written fully to disk: it's metadata or
		 * journaled data... */

		if (journal->j_running_transaction) {
			/* ... and once the current transaction has
			 * committed, the buffer won't be needed any
			 * longer. */
			JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget");
			may_free = __dispose_buffer(jh,
					journal->j_running_transaction);
			goto zap_buffer;
		} else {
			/* There is no currently-running transaction. So the
			 * orphan record which we wrote for this file must have
			 * passed into commit.  We must attach this buffer to
			 * the committing transaction, if it exists. */
			if (journal->j_committing_transaction) {
				JBUFFER_TRACE(jh, "give to committing trans");
				may_free = __dispose_buffer(jh,
					journal->j_committing_transaction);
				goto zap_buffer;
			} else {
				/* The orphan record's transaction has
				 * committed.  We can cleanse this buffer */
				clear_buffer_jbddirty(bh);
				goto zap_buffer;
			}
		}
	} else if (transaction == journal->j_committing_transaction) {
		JBUFFER_TRACE(jh, "on committing transaction");
		/*
		 * The buffer is committing, we simply cannot touch
		 * it. If the page is straddling i_size we have to wait
		 * for commit and try again.
		 */
		if (partial_page) {
			jbd2_journal_put_journal_head(jh);
			spin_unlock(&journal->j_list_lock);
			jbd_unlock_bh_state(bh);
			write_unlock(&journal->j_state_lock);
			return -EBUSY;
		}
		/*
		 * OK, buffer won't be reachable after truncate. We just set
		 * j_next_transaction to the running transaction (if there is
		 * one) and mark buffer as freed so that commit code knows it
		 * should clear dirty bits when it is done with the buffer.
		 */
		set_buffer_freed(bh);
		if (journal->j_running_transaction && buffer_jbddirty(bh))
			jh->b_next_transaction = journal->j_running_transaction;
		jbd2_journal_put_journal_head(jh);
		spin_unlock(&journal->j_list_lock);
		jbd_unlock_bh_state(bh);
		write_unlock(&journal->j_state_lock);
		return 0;
	} else {
		/* Good, the buffer belongs to the running transaction.
		 * We are writing our own transaction's data, not any
		 * previous one's, so it is safe to throw it away
		 * (remember that we expect the filesystem to have set
		 * i_size already for this truncate so recovery will not
		 * expose the disk blocks we are discarding here.) */
		J_ASSERT_JH(jh, transaction == journal->j_running_transaction);
		JBUFFER_TRACE(jh, "on running transaction");
		may_free = __dispose_buffer(jh, transaction);
	}

zap_buffer:
	/*
	 * This is tricky. Although the buffer is truncated, it may be reused
	 * if blocksize < pagesize and it is attached to the page straddling
	 * EOF. Since the buffer might have been added to BJ_Forget list of the
	 * running transaction, journal_get_write_access() won't clear
	 * b_modified and credit accounting gets confused. So clear b_modified
	 * here.
	 */
	jh->b_modified = 0;
	jbd2_journal_put_journal_head(jh);
zap_buffer_no_jh:
	spin_unlock(&journal->j_list_lock);
	jbd_unlock_bh_state(bh);
	write_unlock(&journal->j_state_lock);
zap_buffer_unlocked:
	clear_buffer_dirty(bh);
	J_ASSERT_BH(bh, !buffer_jbddirty(bh));
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
	bh->b_bdev = NULL;
	return may_free;
}
/**
 * void jbd2_journal_invalidatepage()
 * @journal: journal to use for flush...
 * @page:    page to flush
 * @offset:  start of the range to invalidate
 * @length:  length of the range to invalidate
 *
 * Reap page buffers containing data in the specified range of the page.
 * Can return -EBUSY if buffers are part of the committing transaction and
 * the page is straddling i_size. Caller then has to wait for current commit
 * and try again.
 */
int jbd2_journal_invalidatepage(journal_t *journal,
				struct page *page,
				unsigned int offset,
				unsigned int length)
{
	struct buffer_head *head, *bh, *next;
	unsigned int stop = offset + length;
	unsigned int curr_off = 0;
	int partial_page = (offset || length < PAGE_CACHE_SIZE);
	int may_free = 1;
	int ret = 0;

	if (!PageLocked(page))
		BUG();
	if (!page_has_buffers(page))
		return 0;

	BUG_ON(stop > PAGE_CACHE_SIZE || stop < length);

	/* We will potentially be playing with lists other than just the
	 * data lists (especially for journaled data mode), so be
	 * cautious in our locking. */

	head = bh = page_buffers(page);
	do {
		unsigned int next_off = curr_off + bh->b_size;
		next = bh->b_this_page;

		if (next_off > stop)
			return 0;

		if (offset <= curr_off) {
			/* This block is wholly outside the truncation point */
			lock_buffer(bh);
			ret = journal_unmap_buffer(journal, bh, partial_page);
			unlock_buffer(bh);
			if (ret < 0)
				return ret;
			may_free &= ret;
		}
		curr_off = next_off;
		bh = next;

	} while (bh != head);

	if (!partial_page) {
		if (may_free && try_to_free_buffers(page))
			J_ASSERT(!page_has_buffers(page));
	}
	return 0;
}
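/*
 * Usage sketch (illustrative, not a caller in this file): a filesystem
 * running in data=journal mode could forward page invalidation here from
 * its ->invalidatepage address_space operation.  MYFS_JOURNAL() is a
 * hypothetical helper returning the journal_t for the page's inode; on
 * -EBUSY the caller is expected to wait for the running commit and retry,
 * as described above:
 *
 *	static int myfs_journalled_invalidatepage(struct page *page,
 *						  unsigned int offset,
 *						  unsigned int length)
 *	{
 *		journal_t *journal = MYFS_JOURNAL(page->mapping->host);
 *
 *		return jbd2_journal_invalidatepage(journal, page,
 *						   offset, length);
 *	}
 */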
/*
 * File a buffer on the given transaction list.
 */
void __jbd2_journal_file_buffer(struct journal_head *jh,
			transaction_t *transaction, int jlist)
{
	struct journal_head **list = NULL;
	int was_dirty = 0;
	struct buffer_head *bh = jh2bh(jh);

	J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
	assert_spin_locked(&transaction->t_journal->j_list_lock);

	J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
	J_ASSERT_JH(jh, jh->b_transaction == transaction ||
				jh->b_transaction == NULL);

	if (jh->b_transaction && jh->b_jlist == jlist)
		return;

	if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
	    jlist == BJ_Shadow || jlist == BJ_Forget) {
		/*
		 * For metadata buffers, we track dirty bit in buffer_jbddirty
		 * instead of buffer_dirty. We should not see a dirty bit set
		 * here because we clear it in do_get_write_access but e.g.
		 * tune2fs can modify the sb and set the dirty bit at any time
		 * so we try to gracefully handle that.
		 */
		if (buffer_dirty(bh))
			warn_dirty_buffer(bh);
		if (test_clear_buffer_dirty(bh) ||
		    test_clear_buffer_jbddirty(bh))
			was_dirty = 1;
	}

	if (jh->b_transaction)
		__jbd2_journal_temp_unlink_buffer(jh);
	else
		jbd2_journal_grab_journal_head(bh);
	jh->b_transaction = transaction;

	switch (jlist) {
	case BJ_None:
		J_ASSERT_JH(jh, !jh->b_committed_data);
		J_ASSERT_JH(jh, !jh->b_frozen_data);
		return;
	case BJ_Metadata:
		transaction->t_nr_buffers++;
		list = &transaction->t_buffers;
		break;
	case BJ_Forget:
		list = &transaction->t_forget;
		break;
	case BJ_Shadow:
		list = &transaction->t_shadow_list;
		break;
	case BJ_Reserved:
		list = &transaction->t_reserved_list;
		break;
	}

	__blist_add_buffer(list, jh);
	jh->b_jlist = jlist;

	if (was_dirty)
		set_buffer_jbddirty(bh);
}
void jbd2_journal_file_buffer(struct journal_head *jh,
				transaction_t *transaction, int jlist)
{
	jbd_lock_bh_state(jh2bh(jh));
	spin_lock(&transaction->t_journal->j_list_lock);
	__jbd2_journal_file_buffer(jh, transaction, jlist);
	spin_unlock(&transaction->t_journal->j_list_lock);
	jbd_unlock_bh_state(jh2bh(jh));
}
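/*
 * For illustration (a sketch, not a caller in this file): with a pinned
 * jh in hand, moving the buffer to the running transaction's forget list
 * with full locking is simply:
 *
 *	jbd2_journal_file_buffer(jh, journal->j_running_transaction,
 *				 BJ_Forget);
 *
 * Callers that already hold j_list_lock and the buffer state lock (as
 * commit code does) call __jbd2_journal_file_buffer() directly instead.
 */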
/*
 * Remove a buffer from its current buffer list in preparation for
 * dropping it from its current transaction entirely. If the buffer has
 * already started to be used by a subsequent transaction, refile the
 * buffer on that transaction's metadata list.
 *
 * Called under j_list_lock
 * Called under jbd_lock_bh_state(jh2bh(jh))
 *
 * jh and bh may be already free when this function returns
 */
void __jbd2_journal_refile_buffer(struct journal_head *jh)
{
	int was_dirty, jlist;
	struct buffer_head *bh = jh2bh(jh);

	J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
	if (jh->b_transaction)
		assert_spin_locked(&jh->b_transaction->t_journal->j_list_lock);

	/* If the buffer is now unused, just drop it. */
	if (jh->b_next_transaction == NULL) {
		__jbd2_journal_unfile_buffer(jh);
		return;
	}

	/*
	 * It has been modified by a later transaction: add it to the new
	 * transaction's metadata list.
	 */

	was_dirty = test_clear_buffer_jbddirty(bh);
	__jbd2_journal_temp_unlink_buffer(jh);
	/*
	 * We set b_transaction here because b_next_transaction will inherit
	 * our jh reference and thus __jbd2_journal_file_buffer() must not
	 * take a new one.
	 */
	jh->b_transaction = jh->b_next_transaction;
	jh->b_next_transaction = NULL;
	if (buffer_freed(bh))
		jlist = BJ_Forget;
	else if (jh->b_modified)
		jlist = BJ_Metadata;
	else
		jlist = BJ_Reserved;
	__jbd2_journal_file_buffer(jh, jh->b_transaction, jlist);
	J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING);

	if (was_dirty)
		set_buffer_jbddirty(bh);
}
/*
 * __jbd2_journal_refile_buffer() with necessary locking added. We take our
 * bh reference so that we can safely unlock bh.
 *
 * The jh and bh may be freed by this call.
 */
void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh)
{
	struct buffer_head *bh = jh2bh(jh);

	/* Get reference so that buffer cannot be freed before we unlock it */
	get_bh(bh);
	jbd_lock_bh_state(bh);
	spin_lock(&journal->j_list_lock);
	__jbd2_journal_refile_buffer(jh);
	jbd_unlock_bh_state(bh);
	spin_unlock(&journal->j_list_lock);
	__brelse(bh);
}
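/*
 * Calling-pattern sketch (assumed, not tied to a specific caller): since
 * the jh may be freed by the refile, a caller that still needs the
 * underlying bh afterwards must hold its own bh reference across the
 * call, mirroring what the wrapper itself does:
 *
 *	struct buffer_head *bh = jh2bh(jh);
 *
 *	get_bh(bh);
 *	jbd2_journal_refile_buffer(journal, jh);
 *	(jh must not be touched from here on: it may already be freed)
 *	__brelse(bh);
 */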
/*
 * File inode in the inode list of the handle's transaction
 */
int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;

	if (is_handle_aborted(handle))
		return -EIO;

	jbd_debug(4, "Adding inode %lu, tid:%d\n", jinode->i_vfs_inode->i_ino,
			transaction->t_tid);

	/*
	 * First check whether inode isn't already on the transaction's
	 * lists without taking the lock. Note that this check is safe
	 * without the lock as we cannot race with somebody removing inode
	 * from the transaction. The reason is that we remove inode from the
	 * transaction only in journal_release_jbd_inode() and when we commit
	 * the transaction. We are guarded from the first case by holding
	 * a reference to the inode. We are safe against the second case
	 * because if jinode->i_transaction == transaction, commit code
	 * cannot touch the transaction because we hold reference to it,
	 * and if jinode->i_next_transaction == transaction, commit code
	 * will only file the inode where we want it.
	 */
	if (jinode->i_transaction == transaction ||
	    jinode->i_next_transaction == transaction)
		return 0;

	spin_lock(&journal->j_list_lock);

	if (jinode->i_transaction == transaction ||
	    jinode->i_next_transaction == transaction)
		goto done;

	/*
	 * We only ever set this variable to 1 so the test is safe. Since
	 * t_need_data_flush is likely to be set, we do the test to save some
	 * cacheline bouncing
	 */
	if (!transaction->t_need_data_flush)
		transaction->t_need_data_flush = 1;
	/* On some different transaction's list - should be
	 * the committing one */
	if (jinode->i_transaction) {
		J_ASSERT(jinode->i_next_transaction == NULL);
		J_ASSERT(jinode->i_transaction ==
					journal->j_committing_transaction);
		jinode->i_next_transaction = transaction;
		goto done;
	}
	/* Not on any transaction list... */
	J_ASSERT(!jinode->i_next_transaction);
	jinode->i_transaction = transaction;
	list_add(&jinode->i_list, &transaction->t_inode_list);
done:
	spin_unlock(&journal->j_list_lock);

	return 0;
}
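/*
 * Usage sketch (ext4-style, with hypothetical myfs names): a filesystem
 * that embeds a struct jbd2_inode in its in-core inode files that inode
 * on the running transaction whenever it dirties page-cache data under
 * an ordered-mode handle:
 *
 *	static int myfs_file_inode(handle_t *handle, struct inode *inode)
 *	{
 *		return jbd2_journal_file_inode(handle,
 *					       &MYFS_I(inode)->jinode);
 *	}
 */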
/*
 * File truncate and transaction commit interact with each other in a
 * non-trivial way.  If a transaction writing data block A is
 * committing, we cannot discard the data by truncate until we have
 * written them.  Otherwise if we crashed after the transaction with
 * write has committed but before the transaction with truncate has
 * committed, we could see stale data in block A.  This function is a
 * helper to solve this problem.  It starts writeout of the truncated
 * part in case it is in the committing transaction.
 *
 * Filesystem code must call this function when inode is journaled in
 * ordered mode before truncation happens and after the inode has been
 * placed on orphan list with the new inode size. The second condition
 * avoids the race that someone writes new data and we start
 * committing the transaction after this function has been called but
 * before a transaction for truncate is started (and furthermore it
 * allows us to optimize the case where the addition to orphan list
 * happens in the same transaction as write --- we don't have to write
 * any data in such case).
 */
int jbd2_journal_begin_ordered_truncate(journal_t *journal,
					struct jbd2_inode *jinode,
					loff_t new_size)
{
	transaction_t *inode_trans, *commit_trans;
	int ret = 0;

	/* This is a quick check to avoid locking if not necessary */
	if (!jinode->i_transaction)
		goto out;
	/* Locks are here just to force reading of recent values, it is
	 * enough that the transaction was not committing before we started
	 * a transaction adding the inode to orphan list */
	read_lock(&journal->j_state_lock);
	commit_trans = journal->j_committing_transaction;
	read_unlock(&journal->j_state_lock);
	spin_lock(&journal->j_list_lock);
	inode_trans = jinode->i_transaction;
	spin_unlock(&journal->j_list_lock);
	if (inode_trans == commit_trans) {
		ret = filemap_fdatawrite_range(jinode->i_vfs_inode->i_mapping,
			new_size, LLONG_MAX);
		if (ret)
			jbd2_journal_abort(journal, ret);
	}
out:
	return ret;
}
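/*
 * Ordering sketch for the rules above (hypothetical myfs helpers): the
 * truncating filesystem first journals the orphan-list addition carrying
 * the new size, then calls this helper, and only then runs the
 * transaction that actually frees the blocks:
 *
 *	myfs_orphan_add(handle, inode);		(records new i_size)
 *	ret = jbd2_journal_begin_ordered_truncate(journal,
 *						  &MYFS_I(inode)->jinode,
 *						  new_size);
 *	if (!ret)
 *		myfs_truncate_blocks(inode, new_size);
 */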