transaction_t *transaction;
tid_t tid;
- spin_lock(&journal->j_state_lock);
+ read_lock(&journal->j_state_lock);
if (journal->j_running_transaction)
transaction = journal->j_running_transaction;
else
transaction = journal->j_committing_transaction;
if (transaction)
tid = transaction->t_tid;
else
tid = journal->j_commit_sequence;
- spin_unlock(&journal->j_state_lock);
+ read_unlock(&journal->j_state_lock);
ei->i_sync_tid = tid;
ei->i_datasync_tid = tid;
}
journal->j_min_batch_time = sbi->s_min_batch_time;
journal->j_max_batch_time = sbi->s_max_batch_time;
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
if (test_opt(sb, BARRIER))
journal->j_flags |= JBD2_BARRIER;
else
journal->j_flags &= ~JBD2_BARRIER;
if (test_opt(sb, DATA_ERR_ABORT))
journal->j_flags |= JBD2_ABORT_ON_SYNCDATA_ERR;
else
journal->j_flags &= ~JBD2_ABORT_ON_SYNCDATA_ERR;
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
}
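
Taken together, the two ext4 hunks above capture the rule this patch applies throughout: code that only samples journal state (here, the tid snapshot for i_sync_tid/i_datasync_tid) moves to read_lock(), while code that modifies journal fields such as j_flags or the batch times moves to write_lock(). A minimal sketch of the two sides, not part of the patch, using hypothetical helper names example_snapshot_tid() and example_set_barrier():

static tid_t example_snapshot_tid(journal_t *journal)
{
	transaction_t *transaction;
	tid_t tid;

	read_lock(&journal->j_state_lock);	/* shared: many readers may enter */
	if (journal->j_running_transaction)
		transaction = journal->j_running_transaction;
	else
		transaction = journal->j_committing_transaction;
	tid = transaction ? transaction->t_tid : journal->j_commit_sequence;
	read_unlock(&journal->j_state_lock);
	return tid;
}

static void example_set_barrier(journal_t *journal, int on)
{
	write_lock(&journal->j_state_lock);	/* exclusive: we modify j_flags */
	if (on)
		journal->j_flags |= JBD2_BARRIER;
	else
		journal->j_flags &= ~JBD2_BARRIER;
	write_unlock(&journal->j_state_lock);
}
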
static journal_t *ext4_get_journal(struct super_block *sb,
void __jbd2_log_wait_for_space(journal_t *journal)
{
int nblocks, space_left;
- assert_spin_locked(&journal->j_state_lock);
+ /* assert_spin_locked(&journal->j_state_lock) no longer applies: now a rwlock_t */
nblocks = jbd_space_needed(journal);
while (__jbd2_log_space_left(journal) < nblocks) {
if (journal->j_flags & JBD2_ABORT)
return;
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
mutex_lock(&journal->j_checkpoint_mutex);
/*
* filesystem, so abort the journal and leave a stack
* trace for forensic evidence.
*/
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
spin_lock(&journal->j_list_lock);
nblocks = jbd_space_needed(journal);
space_left = __jbd2_log_space_left(journal);
if (journal->j_committing_transaction)
tid = journal->j_committing_transaction->t_tid;
spin_unlock(&journal->j_list_lock);
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
if (chkpt) {
jbd2_log_do_checkpoint(journal);
} else if (jbd2_cleanup_journal_tail(journal) == 0) {
WARN_ON(1);
jbd2_journal_abort(journal, 0);
}
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
} else {
spin_unlock(&journal->j_list_lock);
}
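
As with the spinlock it replaces, j_state_lock must never be held across anything that sleeps, so __jbd2_log_wait_for_space() still drops the (now write) lock around j_checkpoint_mutex and the checkpoint I/O and retakes it afterwards, re-testing the space condition on every pass; note also the nesting order, with j_state_lock taken outside j_list_lock. Reduced to its locking skeleton (branches and error handling elided; this is an illustration, not the patch text):

	/* the caller holds the write lock on entry, as the old assert documented */
	while (__jbd2_log_space_left(journal) < jbd_space_needed(journal)) {
		write_unlock(&journal->j_state_lock);	/* about to block */
		mutex_lock(&journal->j_checkpoint_mutex);
		/* ... checkpoint, clean the journal tail, or wait for a commit ... */
		write_lock(&journal->j_state_lock);	/* retaken before re-testing */
		mutex_unlock(&journal->j_checkpoint_mutex);
	}
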
* next transaction ID we will write, and where it will
* start. */
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
spin_lock(&journal->j_list_lock);
transaction = journal->j_checkpoint_transactions;
if (transaction) {
/* If the oldest pinned transaction is at the tail of the log
already then there's not much we can do right now. */
if (journal->j_tail_sequence == first_tid) {
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
return 1;
}
journal->j_free += freed;
journal->j_tail_sequence = first_tid;
journal->j_tail = blocknr;
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
/*
* If there is an external journal, we need to make sure that
printk(KERN_WARNING
"JBD2: Disabling barriers on %s, "
"not supported by device\n", journal->j_devname);
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
journal->j_flags &= ~JBD2_BARRIER;
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
/* And try again, without the barrier */
lock_buffer(bh);
printk(KERN_WARNING
"JBD2: %s: disabling barries on %s - not supported "
"by device\n", __func__, journal->j_devname);
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
journal->j_flags &= ~JBD2_BARRIER;
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
lock_buffer(bh);
clear_buffer_dirty(bh);
jbd_debug(1, "JBD: starting commit of transaction %d\n",
commit_transaction->t_tid);
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
commit_transaction->t_state = T_LOCKED;
/*
TASK_UNINTERRUPTIBLE);
if (atomic_read(&commit_transaction->t_updates)) {
spin_unlock(&commit_transaction->t_handle_lock);
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
schedule();
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
spin_lock(&commit_transaction->t_handle_lock);
}
finish_wait(&journal->j_wait_updates, &wait);
start_time = ktime_get();
commit_transaction->t_log_start = journal->j_head;
wake_up(&journal->j_wait_transaction_locked);
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
jbd_debug (3, "JBD: commit phase 2\n");
* transaction! Now comes the tricky part: we need to write out
* metadata. Loop over the transaction's entire buffer list:
*/
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
commit_transaction->t_state = T_COMMIT;
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
trace_jbd2_commit_logging(journal, commit_transaction);
stats.run.rs_logging = jiffies;
* __jbd2_journal_drop_transaction(). Otherwise we could race with
* other checkpointing code processing the transaction...
*/
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
spin_lock(&journal->j_list_lock);
/*
* Now recheck if some buffers did not get attached to the transaction
*/
if (commit_transaction->t_forget) {
spin_unlock(&journal->j_list_lock);
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
goto restart_loop;
}
journal->j_average_commit_time*3) / 4;
else
journal->j_average_commit_time = commit_time;
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
if (commit_transaction->t_checkpoint_list == NULL &&
commit_transaction->t_checkpoint_io_list == NULL) {
/*
* And now, wait forever for commit wakeup events.
*/
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
loop:
if (journal->j_flags & JBD2_UNMOUNT)
if (journal->j_commit_sequence != journal->j_commit_request) {
jbd_debug(1, "OK, requests differ\n");
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
del_timer_sync(&journal->j_commit_timer);
jbd2_journal_commit_transaction(journal);
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
goto loop;
}
* be already stopped.
*/
jbd_debug(1, "Now suspending kjournald2\n");
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
refrigerator();
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
} else {
/*
* We assume on resume that commits are already there,
if (journal->j_flags & JBD2_UNMOUNT)
should_sleep = 0;
if (should_sleep) {
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
schedule();
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
}
finish_wait(&journal->j_wait_commit, &wait);
}
goto loop;
end_loop:
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
del_timer_sync(&journal->j_commit_timer);
journal->j_task = NULL;
wake_up(&journal->j_wait_done_commit);
static void journal_kill_thread(journal_t *journal)
{
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
journal->j_flags |= JBD2_UNMOUNT;
while (journal->j_task) {
wake_up(&journal->j_wait_commit);
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
wait_event(journal->j_wait_done_commit, journal->j_task == NULL);
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
}
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
}
/*
{
int left = journal->j_free;
- assert_spin_locked(&journal->j_state_lock);
+ /* assert_spin_locked(&journal->j_state_lock) no longer applies: now a rwlock_t */
/*
* Be pessimistic here about the number of those free blocks which
{
int ret;
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
ret = __jbd2_log_start_commit(journal, tid);
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
return ret;
}
transaction_t *transaction = NULL;
tid_t tid;
- spin_lock(&journal->j_state_lock);
+ read_lock(&journal->j_state_lock);
if (journal->j_running_transaction && !current->journal_info) {
transaction = journal->j_running_transaction;
__jbd2_log_start_commit(journal, transaction->t_tid);
transaction = journal->j_committing_transaction;
if (!transaction) {
- spin_unlock(&journal->j_state_lock);
+ read_unlock(&journal->j_state_lock);
return 0; /* Nothing to retry */
}
tid = transaction->t_tid;
- spin_unlock(&journal->j_state_lock);
+ read_unlock(&journal->j_state_lock);
jbd2_log_wait_commit(journal, tid);
return 1;
}
{
int ret = 0;
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
if (journal->j_running_transaction) {
tid_t tid = journal->j_running_transaction->t_tid;
*ptid = journal->j_committing_transaction->t_tid;
ret = 1;
}
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
return ret;
}
{
int err = 0;
+ read_lock(&journal->j_state_lock);
#ifdef CONFIG_JBD2_DEBUG
- spin_lock(&journal->j_state_lock);
if (!tid_geq(journal->j_commit_request, tid)) {
printk(KERN_EMERG
"%s: error: j_commit_request=%d, tid=%d\n",
__func__, journal->j_commit_request, tid);
}
- spin_unlock(&journal->j_state_lock);
#endif
- spin_lock(&journal->j_state_lock);
while (tid_gt(tid, journal->j_commit_sequence)) {
jbd_debug(1, "JBD: want %d, j_commit_sequence=%d\n",
tid, journal->j_commit_sequence);
wake_up(&journal->j_wait_commit);
- spin_unlock(&journal->j_state_lock);
+ read_unlock(&journal->j_state_lock);
wait_event(journal->j_wait_done_commit,
!tid_gt(tid, journal->j_commit_sequence));
- spin_lock(&journal->j_state_lock);
+ read_lock(&journal->j_state_lock);
}
- spin_unlock(&journal->j_state_lock);
+ read_unlock(&journal->j_state_lock);
if (unlikely(is_journal_aborted(journal))) {
printk(KERN_EMERG "journal commit I/O error\n");
{
unsigned long blocknr;
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
J_ASSERT(journal->j_free > 1);
blocknr = journal->j_head;
journal->j_free--;
if (journal->j_head == journal->j_last)
journal->j_head = journal->j_first;
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
return jbd2_journal_bmap(journal, blocknr, retp);
}
mutex_init(&journal->j_checkpoint_mutex);
spin_lock_init(&journal->j_revoke_lock);
spin_lock_init(&journal->j_list_lock);
- spin_lock_init(&journal->j_state_lock);
+ rwlock_init(&journal->j_state_lock);
journal->j_commit_interval = (HZ * JBD2_DEFAULT_MAX_COMMIT_AGE);
journal->j_min_batch_time = 0;
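
Journal allocation needs only this one-line change: rwlock_init() replaces spin_lock_init() for j_state_lock, while j_revoke_lock and j_list_lock stay ordinary spinlocks. As an aside (not in the patch), a statically allocated rwlock would use the compile-time form instead:

	static DEFINE_RWLOCK(example_lock);	/* compile-time counterpart of rwlock_init() */
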
set_buffer_uptodate(bh);
}
- spin_lock(&journal->j_state_lock);
+ read_lock(&journal->j_state_lock);
jbd_debug(1,"JBD: updating superblock (start %ld, seq %d, errno %d)\n",
journal->j_tail, journal->j_tail_sequence, journal->j_errno);
sb->s_sequence = cpu_to_be32(journal->j_tail_sequence);
sb->s_start = cpu_to_be32(journal->j_tail);
sb->s_errno = cpu_to_be32(journal->j_errno);
- spin_unlock(&journal->j_state_lock);
+ read_unlock(&journal->j_state_lock);
BUFFER_TRACE(bh, "marking dirty");
mark_buffer_dirty(bh);
* any future commit will have to be careful to update the
* superblock again to re-record the true start of the log. */
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
if (sb->s_start)
journal->j_flags &= ~JBD2_FLUSHED;
else
journal->j_flags |= JBD2_FLUSHED;
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
}
/*
transaction_t *transaction = NULL;
unsigned long old_tail;
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
/* Force everything buffered to the log... */
if (journal->j_running_transaction) {
if (transaction) {
tid_t tid = transaction->t_tid;
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
jbd2_log_wait_commit(journal, tid);
} else {
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
}
/* ...and flush everything in the log out to disk. */
* the magic code for a fully-recovered superblock. Any future
* commits of data to the journal will restore the current
* s_start value. */
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
old_tail = journal->j_tail;
journal->j_tail = 0;
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
jbd2_journal_update_superblock(journal, 1);
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
journal->j_tail = old_tail;
J_ASSERT(!journal->j_running_transaction);
J_ASSERT(!journal->j_checkpoint_transactions);
J_ASSERT(journal->j_head == journal->j_tail);
J_ASSERT(journal->j_tail_sequence == journal->j_transaction_sequence);
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
return 0;
}
printk(KERN_ERR "Aborting journal on device %s.\n",
journal->j_devname);
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
journal->j_flags |= JBD2_ABORT;
transaction = journal->j_running_transaction;
if (transaction)
__jbd2_log_start_commit(journal, transaction->t_tid);
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
}
/* Soft abort: record the abort error status in the journal superblock,
{
int err;
- spin_lock(&journal->j_state_lock);
+ read_lock(&journal->j_state_lock);
if (journal->j_flags & JBD2_ABORT)
err = -EROFS;
else
err = journal->j_errno;
- spin_unlock(&journal->j_state_lock);
+ read_unlock(&journal->j_state_lock);
return err;
}
{
int err = 0;
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
if (journal->j_flags & JBD2_ABORT)
err = -EROFS;
else
journal->j_errno = 0;
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
return err;
}
*/
void jbd2_journal_ack_err(journal_t *journal)
{
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
if (journal->j_errno)
journal->j_flags |= JBD2_ACK_ERR;
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
}
int jbd2_journal_blocks_per_page(struct inode *inode)
jbd_debug(3, "New handle %p going live.\n", handle);
-repeat:
-
/*
* We need to hold j_state_lock until t_updates has been incremented,
* for proper journal barrier handling
*/
- spin_lock(&journal->j_state_lock);
-repeat_locked:
+repeat:
+ read_lock(&journal->j_state_lock);
if (is_journal_aborted(journal) ||
(journal->j_errno != 0 && !(journal->j_flags & JBD2_ACK_ERR))) {
- spin_unlock(&journal->j_state_lock);
+ read_unlock(&journal->j_state_lock);
kfree(new_transaction);
return -EROFS;
}
/* Wait on the journal's transaction barrier if necessary */
if (journal->j_barrier_count) {
- spin_unlock(&journal->j_state_lock);
+ read_unlock(&journal->j_state_lock);
wait_event(journal->j_wait_transaction_locked,
journal->j_barrier_count == 0);
goto repeat;
}
if (!journal->j_running_transaction) {
- if (!new_transaction) {
- spin_unlock(&journal->j_state_lock);
+ read_unlock(&journal->j_state_lock);
+ if (!new_transaction)
goto alloc_transaction;
+ write_lock(&journal->j_state_lock);
+ if (!journal->j_running_transaction) {
+ jbd2_get_transaction(journal, new_transaction);
+ new_transaction = NULL;
}
- jbd2_get_transaction(journal, new_transaction);
- new_transaction = NULL;
+ write_unlock(&journal->j_state_lock);
+ goto repeat;
}
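
This hunk is the one place where the conversion changes control flow rather than just the lock type. start_this_handle() now runs under the read lock, and a reader cannot be upgraded to a writer in place, so when no transaction is running the code drops the read lock, takes the write lock, and re-checks j_running_transaction, since another CPU may have installed a transaction in the unlocked window. The same lines as an annotated sketch (alloc_if_needed() is a stand-in for the existing goto alloc_transaction path, not a real function):

	read_lock(&journal->j_state_lock);
	if (!journal->j_running_transaction) {
		read_unlock(&journal->j_state_lock);	/* rwlocks cannot be upgraded in place */
		new_transaction = alloc_if_needed();	/* may sleep, so no lock is held */
		write_lock(&journal->j_state_lock);
		if (!journal->j_running_transaction) {	/* re-check: we may have lost the race */
			jbd2_get_transaction(journal, new_transaction);
			new_transaction = NULL;		/* ownership passed to the journal */
		}
		write_unlock(&journal->j_state_lock);	/* an unused new_transaction is freed later */
		goto repeat;				/* start over on the read-locked fast path */
	}
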
transaction = journal->j_running_transaction;
prepare_to_wait(&journal->j_wait_transaction_locked,
&wait, TASK_UNINTERRUPTIBLE);
- spin_unlock(&journal->j_state_lock);
+ read_unlock(&journal->j_state_lock);
schedule();
finish_wait(&journal->j_wait_transaction_locked, &wait);
goto repeat;
prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
TASK_UNINTERRUPTIBLE);
__jbd2_log_start_commit(journal, transaction->t_tid);
- spin_unlock(&journal->j_state_lock);
+ read_unlock(&journal->j_state_lock);
schedule();
finish_wait(&journal->j_wait_transaction_locked, &wait);
goto repeat;
if (__jbd2_log_space_left(journal) < jbd_space_needed(journal)) {
jbd_debug(2, "Handle %p waiting for checkpoint...\n", handle);
spin_unlock(&transaction->t_handle_lock);
- __jbd2_log_wait_for_space(journal);
- goto repeat_locked;
+ read_unlock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
+ if (__jbd2_log_space_left(journal) < jbd_space_needed(journal))
+ __jbd2_log_wait_for_space(journal);
+ write_unlock(&journal->j_state_lock);
+ goto repeat;
}
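
The checkpoint-wait path just above follows the same discipline: the handle code runs under the read lock, but __jbd2_log_wait_for_space() expects the write lock (and drops and retakes it internally around the checkpoint work), so the caller releases the read lock, takes the write lock, and re-tests the free-space condition in case space was reclaimed while no lock was held, then restarts at repeat; the old repeat_locked label is gone because there is no longer a single lock mode to return to.
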
/* OK, account for the buffers that this operation expects to
atomic_read(&transaction->t_outstanding_credits),
__jbd2_log_space_left(journal));
spin_unlock(&transaction->t_handle_lock);
- spin_unlock(&journal->j_state_lock);
+ read_unlock(&journal->j_state_lock);
lock_map_acquire(&handle->h_lockdep_map);
kfree(new_transaction);
result = 1;
- spin_lock(&journal->j_state_lock);
+ read_lock(&journal->j_state_lock);
/* Don't extend a locked-down transaction! */
if (handle->h_transaction->t_state != T_RUNNING) {
unlock:
spin_unlock(&transaction->t_handle_lock);
error_out:
- spin_unlock(&journal->j_state_lock);
+ read_unlock(&journal->j_state_lock);
out:
return result;
}
J_ASSERT(atomic_read(&transaction->t_updates) > 0);
J_ASSERT(journal_current_handle() == handle);
- spin_lock(&journal->j_state_lock);
+ read_lock(&journal->j_state_lock);
spin_lock(&transaction->t_handle_lock);
atomic_sub(handle->h_buffer_credits,
&transaction->t_outstanding_credits);
jbd_debug(2, "restarting handle %p\n", handle);
__jbd2_log_start_commit(journal, transaction->t_tid);
- spin_unlock(&journal->j_state_lock);
+ read_unlock(&journal->j_state_lock);
lock_map_release(&handle->h_lockdep_map);
handle->h_buffer_credits = nblocks;
{
DEFINE_WAIT(wait);
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
++journal->j_barrier_count;
/* Wait until there are no running updates */
prepare_to_wait(&journal->j_wait_updates, &wait,
TASK_UNINTERRUPTIBLE);
spin_unlock(&transaction->t_handle_lock);
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
schedule();
finish_wait(&journal->j_wait_updates, &wait);
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
}
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
/*
* We have now established a barrier against other normal updates, but
J_ASSERT(journal->j_barrier_count != 0);
mutex_unlock(&journal->j_barrier);
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
--journal->j_barrier_count;
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
wake_up(&journal->j_wait_transaction_locked);
}
journal->j_last_sync_writer = pid;
- spin_lock(&journal->j_state_lock);
+ read_lock(&journal->j_state_lock);
commit_time = journal->j_average_commit_time;
- spin_unlock(&journal->j_state_lock);
+ read_unlock(&journal->j_state_lock);
trans_time = ktime_to_ns(ktime_sub(ktime_get(),
transaction->t_start_time));
goto zap_buffer_unlocked;
/* OK, we have data buffer in journaled mode */
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
jbd_lock_bh_state(bh);
spin_lock(&journal->j_list_lock);
jbd2_journal_put_journal_head(jh);
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
return ret;
} else {
/* There is no currently-running transaction. So the
jbd2_journal_put_journal_head(jh);
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
return ret;
} else {
/* The orphan record's transaction has
jbd2_journal_put_journal_head(jh);
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
return 0;
} else {
/* Good, the buffer belongs to the running transaction.
zap_buffer_no_jh:
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
zap_buffer_unlocked:
clear_buffer_dirty(bh);
J_ASSERT_BH(bh, !buffer_jbddirty(bh));
/* Locks are here just to force reading of recent values, it is
* enough that the transaction was not committing before we started
* a transaction adding the inode to orphan list */
- spin_lock(&journal->j_state_lock);
+ read_lock(&journal->j_state_lock);
commit_trans = journal->j_committing_transaction;
- spin_unlock(&journal->j_state_lock);
+ read_unlock(&journal->j_state_lock);
spin_lock(&journal->j_list_lock);
inode_trans = jinode->i_transaction;
spin_unlock(&journal->j_list_lock);
if (osb->osb_commit_interval)
commit_interval = osb->osb_commit_interval;
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
journal->j_commit_interval = commit_interval;
if (osb->s_mount_opt & OCFS2_MOUNT_BARRIER)
journal->j_flags |= JBD2_BARRIER;
else
journal->j_flags &= ~JBD2_BARRIER;
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
}
int ocfs2_journal_init(struct ocfs2_journal *journal, int *dirty)
/*
* Protect the various scalars in the journal
*/
- spinlock_t j_state_lock;
+ rwlock_t j_state_lock;
/*
* Number of processes waiting to create a barrier lock [j_state_lock]