Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
[deliverable/linux.git] / fs / xfs / xfs_log_recover.c
index 594f7e63b432427fd5b6448afa0a75eb0b71d558..396565f4324764058b979cf5e4c5bd96744f8ef8 100644 (file)
@@ -190,7 +190,7 @@ xlog_bread_noalign(
        ASSERT(nbblks <= bp->b_length);
 
        XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
-       XFS_BUF_READ(bp);
+       bp->b_flags |= XBF_READ;
        bp->b_io_length = nbblks;
        bp->b_error = 0;
 
@@ -275,7 +275,6 @@ xlog_bwrite(
        ASSERT(nbblks <= bp->b_length);
 
        XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
-       XFS_BUF_ZEROFLAGS(bp);
        xfs_buf_hold(bp);
        xfs_buf_lock(bp);
        bp->b_io_length = nbblks;
@@ -1109,27 +1108,10 @@ xlog_verify_head(
        bool                    tmp_wrapped;
 
        /*
-        * Search backwards through the log looking for the log record header
-        * block. This wraps all the way back around to the head so something is
-        * seriously wrong if we can't find it.
-        */
-       found = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, bp, rhead_blk,
-                                     rhead, wrapped);
-       if (found < 0)
-               return found;
-       if (!found) {
-               xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
-               return -EIO;
-       }
-
-       *tail_blk = BLOCK_LSN(be64_to_cpu((*rhead)->h_tail_lsn));
-
-       /*
-        * Now that we have a tail block, check the head of the log for torn
-        * writes. Search again until we hit the tail or the maximum number of
-        * log record I/Os that could have been in flight at one time. Use a
-        * temporary buffer so we don't trash the rhead/bp pointer from the
-        * call above.
+        * Check the head of the log for torn writes. Search backwards from the
+        * head until we hit the tail or the maximum number of log record I/Os
+        * that could have been in flight at one time. Use a temporary buffer so
+        * we don't trash the rhead/bp pointers from the caller.
         */
        tmp_bp = xlog_get_bp(log, 1);
        if (!tmp_bp)
@@ -1215,6 +1197,115 @@ xlog_verify_head(
        return error;
 }
 
+/*
+ * Check whether the head of the log points to an unmount record. In other
+ * words, determine whether the log is clean. If so, update the in-core state
+ * appropriately.
+ */
+static int
+xlog_check_unmount_rec(
+       struct xlog             *log,
+       xfs_daddr_t             *head_blk,
+       xfs_daddr_t             *tail_blk,
+       struct xlog_rec_header  *rhead,
+       xfs_daddr_t             rhead_blk,
+       struct xfs_buf          *bp,
+       bool                    *clean)
+{
+       struct xlog_op_header   *op_head;
+       xfs_daddr_t             umount_data_blk;
+       xfs_daddr_t             after_umount_blk;
+       int                     hblks;
+       int                     error;
+       char                    *offset;
+
+       *clean = false;
+
+       /*
+        * Look for unmount record. If we find it, then we know there was a
+        * clean unmount. Since the record could end at the last block of the
+        * physical log, we wrap the block numbers before comparing to head_blk.
+        *
+        * Save the current tail lsn to use to pass to xlog_clear_stale_blocks()
+        * below. We won't want to clear the unmount record if there is one, so
+        * we pass the lsn of the unmount record rather than the block after it.
+        */
+       if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
+               int     h_size = be32_to_cpu(rhead->h_size);
+               int     h_version = be32_to_cpu(rhead->h_version);
+
+               if ((h_version & XLOG_VERSION_2) &&
+                   (h_size > XLOG_HEADER_CYCLE_SIZE)) {
+                       hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
+                       if (h_size % XLOG_HEADER_CYCLE_SIZE)
+                               hblks++;
+               } else {
+                       hblks = 1;
+               }
+       } else {
+               hblks = 1;
+       }
+       after_umount_blk = rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len));
+       after_umount_blk = do_mod(after_umount_blk, log->l_logBBsize);
+       if (*head_blk == after_umount_blk &&
+           be32_to_cpu(rhead->h_num_logops) == 1) {
+               umount_data_blk = rhead_blk + hblks;
+               umount_data_blk = do_mod(umount_data_blk, log->l_logBBsize);
+               error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
+               if (error)
+                       return error;
+
+               op_head = (struct xlog_op_header *)offset;
+               if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
+                       /*
+                        * Set tail and last sync so that newly written log
+                        * records will point recovery to after the current
+                        * unmount record.
+                        */
+                       xlog_assign_atomic_lsn(&log->l_tail_lsn,
+                                       log->l_curr_cycle, after_umount_blk);
+                       xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
+                                       log->l_curr_cycle, after_umount_blk);
+                       *tail_blk = after_umount_blk;
+
+                       *clean = true;
+               }
+       }
+
+       return 0;
+}
+
+static void
+xlog_set_state(
+       struct xlog             *log,
+       xfs_daddr_t             head_blk,
+       struct xlog_rec_header  *rhead,
+       xfs_daddr_t             rhead_blk,
+       bool                    bump_cycle)
+{
+       /*
+        * Reset log values according to the state of the log when we
+        * crashed.  In the case where head_blk == 0, we bump curr_cycle
+        * one because the next write starts a new cycle rather than
+        * continuing the cycle of the last good log record.  At this
+        * point we have guaranteed that all partial log records have been
+        * accounted for.  Therefore, we know that the last good log record
+        * written was complete and ended exactly on the end boundary
+        * of the physical log.
+        */
+       log->l_prev_block = rhead_blk;
+       log->l_curr_block = (int)head_blk;
+       log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
+       if (bump_cycle)
+               log->l_curr_cycle++;
+       atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
+       atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
+       xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
+                                       BBTOB(log->l_curr_block));
+       xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
+                                       BBTOB(log->l_curr_block));
+}
+
 /*
  * Find the sync block number or the tail of the log.
  *
@@ -1238,22 +1329,20 @@ xlog_find_tail(
        xfs_daddr_t             *tail_blk)
 {
        xlog_rec_header_t       *rhead;
-       xlog_op_header_t        *op_head;
        char                    *offset = NULL;
        xfs_buf_t               *bp;
        int                     error;
-       xfs_daddr_t             umount_data_blk;
-       xfs_daddr_t             after_umount_blk;
        xfs_daddr_t             rhead_blk;
        xfs_lsn_t               tail_lsn;
-       int                     hblks;
        bool                    wrapped = false;
+       bool                    clean = false;
 
        /*
         * Find previous log record
         */
        if ((error = xlog_find_head(log, head_blk)))
                return error;
+       ASSERT(*head_blk < INT_MAX);
 
        bp = xlog_get_bp(log, 1);
        if (!bp)
@@ -1271,99 +1360,74 @@ xlog_find_tail(
        }
 
        /*
-        * Trim the head block back to skip over torn records. We can have
-        * multiple log I/Os in flight at any time, so we assume CRC failures
-        * back through the previous several records are torn writes and skip
-        * them.
+        * Search backwards through the log looking for the log record header
+        * block. This wraps all the way back around to the head so something is
+        * seriously wrong if we can't find it.
         */
-       ASSERT(*head_blk < INT_MAX);
-       error = xlog_verify_head(log, head_blk, tail_blk, bp, &rhead_blk,
-                                &rhead, &wrapped);
-       if (error)
-               goto done;
+       error = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, bp,
+                                     &rhead_blk, &rhead, &wrapped);
+       if (error < 0)
+               return error;
+       if (!error) {
+               xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
+               return -EIO;
+       }
+       *tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));
 
        /*
-        * Reset log values according to the state of the log when we
-        * crashed.  In the case where head_blk == 0, we bump curr_cycle
-        * one because the next write starts a new cycle rather than
-        * continuing the cycle of the last good log record.  At this
-        * point we have guaranteed that all partial log records have been
-        * accounted for.  Therefore, we know that the last good log record
-        * written was complete and ended exactly on the end boundary
-        * of the physical log.
+        * Set the log state based on the current head record.
         */
-       log->l_prev_block = rhead_blk;
-       log->l_curr_block = (int)*head_blk;
-       log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
-       if (wrapped)
-               log->l_curr_cycle++;
-       atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
-       atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
-       xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
-                                       BBTOB(log->l_curr_block));
-       xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
-                                       BBTOB(log->l_curr_block));
+       xlog_set_state(log, *head_blk, rhead, rhead_blk, wrapped);
+       tail_lsn = atomic64_read(&log->l_tail_lsn);
 
        /*
-        * Look for unmount record.  If we find it, then we know there
-        * was a clean unmount.  Since 'i' could be the last block in
-        * the physical log, we convert to a log block before comparing
-        * to the head_blk.
+        * Look for an unmount record at the head of the log. This sets the log
+        * state to determine whether recovery is necessary.
+        */
+       error = xlog_check_unmount_rec(log, head_blk, tail_blk, rhead,
+                                      rhead_blk, bp, &clean);
+       if (error)
+               goto done;
+
+       /*
+        * Verify the log head if the log is not clean (i.e., we have anything
+        * but an unmount record at the head). This uses CRC verification to
+        * detect and trim torn writes. If discovered, CRC failures are
+        * considered torn writes and the log head is trimmed accordingly.
         *
-        * Save the current tail lsn to use to pass to
-        * xlog_clear_stale_blocks() below.  We won't want to clear the
-        * unmount record if there is one, so we pass the lsn of the
-        * unmount record rather than the block after it.
+        * Note that we can only run CRC verification when the log is dirty
+        * because there's no guarantee that the log data behind an unmount
+        * record is compatible with the current architecture.
         */
-       if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
-               int     h_size = be32_to_cpu(rhead->h_size);
-               int     h_version = be32_to_cpu(rhead->h_version);
+       if (!clean) {
+               xfs_daddr_t     orig_head = *head_blk;
 
-               if ((h_version & XLOG_VERSION_2) &&
-                   (h_size > XLOG_HEADER_CYCLE_SIZE)) {
-                       hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
-                       if (h_size % XLOG_HEADER_CYCLE_SIZE)
-                               hblks++;
-               } else {
-                       hblks = 1;
-               }
-       } else {
-               hblks = 1;
-       }
-       after_umount_blk = rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len));
-       after_umount_blk = do_mod(after_umount_blk, log->l_logBBsize);
-       tail_lsn = atomic64_read(&log->l_tail_lsn);
-       if (*head_blk == after_umount_blk &&
-           be32_to_cpu(rhead->h_num_logops) == 1) {
-               umount_data_blk = rhead_blk + hblks;
-               umount_data_blk = do_mod(umount_data_blk, log->l_logBBsize);
-               error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
+               error = xlog_verify_head(log, head_blk, tail_blk, bp,
+                                        &rhead_blk, &rhead, &wrapped);
                if (error)
                        goto done;
 
-               op_head = (xlog_op_header_t *)offset;
-               if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
-                       /*
-                        * Set tail and last sync so that newly written
-                        * log records will point recovery to after the
-                        * current unmount record.
-                        */
-                       xlog_assign_atomic_lsn(&log->l_tail_lsn,
-                                       log->l_curr_cycle, after_umount_blk);
-                       xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
-                                       log->l_curr_cycle, after_umount_blk);
-                       *tail_blk = after_umount_blk;
-
-                       /*
-                        * Note that the unmount was clean. If the unmount
-                        * was not clean, we need to know this to rebuild the
-                        * superblock counters from the perag headers if we
-                        * have a filesystem using non-persistent counters.
-                        */
-                       log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
+               /* update in-core state again if the head changed */
+               if (*head_blk != orig_head) {
+                       xlog_set_state(log, *head_blk, rhead, rhead_blk,
+                                      wrapped);
+                       tail_lsn = atomic64_read(&log->l_tail_lsn);
+                       error = xlog_check_unmount_rec(log, head_blk, tail_blk,
+                                                      rhead, rhead_blk, bp,
+                                                      &clean);
+                       if (error)
+                               goto done;
                }
        }
 
+       /*
+        * Note that the unmount was clean. If the unmount was not clean, we
+        * need to know this to rebuild the superblock counters from the perag
+        * headers if we have a filesystem using non-persistent counters.
+        */
+       if (clean)
+               log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
+
        /*
         * Make sure that there are no blocks in front of the head
         * with the same cycle number as the head.  This can happen
@@ -2473,6 +2537,13 @@ xlog_recover_validate_buf_type(
                }
                bp->b_ops = &xfs_sb_buf_ops;
                break;
+#ifdef CONFIG_XFS_RT
+       case XFS_BLFT_RTBITMAP_BUF:
+       case XFS_BLFT_RTSUMMARY_BUF:
+               /* no magic numbers for verification of RT buffers */
+               bp->b_ops = &xfs_rtbuf_ops;
+               break;
+#endif /* CONFIG_XFS_RT */
        default:
                xfs_warn(mp, "Unknown buffer type %d!",
                         xfs_blft_from_flags(buf_f));
@@ -2793,7 +2864,7 @@ xfs_recover_inode_owner_change(
                return -ENOMEM;
 
        /* instantiate the inode */
-       xfs_dinode_from_disk(&ip->i_d, dip);
+       xfs_inode_from_disk(ip, dip);
        ASSERT(ip->i_d.di_version >= 3);
 
        error = xfs_iformat_fork(ip, dip);
@@ -2839,7 +2910,7 @@ xlog_recover_inode_pass2(
        int                     error;
        int                     attr_index;
        uint                    fields;
-       xfs_icdinode_t          *dicp;
+       struct xfs_log_dinode   *ldip;
        uint                    isize;
        int                     need_free = 0;
 
@@ -2892,8 +2963,8 @@ xlog_recover_inode_pass2(
                error = -EFSCORRUPTED;
                goto out_release;
        }
-       dicp = item->ri_buf[1].i_addr;
-       if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) {
+       ldip = item->ri_buf[1].i_addr;
+       if (unlikely(ldip->di_magic != XFS_DINODE_MAGIC)) {
                xfs_alert(mp,
                        "%s: Bad inode log record, rec ptr 0x%p, ino %Ld",
                        __func__, item, in_f->ilf_ino);
@@ -2929,13 +3000,13 @@ xlog_recover_inode_pass2(
         * to skip replay when the on disk inode is newer than the log one
         */
        if (!xfs_sb_version_hascrc(&mp->m_sb) &&
-           dicp->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
+           ldip->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
                /*
                 * Deal with the wrap case, DI_MAX_FLUSH is less
                 * than smaller numbers
                 */
                if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
-                   dicp->di_flushiter < (DI_MAX_FLUSH >> 1)) {
+                   ldip->di_flushiter < (DI_MAX_FLUSH >> 1)) {
                        /* do nothing */
                } else {
                        trace_xfs_log_recover_inode_skip(log, in_f);
@@ -2945,13 +3016,13 @@ xlog_recover_inode_pass2(
        }
 
        /* Take the opportunity to reset the flush iteration count */
-       dicp->di_flushiter = 0;
+       ldip->di_flushiter = 0;
 
-       if (unlikely(S_ISREG(dicp->di_mode))) {
-               if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
-                   (dicp->di_format != XFS_DINODE_FMT_BTREE)) {
+       if (unlikely(S_ISREG(ldip->di_mode))) {
+               if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) &&
+                   (ldip->di_format != XFS_DINODE_FMT_BTREE)) {
                        XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
-                                        XFS_ERRLEVEL_LOW, mp, dicp);
+                                        XFS_ERRLEVEL_LOW, mp, ldip);
                        xfs_alert(mp,
                "%s: Bad regular inode log record, rec ptr 0x%p, "
                "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
@@ -2959,12 +3030,12 @@ xlog_recover_inode_pass2(
                        error = -EFSCORRUPTED;
                        goto out_release;
                }
-       } else if (unlikely(S_ISDIR(dicp->di_mode))) {
-               if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
-                   (dicp->di_format != XFS_DINODE_FMT_BTREE) &&
-                   (dicp->di_format != XFS_DINODE_FMT_LOCAL)) {
+       } else if (unlikely(S_ISDIR(ldip->di_mode))) {
+               if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) &&
+                   (ldip->di_format != XFS_DINODE_FMT_BTREE) &&
+                   (ldip->di_format != XFS_DINODE_FMT_LOCAL)) {
                        XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
-                                            XFS_ERRLEVEL_LOW, mp, dicp);
+                                            XFS_ERRLEVEL_LOW, mp, ldip);
                        xfs_alert(mp,
                "%s: Bad dir inode log record, rec ptr 0x%p, "
                "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
@@ -2973,32 +3044,32 @@ xlog_recover_inode_pass2(
                        goto out_release;
                }
        }
-       if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)){
+       if (unlikely(ldip->di_nextents + ldip->di_anextents > ldip->di_nblocks)){
                XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
-                                    XFS_ERRLEVEL_LOW, mp, dicp);
+                                    XFS_ERRLEVEL_LOW, mp, ldip);
                xfs_alert(mp,
        "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
        "dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
                        __func__, item, dip, bp, in_f->ilf_ino,
-                       dicp->di_nextents + dicp->di_anextents,
-                       dicp->di_nblocks);
+                       ldip->di_nextents + ldip->di_anextents,
+                       ldip->di_nblocks);
                error = -EFSCORRUPTED;
                goto out_release;
        }
-       if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) {
+       if (unlikely(ldip->di_forkoff > mp->m_sb.sb_inodesize)) {
                XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
-                                    XFS_ERRLEVEL_LOW, mp, dicp);
+                                    XFS_ERRLEVEL_LOW, mp, ldip);
                xfs_alert(mp,
        "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
        "dino bp 0x%p, ino %Ld, forkoff 0x%x", __func__,
-                       item, dip, bp, in_f->ilf_ino, dicp->di_forkoff);
+                       item, dip, bp, in_f->ilf_ino, ldip->di_forkoff);
                error = -EFSCORRUPTED;
                goto out_release;
        }
-       isize = xfs_icdinode_size(dicp->di_version);
+       isize = xfs_log_dinode_size(ldip->di_version);
        if (unlikely(item->ri_buf[1].i_len > isize)) {
                XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
-                                    XFS_ERRLEVEL_LOW, mp, dicp);
+                                    XFS_ERRLEVEL_LOW, mp, ldip);
                xfs_alert(mp,
                        "%s: Bad inode log record length %d, rec ptr 0x%p",
                        __func__, item->ri_buf[1].i_len, item);
@@ -3006,8 +3077,8 @@ xlog_recover_inode_pass2(
                goto out_release;
        }
 
-       /* The core is in in-core format */
-       xfs_dinode_to_disk(dip, dicp);
+       /* recover the log dinode inode into the on disk inode */
+       xfs_log_dinode_to_disk(ldip, dip);
 
        /* the rest is in on-disk format */
        if (item->ri_buf[1].i_len > isize) {
@@ -4337,8 +4408,8 @@ xlog_recover_process_one_iunlink(
        if (error)
                goto fail_iput;
 
-       ASSERT(ip->i_d.di_nlink == 0);
-       ASSERT(ip->i_d.di_mode != 0);
+       ASSERT(VFS_I(ip)->i_nlink == 0);
+       ASSERT(VFS_I(ip)->i_mode != 0);
 
        /* setup for the next pass */
        agino = be32_to_cpu(dip->di_next_unlinked);
@@ -4892,6 +4963,7 @@ xlog_do_recover(
        xfs_daddr_t     head_blk,
        xfs_daddr_t     tail_blk)
 {
+       struct xfs_mount *mp = log->l_mp;
        int             error;
        xfs_buf_t       *bp;
        xfs_sb_t        *sbp;
@@ -4906,7 +4978,7 @@ xlog_do_recover(
        /*
         * If IO errors happened during recovery, bail out.
         */
-       if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
+       if (XFS_FORCED_SHUTDOWN(mp)) {
                return -EIO;
        }
 
@@ -4919,22 +4991,21 @@ xlog_do_recover(
         * or iunlinks they will have some entries in the AIL; so we look at
         * the AIL to determine how to set the tail_lsn.
         */
-       xlog_assign_tail_lsn(log->l_mp);
+       xlog_assign_tail_lsn(mp);
 
        /*
         * Now that we've finished replaying all buffer and inode
         * updates, re-read in the superblock and reverify it.
         */
-       bp = xfs_getsb(log->l_mp, 0);
-       XFS_BUF_UNDONE(bp);
-       ASSERT(!(XFS_BUF_ISWRITE(bp)));
-       XFS_BUF_READ(bp);
-       XFS_BUF_UNASYNC(bp);
+       bp = xfs_getsb(mp, 0);
+       bp->b_flags &= ~(XBF_DONE | XBF_ASYNC);
+       ASSERT(!(bp->b_flags & XBF_WRITE));
+       bp->b_flags |= XBF_READ;
        bp->b_ops = &xfs_sb_buf_ops;
 
        error = xfs_buf_submit_wait(bp);
        if (error) {
-               if (!XFS_FORCED_SHUTDOWN(log->l_mp)) {
+               if (!XFS_FORCED_SHUTDOWN(mp)) {
                        xfs_buf_ioerror_alert(bp, __func__);
                        ASSERT(0);
                }
@@ -4943,14 +5014,17 @@ xlog_do_recover(
        }
 
        /* Convert superblock from on-disk format */
-       sbp = &log->l_mp->m_sb;
+       sbp = &mp->m_sb;
        xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
-       ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC);
-       ASSERT(xfs_sb_good_version(sbp));
-       xfs_reinit_percpu_counters(log->l_mp);
-
        xfs_buf_relse(bp);
 
+       /* re-initialise in-core superblock and geometry structures */
+       xfs_reinit_percpu_counters(mp);
+       error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
+       if (error) {
+               xfs_warn(mp, "Failed post-recovery per-ag init: %d", error);
+               return error;
+       }
 
        xlog_recover_check_summary(log);
 
This page took 0.035383 seconds and 5 git commands to generate.