/* fs/xfs/xfs_log_recover.c */
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_inode_item.h"
#include "xfs_extfree_item.h"
#include "xfs_trans_priv.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_quota.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_bmap_btree.h"
#include "xfs_dinode.h"
#include "xfs_error.h"
#include "xfs_dir2.h"

#define BLK_AVG(blk1, blk2)	((blk1+blk2) >> 1)

STATIC int
xlog_find_zeroed(
	struct xlog	*,
	xfs_daddr_t	*);
STATIC int
xlog_clear_stale_blocks(
	struct xlog	*,
	xfs_lsn_t);
#if defined(DEBUG)
STATIC void
xlog_recover_check_summary(
	struct xlog *);
#else
#define	xlog_recover_check_summary(log)
#endif

/*
 * This structure is used during recovery to record the buf log items which
 * have been canceled and should not be replayed.
 */
struct xfs_buf_cancel {
	xfs_daddr_t		bc_blkno;
	uint			bc_len;
	int			bc_refcount;
	struct list_head	bc_list;
};

/*
 * Sector aligned buffer routines for buffer create/read/write/access
 */

/*
 * Verify that the given count of basic blocks is a valid number of blocks
 * to specify for an operation involving the given XFS log buffer.
 * Returns nonzero if the count is valid, 0 otherwise.
 */
static inline int
xlog_buf_bbcount_valid(
	struct xlog	*log,
	int		bbcount)
{
	return bbcount > 0 && bbcount <= log->l_logBBsize;
}

/*
 * Allocate a buffer to hold log data.  The buffer needs to be able
 * to map to a range of nbblks basic blocks at any valid (basic
 * block) offset within the log.
 */
STATIC xfs_buf_t *
xlog_get_bp(
	struct xlog	*log,
	int		nbblks)
{
	struct xfs_buf	*bp;

	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return NULL;
	}

	/*
	 * We do log I/O in units of log sectors (a power-of-2
	 * multiple of the basic block size), so we round up the
	 * requested size to accommodate the basic blocks required
	 * for complete log sectors.
	 *
	 * In addition, the buffer may be used for a non-sector-
	 * aligned block offset, in which case an I/O of the
	 * requested size could extend beyond the end of the
	 * buffer.  If the requested size is only 1 basic block it
	 * will never straddle a sector boundary, so this won't be
	 * an issue.  Nor will this be a problem if the log I/O is
	 * done in basic blocks (sector size 1).  But otherwise we
	 * extend the buffer by one extra log sector to ensure
	 * there's space to accommodate this possibility.
	 */
	if (nbblks > 1 && log->l_sectBBsize > 1)
		nbblks += log->l_sectBBsize;
	nbblks = round_up(nbblks, log->l_sectBBsize);

	bp = xfs_buf_get_uncached(log->l_mp->m_logdev_targp, nbblks, 0);
	if (bp)
		xfs_buf_unlock(bp);
	return bp;
}
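
/*
 * Illustrative example, not from the original source: with 4k log
 * sectors, l_sectBBsize is 8 basic blocks.  A request for nbblks == 10
 * first grows to 18 to cover a possibly unaligned start, then rounds
 * up to 24 basic blocks (three whole log sectors), so a 10-block read
 * beginning anywhere inside a sector still fits in the buffer:
 *
 *	nbblks = 10 + 8;		// 18
 *	nbblks = round_up(18, 8);	// 24
 */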

STATIC void
xlog_put_bp(
	xfs_buf_t	*bp)
{
	xfs_buf_free(bp);
}

/*
 * Return the address of the start of the given block number's data
 * in a log buffer.  The buffer covers a log sector-aligned region.
 */
STATIC xfs_caddr_t
xlog_align(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	xfs_daddr_t	offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1);

	ASSERT(offset + nbblks <= bp->b_length);
	return bp->b_addr + BBTOB(offset);
}


/*
 * nbblks should be uint, but oh well.  Just want to catch that 32-bit length.
 */
STATIC int
xlog_bread_noalign(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	int		error;

	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return EFSCORRUPTED;
	}

	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);

	ASSERT(nbblks > 0);
	ASSERT(nbblks <= bp->b_length);

	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
	XFS_BUF_READ(bp);
	bp->b_io_length = nbblks;
	bp->b_error = 0;

	xfsbdstrat(log->l_mp, bp);
	error = xfs_buf_iowait(bp);
	if (error)
		xfs_buf_ioerror_alert(bp, __func__);
	return error;
}
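
/*
 * Worked example of the alignment above (illustration only, assuming
 * l_sectBBsize == 8): a read of 2 blocks at block 13 rounds blk_no
 * down to 8 and nbblks up to 8, so the whole enclosing sector (blocks
 * 8-15) is read; xlog_align() then returns the caller's data at
 * b_addr + BBTOB(13 & 7), i.e. 5 basic blocks into the buffer.
 */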

STATIC int
xlog_bread(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp,
	xfs_caddr_t	*offset)
{
	int		error;

	error = xlog_bread_noalign(log, blk_no, nbblks, bp);
	if (error)
		return error;

	*offset = xlog_align(log, blk_no, nbblks, bp);
	return 0;
}

/*
 * Read at an offset into the buffer.  Returns with the buffer in its original
 * state regardless of the result of the read.
 */
STATIC int
xlog_bread_offset(
	struct xlog	*log,
	xfs_daddr_t	blk_no,		/* block to read from */
	int		nbblks,		/* blocks to read */
	struct xfs_buf	*bp,
	xfs_caddr_t	offset)
{
	xfs_caddr_t	orig_offset = bp->b_addr;
	int		orig_len = BBTOB(bp->b_length);
	int		error, error2;

	error = xfs_buf_associate_memory(bp, offset, BBTOB(nbblks));
	if (error)
		return error;

	error = xlog_bread_noalign(log, blk_no, nbblks, bp);

	/* must reset buffer pointer even on error */
	error2 = xfs_buf_associate_memory(bp, orig_offset, orig_len);
	if (error)
		return error;
	return error2;
}

/*
 * Write out the buffer at the given block for the given number of blocks.
 * The buffer is kept locked across the write and is returned locked.
 * This can only be used for synchronous log writes.
 */
STATIC int
xlog_bwrite(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	int		error;

	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return EFSCORRUPTED;
	}

	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);

	ASSERT(nbblks > 0);
	ASSERT(nbblks <= bp->b_length);

	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
	XFS_BUF_ZEROFLAGS(bp);
	xfs_buf_hold(bp);
	xfs_buf_lock(bp);
	bp->b_io_length = nbblks;
	bp->b_error = 0;

	error = xfs_bwrite(bp);
	if (error)
		xfs_buf_ioerror_alert(bp, __func__);
	xfs_buf_relse(bp);
	return error;
}

#ifdef DEBUG
/*
 * dump debug superblock and log record information
 */
STATIC void
xlog_header_check_dump(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	xfs_debug(mp, "%s:  SB : uuid = %pU, fmt = %d",
		__func__, &mp->m_sb.sb_uuid, XLOG_FMT);
	xfs_debug(mp, "    log : uuid = %pU, fmt = %d",
		&head->h_fs_uuid, be32_to_cpu(head->h_fmt));
}
#else
#define xlog_header_check_dump(mp, head)
#endif

/*
 * check log record header for recovery
 */
STATIC int
xlog_header_check_recover(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	/*
	 * IRIX doesn't write the h_fmt field and leaves it zeroed
	 * (XLOG_FMT_UNKNOWN).  This stops us from trying to recover
	 * a dirty log created in IRIX.
	 */
	if (unlikely(head->h_fmt != cpu_to_be32(XLOG_FMT))) {
		xfs_warn(mp,
	"dirty log written in incompatible format - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_recover(1)",
				 XFS_ERRLEVEL_HIGH, mp);
		return XFS_ERROR(EFSCORRUPTED);
	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
		xfs_warn(mp,
	"dirty log entry has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_recover(2)",
				 XFS_ERRLEVEL_HIGH, mp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	return 0;
}

/*
 * read the head block of the log and check the header
 */
STATIC int
xlog_header_check_mount(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	if (uuid_is_nil(&head->h_fs_uuid)) {
		/*
		 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
		 * h_fs_uuid is nil, we assume this log was last mounted
		 * by IRIX and continue.
		 */
		xfs_warn(mp, "nil uuid in log - IRIX style log");
	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
		xfs_warn(mp, "log has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_mount",
				 XFS_ERRLEVEL_HIGH, mp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	return 0;
}

STATIC void
xlog_recover_iodone(
	struct xfs_buf	*bp)
{
	if (bp->b_error) {
		/*
		 * We're not going to bother about retrying
		 * this during recovery. One strike!
		 */
		xfs_buf_ioerror_alert(bp, __func__);
		xfs_force_shutdown(bp->b_target->bt_mount,
					SHUTDOWN_META_IO_ERROR);
	}
	bp->b_iodone = NULL;
	xfs_buf_ioend(bp, 0);
}

/*
 * This routine finds (to an approximation) the first block in the physical
 * log which contains the given cycle.  It uses a binary search algorithm.
 * Note that the algorithm cannot be perfect because the disk will not
 * necessarily be perfect.
 */
STATIC int
xlog_find_cycle_start(
	struct xlog	*log,
	struct xfs_buf	*bp,
	xfs_daddr_t	first_blk,
	xfs_daddr_t	*last_blk,
	uint		cycle)
{
	xfs_caddr_t	offset;
	xfs_daddr_t	mid_blk;
	xfs_daddr_t	end_blk;
	uint		mid_cycle;
	int		error;

	end_blk = *last_blk;
	mid_blk = BLK_AVG(first_blk, end_blk);
	while (mid_blk != first_blk && mid_blk != end_blk) {
		error = xlog_bread(log, mid_blk, 1, bp, &offset);
		if (error)
			return error;
		mid_cycle = xlog_get_cycle(offset);
		if (mid_cycle == cycle)
			end_blk = mid_blk;   /* last_half_cycle == mid_cycle */
		else
			first_blk = mid_blk; /* first_half_cycle == mid_cycle */
		mid_blk = BLK_AVG(first_blk, end_blk);
	}
	ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
	       (mid_blk == end_blk && mid_blk-1 == first_blk));

	*last_blk = end_blk;

	return 0;
}
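
/*
 * Sketch of the loop invariant above (illustrative, not original
 * text): first_blk always holds a block whose cycle differs from
 * "cycle" and end_blk one whose cycle matches it, so the search
 * converges on the first block stamped with "cycle".  E.g. cycles
 * 9 9 9 8 8 8 8 8 over blocks 0-7, searching for cycle 8:
 *
 *	mid = (0+7)/2 = 3 -> cycle 8 -> end_blk = 3
 *	mid = (0+3)/2 = 1 -> cycle 9 -> first_blk = 1
 *	mid = (1+3)/2 = 2 -> cycle 9 -> first_blk = 2
 *
 * mid_blk now equals first_blk, the loop exits, and *last_blk = 3.
 */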

/*
 * Check that a range of blocks does not contain stop_on_cycle_no.
 * Fill in *new_blk with the block offset where such a block is
 * found, or with -1 (an invalid block number) if there is no such
 * block in the range.  The scan needs to occur from front to back
 * and the pointer into the region must be updated since a later
 * routine will need to perform another test.
 */
STATIC int
xlog_find_verify_cycle(
	struct xlog	*log,
	xfs_daddr_t	start_blk,
	int		nbblks,
	uint		stop_on_cycle_no,
	xfs_daddr_t	*new_blk)
{
	xfs_daddr_t	i, j;
	uint		cycle;
	xfs_buf_t	*bp;
	xfs_daddr_t	bufblks;
	xfs_caddr_t	buf = NULL;
	int		error = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks we'll be examining.  If that fails,
	 * try a smaller size.  We need to be able to read at least
	 * a log sector, or we're out of luck.
	 */
	bufblks = 1 << ffs(nbblks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(bp = xlog_get_bp(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < log->l_sectBBsize)
			return ENOMEM;
	}

	for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
		int	bcount;

		bcount = min(bufblks, (start_blk + nbblks - i));

		error = xlog_bread(log, i, bcount, bp, &buf);
		if (error)
			goto out;

		for (j = 0; j < bcount; j++) {
			cycle = xlog_get_cycle(buf);
			if (cycle == stop_on_cycle_no) {
				*new_blk = i+j;
				goto out;
			}

			buf += BBSIZE;
		}
	}

	*new_blk = -1;

out:
	xlog_put_bp(bp);
	return error;
}
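
/*
 * Note on the greedy sizing above (illustration, not original text):
 * 1 << ffs(nbblks) is twice the lowest set bit of nbblks, e.g.
 * nbblks == 24 (binary 11000) gives ffs() == 4 and a first try of 16
 * blocks.  Each failed allocation halves the request until it would
 * drop below one log sector, at which point we give up with ENOMEM.
 */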

/*
 * Potentially backup over partial log record write.
 *
 * In the typical case, last_blk is the number of the block directly after
 * a good log record.  Therefore, we subtract one to get the block number
 * of the last block in the given buffer.  extra_bblks contains the number
 * of blocks we would have read on a previous read.  This happens when the
 * last log record is split over the end of the physical log.
 *
 * extra_bblks is the number of blocks potentially verified on a previous
 * call to this routine.
 */
STATIC int
xlog_find_verify_log_record(
	struct xlog		*log,
	xfs_daddr_t		start_blk,
	xfs_daddr_t		*last_blk,
	int			extra_bblks)
{
	xfs_daddr_t		i;
	xfs_buf_t		*bp;
	xfs_caddr_t		offset = NULL;
	xlog_rec_header_t	*head = NULL;
	int			error = 0;
	int			smallmem = 0;
	int			num_blks = *last_blk - start_blk;
	int			xhdrs;

	ASSERT(start_blk != 0 || *last_blk != start_blk);

	if (!(bp = xlog_get_bp(log, num_blks))) {
		if (!(bp = xlog_get_bp(log, 1)))
			return ENOMEM;
		smallmem = 1;
	} else {
		error = xlog_bread(log, start_blk, num_blks, bp, &offset);
		if (error)
			goto out;
		offset += ((num_blks - 1) << BBSHIFT);
	}

	for (i = (*last_blk) - 1; i >= 0; i--) {
		if (i < start_blk) {
			/* valid log record not found */
			xfs_warn(log->l_mp,
		"Log inconsistent (didn't find previous header)");
			ASSERT(0);
			error = XFS_ERROR(EIO);
			goto out;
		}

		if (smallmem) {
			error = xlog_bread(log, i, 1, bp, &offset);
			if (error)
				goto out;
		}

		head = (xlog_rec_header_t *)offset;

		if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
			break;

		if (!smallmem)
			offset -= BBSIZE;
	}

	/*
	 * We hit the beginning of the physical log & still no header.  Return
	 * to caller.  If caller can handle a return of -1, then this routine
	 * will be called again for the end of the physical log.
	 */
	if (i == -1) {
		error = -1;
		goto out;
	}

	/*
	 * We have the final block of the good log (the first block
	 * of the log record _before_ the head).  So we check the uuid.
	 */
	if ((error = xlog_header_check_mount(log->l_mp, head)))
		goto out;

	/*
	 * We may have found a log record header before we expected one.
	 * last_blk will be the 1st block # with a given cycle #.  We may end
	 * up reading an entire log record.  In this case, we don't want to
	 * reset last_blk.  Only when last_blk points in the middle of a log
	 * record do we update last_blk.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		uint	h_size = be32_to_cpu(head->h_size);

		xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
		if (h_size % XLOG_HEADER_CYCLE_SIZE)
			xhdrs++;
	} else {
		xhdrs = 1;
	}

	if (*last_blk - i + extra_bblks !=
	    BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
		*last_blk = i;

out:
	xlog_put_bp(bp);
	return error;
}
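
/*
 * Worked example of the final check above (illustration only): on a
 * v2 log with h_size == 32k, xhdrs == 1.  If the record header found
 * at block i has an h_len that converts to 5 basic blocks, a head at
 * i + 6 sits exactly one header plus one record past i, i.e. at the
 * start of the next record, so last_blk is left alone; any other
 * distance means the head landed mid-record and is pulled back to i.
 */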

/*
 * Head is defined to be the point of the log where the next log write
 * could go.  This means that incomplete LR writes at the end are
 * eliminated when calculating the head.  We aren't guaranteed that previous
 * LR have complete transactions.  We only know that a cycle number of
 * current cycle number -1 won't be present in the log if we start writing
 * from our current block number.
 *
 * last_blk contains the block number of the first block with a given
 * cycle number.
 *
 * Return: zero if normal, non-zero if error.
 */
STATIC int
xlog_find_head(
	struct xlog	*log,
	xfs_daddr_t	*return_head_blk)
{
	xfs_buf_t	*bp;
	xfs_caddr_t	offset;
	xfs_daddr_t	new_blk, first_blk, start_blk, last_blk, head_blk;
	int		num_scan_bblks;
	uint		first_half_cycle, last_half_cycle;
	uint		stop_on_cycle;
	int		error, log_bbnum = log->l_logBBsize;

	/* Is the end of the log device zeroed? */
	if ((error = xlog_find_zeroed(log, &first_blk)) == -1) {
		*return_head_blk = first_blk;

		/* Is the whole lot zeroed? */
		if (!first_blk) {
			/* Linux XFS shouldn't generate totally zeroed logs -
			 * mkfs etc write a dummy unmount record to a fresh
			 * log so we can store the uuid in there
			 */
			xfs_warn(log->l_mp, "totally zeroed log");
		}

		return 0;
	} else if (error) {
		xfs_warn(log->l_mp, "empty log check failed");
		return error;
	}

	first_blk = 0;			/* get cycle # of 1st block */
	bp = xlog_get_bp(log, 1);
	if (!bp)
		return ENOMEM;

	error = xlog_bread(log, 0, 1, bp, &offset);
	if (error)
		goto bp_err;

	first_half_cycle = xlog_get_cycle(offset);

	last_blk = head_blk = log_bbnum - 1;	/* get cycle # of last block */
	error = xlog_bread(log, last_blk, 1, bp, &offset);
	if (error)
		goto bp_err;

	last_half_cycle = xlog_get_cycle(offset);
	ASSERT(last_half_cycle != 0);

	/*
	 * If the 1st half cycle number is equal to the last half cycle number,
	 * then the entire log is stamped with the same cycle number.  In this
	 * case, head_blk can't be set to zero (which makes sense).  The below
	 * math doesn't work out properly with head_blk equal to zero.  Instead,
	 * we set it to log_bbnum which is an invalid block number, but this
	 * value makes the math correct.  If head_blk doesn't change through
	 * all the tests below, *head_blk is set to zero at the very end rather
	 * than log_bbnum.  In a sense, log_bbnum and zero are the same block
	 * in a circular file.
	 */
	if (first_half_cycle == last_half_cycle) {
		/*
		 * In this case we believe that the entire log should have
		 * cycle number last_half_cycle.  We need to scan backwards
		 * from the end verifying that there are no holes still
		 * containing last_half_cycle - 1.  If we find such a hole,
		 * then the start of that hole will be the new head.  The
		 * simple case looks like
		 *        x | x ... | x - 1 | x
		 * Another case that fits this picture would be
		 *        x | x + 1 | x ... | x
		 * In this case the head really is somewhere at the end of the
		 * log, as one of the latest writes at the beginning was
		 * incomplete.
		 * One more case is
		 *        x | x + 1 | x ... | x - 1 | x
		 * This is really the combination of the above two cases, and
		 * the head has to end up at the start of the x-1 hole at the
		 * end of the log.
		 *
		 * In the 256k log case, we will read from the beginning to the
		 * end of the log and search for cycle numbers equal to x-1.
		 * We don't worry about the x+1 blocks that we encounter,
		 * because we know that they cannot be the head since the log
		 * started with x.
		 */
		head_blk = log_bbnum;
		stop_on_cycle = last_half_cycle - 1;
	} else {
		/*
		 * In this case we want to find the first block with cycle
		 * number matching last_half_cycle.  We expect the log to be
		 * some variation on
		 *        x + 1 ... | x ... | x
		 * The first block with cycle number x (last_half_cycle) will
		 * be where the new head belongs.  First we do a binary search
		 * for the first occurrence of last_half_cycle.  The binary
		 * search may not be totally accurate, so then we scan back
		 * from there looking for occurrences of last_half_cycle before
		 * us.  If that backwards scan wraps around the beginning of
		 * the log, then we look for occurrences of last_half_cycle - 1
		 * at the end of the log.  The cases we're looking for look
		 * like
		 *        v binary search stopped here
		 *        x + 1 ... | x | x + 1 | x ... | x
		 *                   ^ but we want to locate this spot
		 * or
		 *        <---------> less than scan distance
		 *        x + 1 ... | x ... | x - 1 | x
		 *                           ^ we want to locate this spot
		 */
		stop_on_cycle = last_half_cycle;
		if ((error = xlog_find_cycle_start(log, bp, first_blk,
						&head_blk, last_half_cycle)))
			goto bp_err;
	}

	/*
	 * Now validate the answer.  Scan back some number of maximum possible
	 * blocks and make sure each one has the expected cycle number.  The
	 * maximum is determined by the total possible amount of buffering
	 * in the in-core log.  The following number can be made tighter if
	 * we actually look at the block size of the filesystem.
	 */
	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		/*
		 * We are guaranteed that the entire check can be performed
		 * in one buffer.
		 */
		start_blk = head_blk - num_scan_bblks;
		if ((error = xlog_find_verify_cycle(log,
						start_blk, num_scan_bblks,
						stop_on_cycle, &new_blk)))
			goto bp_err;
		if (new_blk != -1)
			head_blk = new_blk;
	} else {		/* need to read 2 parts of log */
		/*
		 * We are going to scan backwards in the log in two parts.
		 * First we scan the physical end of the log.  In this part
		 * of the log, we are looking for blocks with cycle number
		 * last_half_cycle - 1.
		 * If we find one, then we know that the log starts there, as
		 * we've found a hole that didn't get written in going around
		 * the end of the physical log.  The simple case for this is
		 *        x + 1 ... | x ... | x - 1 | x
		 *        <---------> less than scan distance
		 * If all of the blocks at the end of the log have cycle number
		 * last_half_cycle, then we check the blocks at the start of
		 * the log looking for occurrences of last_half_cycle.  If we
		 * find one, then our current estimate for the location of the
		 * first occurrence of last_half_cycle is wrong and we move
		 * back to the hole we've found.  This case looks like
		 *        x + 1 ... | x | x + 1 | x ...
		 *               ^ binary search stopped here
		 * Another case we need to handle that only occurs in 256k
		 * logs is
		 *        x + 1 ... | x ... | x+1 | x ...
		 *                   ^ binary search stops here
		 * In a 256k log, the scan at the end of the log will see the
		 * x + 1 blocks.  We need to skip past those since that is
		 * certainly not the head of the log.  By searching for
		 * last_half_cycle-1 we accomplish that.
		 */
		ASSERT(head_blk <= INT_MAX &&
			(xfs_daddr_t) num_scan_bblks >= head_blk);
		start_blk = log_bbnum - (num_scan_bblks - head_blk);
		if ((error = xlog_find_verify_cycle(log, start_blk,
					num_scan_bblks - (int)head_blk,
					(stop_on_cycle - 1), &new_blk)))
			goto bp_err;
		if (new_blk != -1) {
			head_blk = new_blk;
			goto validate_head;
		}

		/*
		 * Scan beginning of log now.  The last part of the physical
		 * log is good.  This scan needs to verify that it doesn't find
		 * the last_half_cycle.
		 */
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		if ((error = xlog_find_verify_cycle(log,
					start_blk, (int)head_blk,
					stop_on_cycle, &new_blk)))
			goto bp_err;
		if (new_blk != -1)
			head_blk = new_blk;
	}

validate_head:
	/*
	 * Now we need to make sure head_blk is not pointing to a block in
	 * the middle of a log record.
	 */
	num_scan_bblks = XLOG_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		start_blk = head_blk - num_scan_bblks; /* don't read head_blk */

		/* start ptr at last block ptr before head_blk */
		if ((error = xlog_find_verify_log_record(log, start_blk,
							&head_blk, 0)) == -1) {
			error = XFS_ERROR(EIO);
			goto bp_err;
		} else if (error)
			goto bp_err;
	} else {
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		if ((error = xlog_find_verify_log_record(log, start_blk,
							&head_blk, 0)) == -1) {
			/* We hit the beginning of the log during our search */
			start_blk = log_bbnum - (num_scan_bblks - head_blk);
			new_blk = log_bbnum;
			ASSERT(start_blk <= INT_MAX &&
				(xfs_daddr_t) log_bbnum-start_blk >= 0);
			ASSERT(head_blk <= INT_MAX);
			if ((error = xlog_find_verify_log_record(log,
							start_blk, &new_blk,
							(int)head_blk)) == -1) {
				error = XFS_ERROR(EIO);
				goto bp_err;
			} else if (error)
				goto bp_err;
			if (new_blk != log_bbnum)
				head_blk = new_blk;
		} else if (error)
			goto bp_err;
	}

	xlog_put_bp(bp);
	if (head_blk == log_bbnum)
		*return_head_blk = 0;
	else
		*return_head_blk = head_blk;
	/*
	 * When returning here, we have a good block number.  Bad block
	 * means that during a previous crash, we didn't have a clean break
	 * from cycle number N to cycle number N-1.  In this case, we need
	 * to find the first block with cycle number N-1.
	 */
	return 0;

bp_err:
	xlog_put_bp(bp);

	if (error)
		xfs_warn(log->l_mp, "failed to find log head");
	return error;
}

/*
 * Find the sync block number or the tail of the log.
 *
 * This will be the block number of the last record to have its
 * associated buffers synced to disk.  Every log record header has
 * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
 * to get a sync block number.  The only concern is to figure out which
 * log record header to believe.
 *
 * The following algorithm uses the log record header with the largest
 * lsn.  The entire log record does not need to be valid.  We only care
 * that the header is valid.
 *
 * We could speed up the search by using the current head_blk buffer, but
 * it is not available.
 */
STATIC int
xlog_find_tail(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,
	xfs_daddr_t		*tail_blk)
{
	xlog_rec_header_t	*rhead;
	xlog_op_header_t	*op_head;
	xfs_caddr_t		offset = NULL;
	xfs_buf_t		*bp;
	int			error, i, found;
	xfs_daddr_t		umount_data_blk;
	xfs_daddr_t		after_umount_blk;
	xfs_lsn_t		tail_lsn;
	int			hblks;

	found = 0;

	/*
	 * Find previous log record
	 */
	if ((error = xlog_find_head(log, head_blk)))
		return error;

	bp = xlog_get_bp(log, 1);
	if (!bp)
		return ENOMEM;
	if (*head_blk == 0) {				/* special case */
		error = xlog_bread(log, 0, 1, bp, &offset);
		if (error)
			goto done;

		if (xlog_get_cycle(offset) == 0) {
			*tail_blk = 0;
			/* leave all other log inited values alone */
			goto done;
		}
	}

	/*
	 * Search backwards looking for log record header block
	 */
	ASSERT(*head_blk < INT_MAX);
	for (i = (int)(*head_blk) - 1; i >= 0; i--) {
		error = xlog_bread(log, i, 1, bp, &offset);
		if (error)
			goto done;

		if (*(__be32 *)offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
			found = 1;
			break;
		}
	}
	/*
	 * If we haven't found the log record header block, start looking
	 * again from the end of the physical log.  XXXmiken: There should be
	 * a check here to make sure we didn't search more than N blocks in
	 * the previous code.
	 */
	if (!found) {
		for (i = log->l_logBBsize - 1; i >= (int)(*head_blk); i--) {
			error = xlog_bread(log, i, 1, bp, &offset);
			if (error)
				goto done;

			if (*(__be32 *)offset ==
			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
				found = 2;
				break;
			}
		}
	}
	if (!found) {
		xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
		xlog_put_bp(bp);
		ASSERT(0);
		return XFS_ERROR(EIO);
	}

	/* find blk_no of tail of log */
	rhead = (xlog_rec_header_t *)offset;
	*tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));

	/*
	 * Reset log values according to the state of the log when we
	 * crashed.  In the case where head_blk == 0, we bump curr_cycle
	 * one because the next write starts a new cycle rather than
	 * continuing the cycle of the last good log record.  At this
	 * point we have guaranteed that all partial log records have been
	 * accounted for.  Therefore, we know that the last good log record
	 * written was complete and ended exactly on the end boundary
	 * of the physical log.
	 */
	log->l_prev_block = i;
	log->l_curr_block = (int)*head_blk;
	log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
	if (found == 2)
		log->l_curr_cycle++;
	atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
	atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
	xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
					BBTOB(log->l_curr_block));
	xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
					BBTOB(log->l_curr_block));

	/*
	 * Look for unmount record.  If we find it, then we know there
	 * was a clean unmount.  Since 'i' could be the last block in
	 * the physical log, we convert to a log block before comparing
	 * to the head_blk.
	 *
	 * Save the current tail lsn to use to pass to
	 * xlog_clear_stale_blocks() below.  We won't want to clear the
	 * unmount record if there is one, so we pass the lsn of the
	 * unmount record rather than the block after it.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		int	h_size = be32_to_cpu(rhead->h_size);
		int	h_version = be32_to_cpu(rhead->h_version);

		if ((h_version & XLOG_VERSION_2) &&
		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
			if (h_size % XLOG_HEADER_CYCLE_SIZE)
				hblks++;
		} else {
			hblks = 1;
		}
	} else {
		hblks = 1;
	}
	after_umount_blk = (i + hblks + (int)
		BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize;
	tail_lsn = atomic64_read(&log->l_tail_lsn);
	if (*head_blk == after_umount_blk &&
	    be32_to_cpu(rhead->h_num_logops) == 1) {
		umount_data_blk = (i + hblks) % log->l_logBBsize;
		error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
		if (error)
			goto done;

		op_head = (xlog_op_header_t *)offset;
		if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
			/*
			 * Set tail and last sync so that newly written
			 * log records will point recovery to after the
			 * current unmount record.
			 */
			xlog_assign_atomic_lsn(&log->l_tail_lsn,
					log->l_curr_cycle, after_umount_blk);
			xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
					log->l_curr_cycle, after_umount_blk);
			*tail_blk = after_umount_blk;

			/*
			 * Note that the unmount was clean. If the unmount
			 * was not clean, we need to know this to rebuild the
			 * superblock counters from the perag headers if we
			 * have a filesystem using non-persistent counters.
			 */
			log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
		}
	}

	/*
	 * Make sure that there are no blocks in front of the head
	 * with the same cycle number as the head.  This can happen
	 * because we allow multiple outstanding log writes concurrently,
	 * and the later writes might make it out before earlier ones.
	 *
	 * We use the lsn from before modifying it so that we'll never
	 * overwrite the unmount record after a clean unmount.
	 *
	 * Do this only if we are going to recover the filesystem
	 *
	 * NOTE: This used to say "if (!readonly)"
	 * However on Linux, we can & do recover a read-only filesystem.
	 * We only skip recovery if NORECOVERY is specified on mount,
	 * in which case we would not be here.
	 *
	 * But... if the -device- itself is readonly, just skip this.
	 * We can't recover this device anyway, so it won't matter.
	 */
	if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp))
		error = xlog_clear_stale_blocks(log, tail_lsn);

done:
	xlog_put_bp(bp);

	if (error)
		xfs_warn(log->l_mp, "failed to locate log tail");
	return error;
}
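
/*
 * Layout sketch for the unmount-record check above (illustrative): the
 * last record header sits at block i, its data begins hblks blocks
 * later, and the head follows the record's h_len bytes of payload:
 *
 *	i		i + hblks		after_umount_blk
 *	| rec header	| unmount op (1 blk)...	| <- next write here
 *
 * A clean unmount is recognised when the head is exactly
 * after_umount_blk and the record carries a single log operation
 * flagged XLOG_UNMOUNT_TRANS; the modulo arithmetic lets the record
 * wrap past the physical end of the log.
 */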

/*
 * Is the log zeroed at all?
 *
 * The last binary search should be changed to perform an X block read
 * once X becomes small enough.  You can then search linearly through
 * the X blocks.  This will cut down on the number of reads we need to do.
 *
 * If the log is partially zeroed, this routine will pass back the blkno
 * of the first block with cycle number 0.  It won't have a complete LR
 * preceding it.
 *
 * Return:
 *	0  => the log is completely written to
 *	-1 => use *blk_no as the first block of the log
 *	>0 => error has occurred
 */
STATIC int
xlog_find_zeroed(
	struct xlog	*log,
	xfs_daddr_t	*blk_no)
{
	xfs_buf_t	*bp;
	xfs_caddr_t	offset;
	uint		first_cycle, last_cycle;
	xfs_daddr_t	new_blk, last_blk, start_blk;
	xfs_daddr_t	num_scan_bblks;
	int		error, log_bbnum = log->l_logBBsize;

	*blk_no = 0;

	/* check totally zeroed log */
	bp = xlog_get_bp(log, 1);
	if (!bp)
		return ENOMEM;
	error = xlog_bread(log, 0, 1, bp, &offset);
	if (error)
		goto bp_err;

	first_cycle = xlog_get_cycle(offset);
	if (first_cycle == 0) {		/* completely zeroed log */
		*blk_no = 0;
		xlog_put_bp(bp);
		return -1;
	}

	/* check partially zeroed log */
	error = xlog_bread(log, log_bbnum-1, 1, bp, &offset);
	if (error)
		goto bp_err;

	last_cycle = xlog_get_cycle(offset);
	if (last_cycle != 0) {		/* log completely written to */
		xlog_put_bp(bp);
		return 0;
	} else if (first_cycle != 1) {
		/*
		 * If the cycle of the last block is zero, the cycle of
		 * the first block must be 1. If it's not, maybe we're
		 * not looking at a log... Bail out.
		 */
		xfs_warn(log->l_mp,
			"Log inconsistent or not a log (last==0, first!=1)");
		error = XFS_ERROR(EINVAL);
		goto bp_err;
	}

	/* we have a partially zeroed log */
	last_blk = log_bbnum-1;
	if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0)))
		goto bp_err;

	/*
	 * Validate the answer.  Because there is no way to guarantee that
	 * the entire log is made up of log records which are the same size,
	 * we scan over the defined maximum blocks.  At this point, the maximum
	 * is not chosen to mean anything special.  XXXmiken
	 */
	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
	ASSERT(num_scan_bblks <= INT_MAX);

	if (last_blk < num_scan_bblks)
		num_scan_bblks = last_blk;
	start_blk = last_blk - num_scan_bblks;

	/*
	 * We search for any instances of cycle number 0 that occur before
	 * our current estimate of the head.  What we're trying to detect is
	 *        1 ... | 0 | 1 | 0...
	 *                       ^ binary search ends here
	 */
	if ((error = xlog_find_verify_cycle(log, start_blk,
					 (int)num_scan_bblks, 0, &new_blk)))
		goto bp_err;
	if (new_blk != -1)
		last_blk = new_blk;

	/*
	 * Potentially backup over partial log record write.  We don't need
	 * to search the end of the log because we know it is zero.
	 */
	if ((error = xlog_find_verify_log_record(log, start_blk,
				&last_blk, 0)) == -1) {
		error = XFS_ERROR(EIO);
		goto bp_err;
	} else if (error)
		goto bp_err;

	*blk_no = last_blk;
bp_err:
	xlog_put_bp(bp);
	if (error)
		return error;
	return -1;
}

/*
 * These are simple subroutines used by xlog_clear_stale_blocks() below
 * to initialize a buffer full of empty log record headers and write
 * them into the log.
 */
STATIC void
xlog_add_record(
	struct xlog		*log,
	xfs_caddr_t		buf,
	int			cycle,
	int			block,
	int			tail_cycle,
	int			tail_block)
{
	xlog_rec_header_t	*recp = (xlog_rec_header_t *)buf;

	memset(buf, 0, BBSIZE);
	recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
	recp->h_cycle = cpu_to_be32(cycle);
	recp->h_version = cpu_to_be32(
			xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
	recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
	recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
	recp->h_fmt = cpu_to_be32(XLOG_FMT);
	memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
}

STATIC int
xlog_write_log_records(
	struct xlog	*log,
	int		cycle,
	int		start_block,
	int		blocks,
	int		tail_cycle,
	int		tail_block)
{
	xfs_caddr_t	offset;
	xfs_buf_t	*bp;
	int		balign, ealign;
	int		sectbb = log->l_sectBBsize;
	int		end_block = start_block + blocks;
	int		bufblks;
	int		error = 0;
	int		i, j = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks to be written.  If that fails, try
	 * a smaller size.  We need to be able to write at least a
	 * log sector, or we're out of luck.
	 */
	bufblks = 1 << ffs(blocks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(bp = xlog_get_bp(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < sectbb)
			return ENOMEM;
	}

	/* We may need to do a read at the start to fill in part of
	 * the buffer in the starting sector not covered by the first
	 * write below.
	 */
	balign = round_down(start_block, sectbb);
	if (balign != start_block) {
		error = xlog_bread_noalign(log, start_block, 1, bp);
		if (error)
			goto out_put_bp;

		j = start_block - balign;
	}

	for (i = start_block; i < end_block; i += bufblks) {
		int		bcount, endcount;

		bcount = min(bufblks, end_block - start_block);
		endcount = bcount - j;

		/* We may need to do a read at the end to fill in part of
		 * the buffer in the final sector not covered by the write.
		 * If this is the same sector as the above read, skip it.
		 */
		ealign = round_down(end_block, sectbb);
		if (j == 0 && (start_block + endcount > ealign)) {
			offset = bp->b_addr + BBTOB(ealign - start_block);
			error = xlog_bread_offset(log, ealign, sectbb,
						  bp, offset);
			if (error)
				break;

		}

		offset = xlog_align(log, start_block, endcount, bp);
		for (; j < endcount; j++) {
			xlog_add_record(log, offset, cycle, i+j,
					tail_cycle, tail_block);
			offset += BBSIZE;
		}
		error = xlog_bwrite(log, start_block, endcount, bp);
		if (error)
			break;
		start_block += endcount;
		j = 0;
	}

out_put_bp:
	xlog_put_bp(bp);
	return error;
}
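
/*
 * Example of the partial-sector handling above (illustration only,
 * assuming 8-BB sectors and bufblks == 8): clearing blocks 10-29
 * gives balign == 8, so the first sector is pre-read and j == 2
 * preserves blocks 8-9.  The final chunk starts at block 24 with
 * endcount == 6; since 24 + 6 > ealign == 24, sector 24-31 is
 * pre-read before the stamping loop overwrites 24-29, keeping
 * blocks 30-31 intact on disk.
 */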

/*
 * This routine is called to blow away any incomplete log writes out
 * in front of the log head.  We do this so that we won't become confused
 * if we come up, write only a little bit more, and then crash again.
 * If we leave the partial log records out there, this situation could
 * cause us to think those partial writes are valid blocks since they
 * have the current cycle number.  We get rid of them by overwriting them
 * with empty log records with the old cycle number rather than the
 * current one.
 *
 * The tail lsn is passed in rather than taken from
 * the log so that we will not write over the unmount record after a
 * clean unmount in a 512 block log.  Doing so would leave the log without
 * any valid log records in it until a new one was written.  If we crashed
 * during that time we would not be able to recover.
 */
STATIC int
xlog_clear_stale_blocks(
	struct xlog	*log,
	xfs_lsn_t	tail_lsn)
{
	int		tail_cycle, head_cycle;
	int		tail_block, head_block;
	int		tail_distance, max_distance;
	int		distance;
	int		error;

	tail_cycle = CYCLE_LSN(tail_lsn);
	tail_block = BLOCK_LSN(tail_lsn);
	head_cycle = log->l_curr_cycle;
	head_block = log->l_curr_block;

	/*
	 * Figure out the distance between the new head of the log
	 * and the tail.  We want to write over any blocks beyond the
	 * head that we may have written just before the crash, but
	 * we don't want to overwrite the tail of the log.
	 */
	if (head_cycle == tail_cycle) {
		/*
		 * The tail is behind the head in the physical log,
		 * so the distance from the head to the tail is the
		 * distance from the head to the end of the log plus
		 * the distance from the beginning of the log to the
		 * tail.
		 */
		if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
			XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
					 XFS_ERRLEVEL_LOW, log->l_mp);
			return XFS_ERROR(EFSCORRUPTED);
		}
		tail_distance = tail_block + (log->l_logBBsize - head_block);
	} else {
		/*
		 * The head is behind the tail in the physical log,
		 * so the distance from the head to the tail is just
		 * the tail block minus the head block.
		 */
		if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
			XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
					 XFS_ERRLEVEL_LOW, log->l_mp);
			return XFS_ERROR(EFSCORRUPTED);
		}
		tail_distance = tail_block - head_block;
	}

	/*
	 * If the head is right up against the tail, we can't clear
	 * anything.
	 */
	if (tail_distance <= 0) {
		ASSERT(tail_distance == 0);
		return 0;
	}

	max_distance = XLOG_TOTAL_REC_SHIFT(log);
	/*
	 * Take the smaller of the maximum amount of outstanding I/O
	 * we could have and the distance to the tail to clear out.
	 * We take the smaller so that we don't overwrite the tail and
	 * we don't waste all day writing from the head to the tail
	 * for no reason.
	 */
	max_distance = MIN(max_distance, tail_distance);

	if ((head_block + max_distance) <= log->l_logBBsize) {
		/*
		 * We can stomp all the blocks we need to without
		 * wrapping around the end of the log.  Just do it
		 * in a single write.  Use the cycle number of the
		 * current cycle minus one so that the log will look like:
		 *     n ... | n - 1 ...
		 */
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, max_distance, tail_cycle,
				tail_block);
		if (error)
			return error;
	} else {
		/*
		 * We need to wrap around the end of the physical log in
		 * order to clear all the blocks.  Do it in two separate
		 * I/Os.  The first write should be from the head to the
		 * end of the physical log, and it should use the current
		 * cycle number minus one just like above.
		 */
		distance = log->l_logBBsize - head_block;
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, distance, tail_cycle,
				tail_block);

		if (error)
			return error;

		/*
		 * Now write the blocks at the start of the physical log.
		 * This writes the remainder of the blocks we want to clear.
		 * It uses the current cycle number since we're now on the
		 * same cycle as the head so that we get:
		 *    n ... n ... | n - 1 ...
		 *    ^^^^^ blocks we're writing
		 */
		distance = max_distance - (log->l_logBBsize - head_block);
		error = xlog_write_log_records(log, head_cycle, 0, distance,
				tail_cycle, tail_block);
		if (error)
			return error;
	}

	return 0;
}
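
/*
 * Illustrative numbers for the wrap case above (not original text):
 * in a 1000-block log with head_block == 900 and max_distance == 200,
 * the first call stamps blocks 900-999 with cycle n - 1 and the
 * second stamps blocks 0-99 with cycle n, matching the picture
 *	n ... n ... | n - 1 ...
 * so a forward scan still sees exactly one cycle change.
 */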

/******************************************************************************
 *
 *		Log recover routines
 *
 ******************************************************************************
 */

STATIC xlog_recover_t *
xlog_recover_find_tid(
	struct hlist_head	*head,
	xlog_tid_t		tid)
{
	xlog_recover_t		*trans;

	hlist_for_each_entry(trans, head, r_list) {
		if (trans->r_log_tid == tid)
			return trans;
	}
	return NULL;
}

STATIC void
xlog_recover_new_tid(
	struct hlist_head	*head,
	xlog_tid_t		tid,
	xfs_lsn_t		lsn)
{
	xlog_recover_t		*trans;

	trans = kmem_zalloc(sizeof(xlog_recover_t), KM_SLEEP);
	trans->r_log_tid = tid;
	trans->r_lsn = lsn;
	INIT_LIST_HEAD(&trans->r_itemq);

	INIT_HLIST_NODE(&trans->r_list);
	hlist_add_head(&trans->r_list, head);
}

STATIC void
xlog_recover_add_item(
	struct list_head	*head)
{
	xlog_recover_item_t	*item;

	item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
	INIT_LIST_HEAD(&item->ri_list);
	list_add_tail(&item->ri_list, head);
}

STATIC int
xlog_recover_add_to_cont_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	xfs_caddr_t		dp,
	int			len)
{
	xlog_recover_item_t	*item;
	xfs_caddr_t		ptr, old_ptr;
	int			old_len;

	if (list_empty(&trans->r_itemq)) {
		/* finish copying rest of trans header */
		xlog_recover_add_item(&trans->r_itemq);
		ptr = (xfs_caddr_t) &trans->r_theader +
				sizeof(xfs_trans_header_t) - len;
		memcpy(ptr, dp, len); /* d, s, l */
		return 0;
	}
	/* take the tail entry */
	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);

	old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
	old_len = item->ri_buf[item->ri_cnt-1].i_len;

	ptr = kmem_realloc(old_ptr, len+old_len, old_len, KM_SLEEP);
	memcpy(&ptr[old_len], dp, len); /* d, s, l */
	item->ri_buf[item->ri_cnt-1].i_len += len;
	item->ri_buf[item->ri_cnt-1].i_addr = ptr;
	trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
	return 0;
}

/*
 * The next region to add is the start of a new region.  It could be
 * a whole region or it could be the first part of a new region.  Because
 * of this, the assumption here is that the type and size fields of all
 * format structures fit into the first 32 bits of the structure.
 *
 * This works because all regions must be 32 bit aligned.  Therefore, we
 * either have both fields or we have neither field.  In the case we have
 * neither field, the data part of the region is zero length.  We only have
 * a log_op_header and can throw away the header since a new one will appear
 * later.  If we have at least 4 bytes, then we can determine how many regions
 * will appear in the current log item.
 */
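
/*
 * A minimal sketch of that assumption (illustrative): log item format
 * structures start with 16-bit type and size fields, so the first 32
 * bits of a region are enough to dispatch on, e.g.:
 *
 *	typedef struct xfs_inode_log_format {
 *		__uint16_t	ilf_type;	-- type; first 16 bits
 *		__uint16_t	ilf_size;	-- region count; next 16
 *		...
 *	} xfs_inode_log_format_t;
 */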
STATIC int
xlog_recover_add_to_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	xfs_caddr_t		dp,
	int			len)
{
	xfs_inode_log_format_t	*in_f;			/* any will do */
	xlog_recover_item_t	*item;
	xfs_caddr_t		ptr;

	if (!len)
		return 0;
	if (list_empty(&trans->r_itemq)) {
		/* we need to catch log corruptions here */
		if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
			xfs_warn(log->l_mp, "%s: bad header magic number",
				__func__);
			ASSERT(0);
			return XFS_ERROR(EIO);
		}
		if (len == sizeof(xfs_trans_header_t))
			xlog_recover_add_item(&trans->r_itemq);
		memcpy(&trans->r_theader, dp, len); /* d, s, l */
		return 0;
	}

	ptr = kmem_alloc(len, KM_SLEEP);
	memcpy(ptr, dp, len);
	in_f = (xfs_inode_log_format_t *)ptr;

	/* take the tail entry */
	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
	if (item->ri_total != 0 &&
	    item->ri_total == item->ri_cnt) {
		/* tail item is in use, get a new one */
		xlog_recover_add_item(&trans->r_itemq);
		item = list_entry(trans->r_itemq.prev,
					xlog_recover_item_t, ri_list);
	}

	if (item->ri_total == 0) {		/* first region to be added */
		if (in_f->ilf_size == 0 ||
		    in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
			xfs_warn(log->l_mp,
		"bad number of regions (%d) in inode log format",
				  in_f->ilf_size);
			ASSERT(0);
			kmem_free(ptr);
			return XFS_ERROR(EIO);
		}

		item->ri_total = in_f->ilf_size;
		item->ri_buf =
			kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
				    KM_SLEEP);
	}
	ASSERT(item->ri_total > item->ri_cnt);
	/* Description region is ri_buf[0] */
	item->ri_buf[item->ri_cnt].i_addr = ptr;
	item->ri_buf[item->ri_cnt].i_len = len;
	item->ri_cnt++;
	trace_xfs_log_recover_item_add(log, trans, item, 0);
	return 0;
}

/*
 * Sort the log items in the transaction.
 *
 * The ordering constraints are defined by the inode allocation and unlink
 * behaviour. The rules are:
 *
 * 1. Every item is only logged once in a given transaction. Hence it
 *    represents the last logged state of the item. Hence ordering is
 *    dependent on the order in which operations need to be performed so
 *    required initial conditions are always met.
 *
 * 2. Cancelled buffers are recorded in pass 1 in a separate table and
 *    there's nothing to replay from them so we can simply cull them
 *    from the transaction. However, we can't do that until after we've
 *    replayed all the other items because they may be dependent on the
 *    cancelled buffer and replaying the cancelled buffer can remove it
 *    from the cancelled buffer table. Hence they have to be done last.
 *
 * 3. Inode allocation buffers must be replayed before inode items that
 *    read the buffer and replay changes into it. For filesystems using the
 *    ICREATE transactions, this means XFS_LI_ICREATE objects need to get
 *    treated the same as inode allocation buffers as they create and
 *    initialise the buffers directly.
 *
 * 4. Inode unlink buffers must be replayed after inode items are replayed.
 *    This ensures that inodes are completely flushed to the inode buffer
 *    in a "free" state before we remove the unlinked inode list pointer.
 *
 * Hence the ordering needs to be inode allocation buffers first, inode items
 * second, inode unlink buffers third and cancelled buffers last.
 *
 * But there's a problem with that - we can't tell an inode allocation buffer
 * apart from a regular buffer, so we can't separate them. We can, however,
 * tell an inode unlink buffer from the others, and so we can separate them out
 * from all the other buffers and move them to last.
 *
 * Hence, 4 lists, in order from head to tail:
 *	- buffer_list for all buffers except cancelled/inode unlink buffers
 *	- item_list for all non-buffer items
 *	- inode_buffer_list for inode unlink buffers
 *	- cancel_list for the cancelled buffers
 *
 * Note that we add objects to the tail of the lists so that first-to-last
 * ordering is preserved within the lists. Adding objects to the head of the
 * list means when we traverse from the head we walk them in last-to-first
 * order. For cancelled buffers and inode unlink buffers this doesn't matter,
 * but for all other items there may be specific ordering that we need to
 * preserve.
 */
STATIC int
xlog_recover_reorder_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	int			pass)
{
	xlog_recover_item_t	*item, *n;
	int			error = 0;
	LIST_HEAD(sort_list);
	LIST_HEAD(cancel_list);
	LIST_HEAD(buffer_list);
	LIST_HEAD(inode_buffer_list);
	LIST_HEAD(inode_list);

	list_splice_init(&trans->r_itemq, &sort_list);
	list_for_each_entry_safe(item, n, &sort_list, ri_list) {
		xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;

		switch (ITEM_TYPE(item)) {
		case XFS_LI_ICREATE:
			list_move_tail(&item->ri_list, &buffer_list);
			break;
		case XFS_LI_BUF:
			if (buf_f->blf_flags & XFS_BLF_CANCEL) {
				trace_xfs_log_recover_item_reorder_head(log,
							trans, item, pass);
				list_move(&item->ri_list, &cancel_list);
				break;
			}
			if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
				list_move(&item->ri_list, &inode_buffer_list);
				break;
			}
			list_move_tail(&item->ri_list, &buffer_list);
			break;
		case XFS_LI_INODE:
		case XFS_LI_DQUOT:
		case XFS_LI_QUOTAOFF:
		case XFS_LI_EFD:
		case XFS_LI_EFI:
			trace_xfs_log_recover_item_reorder_tail(log,
							trans, item, pass);
			list_move_tail(&item->ri_list, &inode_list);
			break;
		default:
			xfs_warn(log->l_mp,
				"%s: unrecognized type of log operation",
				__func__);
			ASSERT(0);
			/*
			 * return the remaining items back to the transaction
			 * item list so they can be freed in caller.
			 */
			if (!list_empty(&sort_list))
				list_splice_init(&sort_list, &trans->r_itemq);
			error = XFS_ERROR(EIO);
			goto out;
		}
	}
out:
	ASSERT(list_empty(&sort_list));
	if (!list_empty(&buffer_list))
		list_splice(&buffer_list, &trans->r_itemq);
	if (!list_empty(&inode_list))
		list_splice_tail(&inode_list, &trans->r_itemq);
	if (!list_empty(&inode_buffer_list))
		list_splice_tail(&inode_buffer_list, &trans->r_itemq);
	if (!list_empty(&cancel_list))
		list_splice_tail(&cancel_list, &trans->r_itemq);
	return error;
}

/*
 * Build up the table of buf cancel records so that we don't replay
 * cancelled data in the second pass.  For buffer records that are
 * not cancel records, there is nothing to do here so we just return.
 *
 * If we get a cancel record which is already in the table, this indicates
 * that the buffer was cancelled multiple times.  In order to ensure
 * that during pass 2 we keep the record in the table until we reach its
 * last occurrence in the log, we keep a reference count in the cancel
 * record in the table to tell us how many times we expect to see this
 * record during the second pass.
 */
STATIC int
xlog_recover_buffer_pass1(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
	struct list_head	*bucket;
	struct xfs_buf_cancel	*bcp;

	/*
	 * If this isn't a cancel buffer item, then just return.
	 */
	if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
		trace_xfs_log_recover_buf_not_cancel(log, buf_f);
		return 0;
	}

	/*
	 * Insert an xfs_buf_cancel record into the hash table of them.
	 * If there is already an identical record, bump its reference count.
	 */
	bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno);
	list_for_each_entry(bcp, bucket, bc_list) {
		if (bcp->bc_blkno == buf_f->blf_blkno &&
		    bcp->bc_len == buf_f->blf_len) {
			bcp->bc_refcount++;
			trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
			return 0;
		}
	}

	bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP);
	bcp->bc_blkno = buf_f->blf_blkno;
	bcp->bc_len = buf_f->blf_len;
	bcp->bc_refcount = 1;
	list_add_tail(&bcp->bc_list, bucket);

	trace_xfs_log_recover_buf_cancel_add(log, buf_f);
	return 0;
}

/*
 * Check to see whether the buffer being recovered has a corresponding
 * entry in the buffer cancel record table. If it is, return the cancel
 * buffer structure to the caller.
 */
STATIC struct xfs_buf_cancel *
xlog_peek_buffer_cancelled(
	struct xlog		*log,
	xfs_daddr_t		blkno,
	uint			len,
	ushort			flags)
{
	struct list_head	*bucket;
	struct xfs_buf_cancel	*bcp;

	if (!log->l_buf_cancel_table) {
		/* empty table means no cancelled buffers in the log */
		ASSERT(!(flags & XFS_BLF_CANCEL));
		return NULL;
	}

	bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
	list_for_each_entry(bcp, bucket, bc_list) {
		if (bcp->bc_blkno == blkno && bcp->bc_len == len)
			return bcp;
	}

	/*
	 * We didn't find a corresponding entry in the table, so return 0 so
	 * that the buffer is NOT cancelled.
	 */
	ASSERT(!(flags & XFS_BLF_CANCEL));
	return NULL;
}

/*
 * If the buffer is being cancelled then return 1 so that it will be cancelled,
 * otherwise return 0.  If the buffer is actually a buffer cancel item
 * (XFS_BLF_CANCEL is set), then decrement the refcount on the entry in the
 * table and remove it from the table if this is the last reference.
 *
 * We remove the cancel record from the table when we encounter its last
 * occurrence in the log so that if the same buffer is re-used again after its
 * last cancellation we actually replay the changes made at that point.
 */
STATIC int
xlog_check_buffer_cancelled(
	struct xlog		*log,
	xfs_daddr_t		blkno,
	uint			len,
	ushort			flags)
{
	struct xfs_buf_cancel	*bcp;

	bcp = xlog_peek_buffer_cancelled(log, blkno, len, flags);
	if (!bcp)
		return 0;
1829
1830 /*
1831 	 * We've got a match, so return 1 so that the recovery of this buffer
1832 * is cancelled. If this buffer is actually a buffer cancel log
1833 * item, then decrement the refcount on the one in the table and
1834 * remove it if this is the last reference.
1835 */
1836 if (flags & XFS_BLF_CANCEL) {
1837 if (--bcp->bc_refcount == 0) {
1838 list_del(&bcp->bc_list);
1839 kmem_free(bcp);
1840 }
1841 }
1842 return 1;
1843 }
1844
1845 /*
1846 * Perform recovery for a buffer full of inodes. In these buffers, the only
1847 * data which should be recovered is that which corresponds to the
1848 * di_next_unlinked pointers in the on disk inode structures. The rest of the
1849 * data for the inodes is always logged through the inodes themselves rather
1850 * than the inode buffer and is recovered in xlog_recover_inode_pass2().
1851 *
1852 * The only time when buffers full of inodes are fully recovered is when the
1853 * buffer is full of newly allocated inodes. In this case the buffer will
1854 * not be marked as an inode buffer and so will be sent to
1855 * xlog_recover_do_reg_buffer() below during recovery.
1856 */
1857 STATIC int
1858 xlog_recover_do_inode_buffer(
1859 struct xfs_mount *mp,
1860 xlog_recover_item_t *item,
1861 struct xfs_buf *bp,
1862 xfs_buf_log_format_t *buf_f)
1863 {
1864 int i;
1865 int item_index = 0;
1866 int bit = 0;
1867 int nbits = 0;
1868 int reg_buf_offset = 0;
1869 int reg_buf_bytes = 0;
1870 int next_unlinked_offset;
1871 int inodes_per_buf;
1872 xfs_agino_t *logged_nextp;
1873 xfs_agino_t *buffer_nextp;
1874
1875 trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
1876
1877 /*
1878 * Post recovery validation only works properly on CRC enabled
1879 * filesystems.
1880 */
1881 if (xfs_sb_version_hascrc(&mp->m_sb))
1882 bp->b_ops = &xfs_inode_buf_ops;
1883
1884 inodes_per_buf = BBTOB(bp->b_io_length) >> mp->m_sb.sb_inodelog;
1885 for (i = 0; i < inodes_per_buf; i++) {
1886 next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
1887 offsetof(xfs_dinode_t, di_next_unlinked);
1888
1889 while (next_unlinked_offset >=
1890 (reg_buf_offset + reg_buf_bytes)) {
1891 /*
1892 * The next di_next_unlinked field is beyond
1893 * the current logged region. Find the next
1894 * logged region that contains or is beyond
1895 * the current di_next_unlinked field.
1896 */
1897 bit += nbits;
1898 bit = xfs_next_bit(buf_f->blf_data_map,
1899 buf_f->blf_map_size, bit);
1900
1901 /*
1902 * If there are no more logged regions in the
1903 * buffer, then we're done.
1904 */
1905 if (bit == -1)
1906 return 0;
1907
1908 nbits = xfs_contig_bits(buf_f->blf_data_map,
1909 buf_f->blf_map_size, bit);
1910 ASSERT(nbits > 0);
1911 reg_buf_offset = bit << XFS_BLF_SHIFT;
1912 reg_buf_bytes = nbits << XFS_BLF_SHIFT;
1913 item_index++;
1914 }
1915
1916 /*
1917 * If the current logged region starts after the current
1918 * di_next_unlinked field, then move on to the next
1919 * di_next_unlinked field.
1920 */
1921 if (next_unlinked_offset < reg_buf_offset)
1922 continue;
1923
1924 ASSERT(item->ri_buf[item_index].i_addr != NULL);
1925 ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
1926 ASSERT((reg_buf_offset + reg_buf_bytes) <=
1927 BBTOB(bp->b_io_length));
1928
1929 /*
1930 * The current logged region contains a copy of the
1931 * current di_next_unlinked field. Extract its value
1932 * and copy it to the buffer copy.
1933 */
1934 logged_nextp = item->ri_buf[item_index].i_addr +
1935 next_unlinked_offset - reg_buf_offset;
1936 if (unlikely(*logged_nextp == 0)) {
1937 xfs_alert(mp,
1938 "Bad inode buffer log record (ptr = 0x%p, bp = 0x%p). "
1939 "Trying to replay bad (0) inode di_next_unlinked field.",
1940 item, bp);
1941 XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
1942 XFS_ERRLEVEL_LOW, mp);
1943 return XFS_ERROR(EFSCORRUPTED);
1944 }
1945
1946 buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp,
1947 next_unlinked_offset);
1948 *buffer_nextp = *logged_nextp;
1949
1950 /*
1951 * If necessary, recalculate the CRC in the on-disk inode. We
1952 * have to leave the inode in a consistent state for whoever
1953 * reads it next....
1954 */
1955 xfs_dinode_calc_crc(mp, (struct xfs_dinode *)
1956 xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize));
1957
1958 }
1959
1960 return 0;
1961 }
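
/*
 * Sketch (hypothetical helper, mirroring the loop above): where the
 * di_next_unlinked field of the i-th inode lives within the buffer.
 * Only these fields are replayed from inode buffers; all other inode
 * data arrives via inode log items.
 */
static inline int
xlog_sketch_next_unlinked_offset(struct xfs_mount *mp, int i)
{
	return (i * mp->m_sb.sb_inodesize) +
			offsetof(xfs_dinode_t, di_next_unlinked);
}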
1962
1963 /*
1964 * V5 filesystems know the age of the buffer on disk being recovered. We can
1965 * have newer objects on disk than we are replaying, and so for these cases we
1966 * don't want to replay the current change as that will make the buffer contents
1967 * temporarily invalid on disk.
1968 *
1969 * The magic number might not match the buffer type we are going to recover
1970 * (e.g. reallocated blocks), so we ignore the xfs_buf_log_format flags. Hence
1971  * extract the LSN of the existing object in the buffer based on its current
1972  * magic number.  If we don't recognise the magic number in the buffer, then
1973  * return an LSN of -1 so that the caller knows it was an unrecognised block and
1974 * so can recover the buffer.
1975 *
1976 * Note: we cannot rely solely on magic number matches to determine that the
1977 * buffer has a valid LSN - we also need to verify that it belongs to this
1978 * filesystem, so we need to extract the object's LSN and compare it to that
1979 * which we read from the superblock. If the UUIDs don't match, then we've got a
1980 * stale metadata block from an old filesystem instance that we need to recover
1981 * over the top of.
1982 */
1983 static xfs_lsn_t
1984 xlog_recover_get_buf_lsn(
1985 struct xfs_mount *mp,
1986 struct xfs_buf *bp)
1987 {
1988 __uint32_t magic32;
1989 __uint16_t magic16;
1990 __uint16_t magicda;
1991 void *blk = bp->b_addr;
1992 uuid_t *uuid;
1993 xfs_lsn_t lsn = -1;
1994
1995 /* v4 filesystems always recover immediately */
1996 if (!xfs_sb_version_hascrc(&mp->m_sb))
1997 goto recover_immediately;
1998
1999 magic32 = be32_to_cpu(*(__be32 *)blk);
2000 switch (magic32) {
2001 case XFS_ABTB_CRC_MAGIC:
2002 case XFS_ABTC_CRC_MAGIC:
2003 case XFS_ABTB_MAGIC:
2004 case XFS_ABTC_MAGIC:
2005 case XFS_IBT_CRC_MAGIC:
2006 case XFS_IBT_MAGIC: {
2007 struct xfs_btree_block *btb = blk;
2008
2009 lsn = be64_to_cpu(btb->bb_u.s.bb_lsn);
2010 uuid = &btb->bb_u.s.bb_uuid;
2011 break;
2012 }
2013 case XFS_BMAP_CRC_MAGIC:
2014 case XFS_BMAP_MAGIC: {
2015 struct xfs_btree_block *btb = blk;
2016
2017 lsn = be64_to_cpu(btb->bb_u.l.bb_lsn);
2018 uuid = &btb->bb_u.l.bb_uuid;
2019 break;
2020 }
2021 case XFS_AGF_MAGIC:
2022 lsn = be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn);
2023 uuid = &((struct xfs_agf *)blk)->agf_uuid;
2024 break;
2025 case XFS_AGFL_MAGIC:
2026 lsn = be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn);
2027 uuid = &((struct xfs_agfl *)blk)->agfl_uuid;
2028 break;
2029 case XFS_AGI_MAGIC:
2030 lsn = be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn);
2031 uuid = &((struct xfs_agi *)blk)->agi_uuid;
2032 break;
2033 case XFS_SYMLINK_MAGIC:
2034 lsn = be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn);
2035 uuid = &((struct xfs_dsymlink_hdr *)blk)->sl_uuid;
2036 break;
2037 case XFS_DIR3_BLOCK_MAGIC:
2038 case XFS_DIR3_DATA_MAGIC:
2039 case XFS_DIR3_FREE_MAGIC:
2040 lsn = be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn);
2041 uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
2042 break;
2043 case XFS_ATTR3_RMT_MAGIC:
2044 lsn = be64_to_cpu(((struct xfs_attr3_rmt_hdr *)blk)->rm_lsn);
2045 uuid = &((struct xfs_attr3_rmt_hdr *)blk)->rm_uuid;
2046 break;
2047 case XFS_SB_MAGIC:
2048 lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
2049 uuid = &((struct xfs_dsb *)blk)->sb_uuid;
2050 break;
2051 default:
2052 break;
2053 }
2054
2055 if (lsn != (xfs_lsn_t)-1) {
2056 if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
2057 goto recover_immediately;
2058 return lsn;
2059 }
2060
2061 magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic);
2062 switch (magicda) {
2063 case XFS_DIR3_LEAF1_MAGIC:
2064 case XFS_DIR3_LEAFN_MAGIC:
2065 case XFS_DA3_NODE_MAGIC:
2066 lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
2067 uuid = &((struct xfs_da3_blkinfo *)blk)->uuid;
2068 break;
2069 default:
2070 break;
2071 }
2072
2073 if (lsn != (xfs_lsn_t)-1) {
2074 if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
2075 goto recover_immediately;
2076 return lsn;
2077 }
2078
2079 /*
2080 * We do individual object checks on dquot and inode buffers as they
2081 * have their own individual LSN records. Also, we could have a stale
2082 * buffer here, so we have to at least recognise these buffer types.
2083 *
2084 	 * A noted complexity here is inode unlinked list processing - it logs
2085 * the inode directly in the buffer, but we don't know which inodes have
2086 * been modified, and there is no global buffer LSN. Hence we need to
2087 * recover all inode buffer types immediately. This problem will be
2088 * fixed by logical logging of the unlinked list modifications.
2089 */
2090 magic16 = be16_to_cpu(*(__be16 *)blk);
2091 switch (magic16) {
2092 case XFS_DQUOT_MAGIC:
2093 case XFS_DINODE_MAGIC:
2094 goto recover_immediately;
2095 default:
2096 break;
2097 }
2098
2099 /* unknown buffer contents, recover immediately */
2100
2101 recover_immediately:
2102 return (xfs_lsn_t)-1;
2103
2104 }
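
/*
 * Sketch of how a caller consumes the result above (illustration only;
 * xlog_recover_buffer_pass2 below open-codes the same test): a buffer
 * is replayed when it has no recognisable LSN, or when its LSN is
 * older than the transaction being replayed.
 */
static inline int
xlog_sketch_should_replay(xfs_lsn_t buf_lsn, xfs_lsn_t current_lsn)
{
	if (buf_lsn == (xfs_lsn_t)-1)
		return 1;	/* unrecognised contents: replay */
	return XFS_LSN_CMP(buf_lsn, current_lsn) < 0;
}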
2105
2106 /*
2107 * Validate the recovered buffer is of the correct type and attach the
2108  * appropriate buffer operations to it for writeback. Magic numbers are in a
2109 * few places:
2110 * the first 16 bits of the buffer (inode buffer, dquot buffer),
2111 * the first 32 bits of the buffer (most blocks),
2112 * inside a struct xfs_da_blkinfo at the start of the buffer.
2113 */
2114 static void
2115 xlog_recover_validate_buf_type(
2116 struct xfs_mount *mp,
2117 struct xfs_buf *bp,
2118 xfs_buf_log_format_t *buf_f)
2119 {
2120 struct xfs_da_blkinfo *info = bp->b_addr;
2121 __uint32_t magic32;
2122 __uint16_t magic16;
2123 __uint16_t magicda;
2124
2125 magic32 = be32_to_cpu(*(__be32 *)bp->b_addr);
2126 magic16 = be16_to_cpu(*(__be16*)bp->b_addr);
2127 magicda = be16_to_cpu(info->magic);
2128 switch (xfs_blft_from_flags(buf_f)) {
2129 case XFS_BLFT_BTREE_BUF:
2130 switch (magic32) {
2131 case XFS_ABTB_CRC_MAGIC:
2132 case XFS_ABTC_CRC_MAGIC:
2133 case XFS_ABTB_MAGIC:
2134 case XFS_ABTC_MAGIC:
2135 bp->b_ops = &xfs_allocbt_buf_ops;
2136 break;
2137 case XFS_IBT_CRC_MAGIC:
2138 case XFS_IBT_MAGIC:
2139 bp->b_ops = &xfs_inobt_buf_ops;
2140 break;
2141 case XFS_BMAP_CRC_MAGIC:
2142 case XFS_BMAP_MAGIC:
2143 bp->b_ops = &xfs_bmbt_buf_ops;
2144 break;
2145 default:
2146 xfs_warn(mp, "Bad btree block magic!");
2147 ASSERT(0);
2148 break;
2149 }
2150 break;
2151 case XFS_BLFT_AGF_BUF:
2152 if (magic32 != XFS_AGF_MAGIC) {
2153 xfs_warn(mp, "Bad AGF block magic!");
2154 ASSERT(0);
2155 break;
2156 }
2157 bp->b_ops = &xfs_agf_buf_ops;
2158 break;
2159 case XFS_BLFT_AGFL_BUF:
2160 if (!xfs_sb_version_hascrc(&mp->m_sb))
2161 break;
2162 if (magic32 != XFS_AGFL_MAGIC) {
2163 xfs_warn(mp, "Bad AGFL block magic!");
2164 ASSERT(0);
2165 break;
2166 }
2167 bp->b_ops = &xfs_agfl_buf_ops;
2168 break;
2169 case XFS_BLFT_AGI_BUF:
2170 if (magic32 != XFS_AGI_MAGIC) {
2171 xfs_warn(mp, "Bad AGI block magic!");
2172 ASSERT(0);
2173 break;
2174 }
2175 bp->b_ops = &xfs_agi_buf_ops;
2176 break;
2177 case XFS_BLFT_UDQUOT_BUF:
2178 case XFS_BLFT_PDQUOT_BUF:
2179 case XFS_BLFT_GDQUOT_BUF:
2180 #ifdef CONFIG_XFS_QUOTA
2181 if (magic16 != XFS_DQUOT_MAGIC) {
2182 xfs_warn(mp, "Bad DQUOT block magic!");
2183 ASSERT(0);
2184 break;
2185 }
2186 bp->b_ops = &xfs_dquot_buf_ops;
2187 #else
2188 xfs_alert(mp,
2189 "Trying to recover dquots without QUOTA support built in!");
2190 ASSERT(0);
2191 #endif
2192 break;
2193 case XFS_BLFT_DINO_BUF:
2194 /*
2195 * we get here with inode allocation buffers, not buffers that
2196 * track unlinked list changes.
2197 */
2198 if (magic16 != XFS_DINODE_MAGIC) {
2199 xfs_warn(mp, "Bad INODE block magic!");
2200 ASSERT(0);
2201 break;
2202 }
2203 bp->b_ops = &xfs_inode_buf_ops;
2204 break;
2205 case XFS_BLFT_SYMLINK_BUF:
2206 if (magic32 != XFS_SYMLINK_MAGIC) {
2207 xfs_warn(mp, "Bad symlink block magic!");
2208 ASSERT(0);
2209 break;
2210 }
2211 bp->b_ops = &xfs_symlink_buf_ops;
2212 break;
2213 case XFS_BLFT_DIR_BLOCK_BUF:
2214 if (magic32 != XFS_DIR2_BLOCK_MAGIC &&
2215 magic32 != XFS_DIR3_BLOCK_MAGIC) {
2216 xfs_warn(mp, "Bad dir block magic!");
2217 ASSERT(0);
2218 break;
2219 }
2220 bp->b_ops = &xfs_dir3_block_buf_ops;
2221 break;
2222 case XFS_BLFT_DIR_DATA_BUF:
2223 if (magic32 != XFS_DIR2_DATA_MAGIC &&
2224 magic32 != XFS_DIR3_DATA_MAGIC) {
2225 xfs_warn(mp, "Bad dir data magic!");
2226 ASSERT(0);
2227 break;
2228 }
2229 bp->b_ops = &xfs_dir3_data_buf_ops;
2230 break;
2231 case XFS_BLFT_DIR_FREE_BUF:
2232 if (magic32 != XFS_DIR2_FREE_MAGIC &&
2233 magic32 != XFS_DIR3_FREE_MAGIC) {
2234 xfs_warn(mp, "Bad dir3 free magic!");
2235 ASSERT(0);
2236 break;
2237 }
2238 bp->b_ops = &xfs_dir3_free_buf_ops;
2239 break;
2240 case XFS_BLFT_DIR_LEAF1_BUF:
2241 if (magicda != XFS_DIR2_LEAF1_MAGIC &&
2242 magicda != XFS_DIR3_LEAF1_MAGIC) {
2243 xfs_warn(mp, "Bad dir leaf1 magic!");
2244 ASSERT(0);
2245 break;
2246 }
2247 bp->b_ops = &xfs_dir3_leaf1_buf_ops;
2248 break;
2249 case XFS_BLFT_DIR_LEAFN_BUF:
2250 if (magicda != XFS_DIR2_LEAFN_MAGIC &&
2251 magicda != XFS_DIR3_LEAFN_MAGIC) {
2252 xfs_warn(mp, "Bad dir leafn magic!");
2253 ASSERT(0);
2254 break;
2255 }
2256 bp->b_ops = &xfs_dir3_leafn_buf_ops;
2257 break;
2258 case XFS_BLFT_DA_NODE_BUF:
2259 if (magicda != XFS_DA_NODE_MAGIC &&
2260 magicda != XFS_DA3_NODE_MAGIC) {
2261 xfs_warn(mp, "Bad da node magic!");
2262 ASSERT(0);
2263 break;
2264 }
2265 bp->b_ops = &xfs_da3_node_buf_ops;
2266 break;
2267 case XFS_BLFT_ATTR_LEAF_BUF:
2268 if (magicda != XFS_ATTR_LEAF_MAGIC &&
2269 magicda != XFS_ATTR3_LEAF_MAGIC) {
2270 xfs_warn(mp, "Bad attr leaf magic!");
2271 ASSERT(0);
2272 break;
2273 }
2274 bp->b_ops = &xfs_attr3_leaf_buf_ops;
2275 break;
2276 case XFS_BLFT_ATTR_RMT_BUF:
2277 if (!xfs_sb_version_hascrc(&mp->m_sb))
2278 break;
2279 if (magic32 != XFS_ATTR3_RMT_MAGIC) {
2280 xfs_warn(mp, "Bad attr remote magic!");
2281 ASSERT(0);
2282 break;
2283 }
2284 bp->b_ops = &xfs_attr3_rmt_buf_ops;
2285 break;
2286 case XFS_BLFT_SB_BUF:
2287 if (magic32 != XFS_SB_MAGIC) {
2288 xfs_warn(mp, "Bad SB block magic!");
2289 ASSERT(0);
2290 break;
2291 }
2292 bp->b_ops = &xfs_sb_buf_ops;
2293 break;
2294 default:
2295 xfs_warn(mp, "Unknown buffer type %d!",
2296 xfs_blft_from_flags(buf_f));
2297 break;
2298 }
2299 }
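
/*
 * Sketch of the three magic number locations listed above (hypothetical
 * helper, for illustration only; the function open-codes the same
 * three reads):
 */
static inline void
xlog_sketch_read_magics(void *addr, __uint32_t *m32, __uint16_t *m16,
			__uint16_t *mda)
{
	struct xfs_da_blkinfo *info = addr;

	*m32 = be32_to_cpu(*(__be32 *)addr);	/* most blocks */
	*m16 = be16_to_cpu(*(__be16 *)addr);	/* inode/dquot buffers */
	*mda = be16_to_cpu(info->magic);	/* da-format blocks */
}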
2300
2301 /*
2302 * Perform a 'normal' buffer recovery. Each logged region of the
2303 * buffer should be copied over the corresponding region in the
2304 * given buffer. The bitmap in the buf log format structure indicates
2305 * where to place the logged data.
2306 */
2307 STATIC void
2308 xlog_recover_do_reg_buffer(
2309 struct xfs_mount *mp,
2310 xlog_recover_item_t *item,
2311 struct xfs_buf *bp,
2312 xfs_buf_log_format_t *buf_f)
2313 {
2314 int i;
2315 int bit;
2316 int nbits;
2317 int error;
2318
2319 trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
2320
2321 bit = 0;
2322 i = 1; /* 0 is the buf format structure */
2323 while (1) {
2324 bit = xfs_next_bit(buf_f->blf_data_map,
2325 buf_f->blf_map_size, bit);
2326 if (bit == -1)
2327 break;
2328 nbits = xfs_contig_bits(buf_f->blf_data_map,
2329 buf_f->blf_map_size, bit);
2330 ASSERT(nbits > 0);
2331 ASSERT(item->ri_buf[i].i_addr != NULL);
2332 ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
2333 ASSERT(BBTOB(bp->b_io_length) >=
2334 ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT));
2335
2336 /*
2337 * The dirty regions logged in the buffer, even though
2338 * contiguous, may span multiple chunks. This is because the
2339 * dirty region may span a physical page boundary in a buffer
2340 * and hence be split into two separate vectors for writing into
2341 * the log. Hence we need to trim nbits back to the length of
2342 * the current region being copied out of the log.
2343 */
2344 if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT))
2345 nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT;
2346
2347 /*
2348 * Do a sanity check if this is a dquot buffer. Just checking
2349 		 * the first dquot in the buffer should do. XXX This is
2350 * probably a good thing to do for other buf types also.
2351 */
2352 error = 0;
2353 if (buf_f->blf_flags &
2354 (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2355 if (item->ri_buf[i].i_addr == NULL) {
2356 xfs_alert(mp,
2357 "XFS: NULL dquot in %s.", __func__);
2358 goto next;
2359 }
2360 if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
2361 xfs_alert(mp,
2362 "XFS: dquot too small (%d) in %s.",
2363 item->ri_buf[i].i_len, __func__);
2364 goto next;
2365 }
2366 error = xfs_dqcheck(mp, item->ri_buf[i].i_addr,
2367 -1, 0, XFS_QMOPT_DOWARN,
2368 "dquot_buf_recover");
2369 if (error)
2370 goto next;
2371 }
2372
2373 memcpy(xfs_buf_offset(bp,
2374 (uint)bit << XFS_BLF_SHIFT), /* dest */
2375 item->ri_buf[i].i_addr, /* source */
2376 nbits<<XFS_BLF_SHIFT); /* length */
2377 next:
2378 i++;
2379 bit += nbits;
2380 }
2381
2382 /* Shouldn't be any more regions */
2383 ASSERT(i == item->ri_total);
2384
2385 /*
2386 * We can only do post recovery validation on items on CRC enabled
2387 	 * filesystems as we need to know when the buffer was written to be able
2388 	 * to determine if we should have replayed the item. If we replay old
2389 	 * metadata over a newer buffer, then it will enter a temporarily
2390 	 * inconsistent state resulting in verification failures. Hence for now
2391 	 * just avoid the verification stage for non-crc filesystems.
2392 */
2393 if (xfs_sb_version_hascrc(&mp->m_sb))
2394 xlog_recover_validate_buf_type(mp, bp, buf_f);
2395 }
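
/*
 * Sketch of the dirty-bitmap arithmetic used above (hypothetical
 * helper, for illustration only): each map bit covers one
 * XFS_BLF_CHUNK (1 << XFS_BLF_SHIFT) bytes of the buffer, so a run of
 * nbits starting at bit maps to this byte range.
 */
static inline void
xlog_sketch_region_bytes(int bit, int nbits, uint *offset, uint *bytes)
{
	*offset = (uint)bit << XFS_BLF_SHIFT;	/* first byte of run */
	*bytes = (uint)nbits << XFS_BLF_SHIFT;	/* length of run */
}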
2396
2397 /*
2398 * Perform a dquot buffer recovery.
2399 * Simple algorithm: if we have found a QUOTAOFF log item of the same type
2400 * (ie. USR or GRP), then just toss this buffer away; don't recover it.
2401 * Else, treat it as a regular buffer and do recovery.
2402 */
2403 STATIC void
2404 xlog_recover_do_dquot_buffer(
2405 struct xfs_mount *mp,
2406 struct xlog *log,
2407 struct xlog_recover_item *item,
2408 struct xfs_buf *bp,
2409 struct xfs_buf_log_format *buf_f)
2410 {
2411 uint type;
2412
2413 trace_xfs_log_recover_buf_dquot_buf(log, buf_f);
2414
2415 /*
2416 * Filesystems are required to send in quota flags at mount time.
2417 */
2418 if (mp->m_qflags == 0) {
2419 return;
2420 }
2421
2422 type = 0;
2423 if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
2424 type |= XFS_DQ_USER;
2425 if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
2426 type |= XFS_DQ_PROJ;
2427 if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
2428 type |= XFS_DQ_GROUP;
2429 /*
2430 * This type of quotas was turned off, so ignore this buffer
2431 */
2432 if (log->l_quotaoffs_flag & type)
2433 return;
2434
2435 xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2436 }
2437
2438 /*
2439 * This routine replays a modification made to a buffer at runtime.
2440 * There are actually two types of buffer, regular and inode, which
2441  * are handled differently. Inode buffers are special in that we only
2442  * recover a specific set of data from them, namely
2443 * the inode di_next_unlinked fields. This is because all other inode
2444 * data is actually logged via inode records and any data we replay
2445 * here which overlaps that may be stale.
2446 *
2447 * When meta-data buffers are freed at run time we log a buffer item
2448 * with the XFS_BLF_CANCEL bit set to indicate that previous copies
2449 * of the buffer in the log should not be replayed at recovery time.
2450 * This is so that if the blocks covered by the buffer are reused for
2451 * file data before we crash we don't end up replaying old, freed
2452 * meta-data into a user's file.
2453 *
2454 * To handle the cancellation of buffer log items, we make two passes
2455 * over the log during recovery. During the first we build a table of
2456 * those buffers which have been cancelled, and during the second we
2457 * only replay those buffers which do not have corresponding cancel
2458 * records in the table. See xlog_recover_buffer_pass[1,2] above
2459 * for more details on the implementation of the table of cancel records.
2460 */
2461 STATIC int
2462 xlog_recover_buffer_pass2(
2463 struct xlog *log,
2464 struct list_head *buffer_list,
2465 struct xlog_recover_item *item,
2466 xfs_lsn_t current_lsn)
2467 {
2468 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
2469 xfs_mount_t *mp = log->l_mp;
2470 xfs_buf_t *bp;
2471 int error;
2472 uint buf_flags;
2473 xfs_lsn_t lsn;
2474
2475 /*
2476 * In this pass we only want to recover all the buffers which have
2477 * not been cancelled and are not cancellation buffers themselves.
2478 */
2479 if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno,
2480 buf_f->blf_len, buf_f->blf_flags)) {
2481 trace_xfs_log_recover_buf_cancel(log, buf_f);
2482 return 0;
2483 }
2484
2485 trace_xfs_log_recover_buf_recover(log, buf_f);
2486
2487 buf_flags = 0;
2488 if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
2489 buf_flags |= XBF_UNMAPPED;
2490
2491 bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
2492 buf_flags, NULL);
2493 if (!bp)
2494 return XFS_ERROR(ENOMEM);
2495 error = bp->b_error;
2496 if (error) {
2497 xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#1)");
2498 goto out_release;
2499 }
2500
2501 /*
2502 * recover the buffer only if we get an LSN from it and it's less than
2503 * the lsn of the transaction we are replaying.
2504 */
2505 lsn = xlog_recover_get_buf_lsn(mp, bp);
2506 if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0)
2507 goto out_release;
2508
2509 if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
2510 error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
2511 } else if (buf_f->blf_flags &
2512 (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2513 xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
2514 } else {
2515 xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2516 }
2517 if (error)
2518 goto out_release;
2519
2520 /*
2521 * Perform delayed write on the buffer. Asynchronous writes will be
2522 * slower when taking into account all the buffers to be flushed.
2523 *
2524 * Also make sure that only inode buffers with good sizes stay in
2525 * the buffer cache. The kernel moves inodes in buffers of 1 block
2526 * or XFS_INODE_CLUSTER_SIZE bytes, whichever is bigger. The inode
2527 * buffers in the log can be a different size if the log was generated
2528 * by an older kernel using unclustered inode buffers or a newer kernel
2529 	 * running with a different inode cluster size. Regardless, if
2530 	 * the inode buffer size isn't MAX(blocksize, XFS_INODE_CLUSTER_SIZE)
2531 * for *our* value of XFS_INODE_CLUSTER_SIZE, then we need to keep
2532 * the buffer out of the buffer cache so that the buffer won't
2533 * overlap with future reads of those inodes.
2534 */
2535 if (XFS_DINODE_MAGIC ==
2536 be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
2537 (BBTOB(bp->b_io_length) != MAX(log->l_mp->m_sb.sb_blocksize,
2538 (__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) {
2539 xfs_buf_stale(bp);
2540 error = xfs_bwrite(bp);
2541 } else {
2542 ASSERT(bp->b_target->bt_mount == mp);
2543 bp->b_iodone = xlog_recover_iodone;
2544 xfs_buf_delwri_queue(bp, buffer_list);
2545 }
2546
2547 out_release:
2548 xfs_buf_relse(bp);
2549 return error;
2550 }
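
/*
 * Sketch of the inode buffer size check above (hypothetical helper):
 * an inode buffer may stay cached only if it spans MAX(filesystem
 * block size, inode cluster size) bytes for *this* kernel's value of
 * XFS_INODE_CLUSTER_SIZE; anything else is written out and staled.
 */
static inline int
xlog_sketch_inode_buf_cacheable(struct xlog *log, struct xfs_buf *bp)
{
	return BBTOB(bp->b_io_length) ==
		MAX(log->l_mp->m_sb.sb_blocksize,
		    (__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp));
}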
2551
2552 /*
2553 * Inode fork owner changes
2554 *
2555 * If we have been told that we have to reparent the inode fork, it's because an
2556 * extent swap operation on a CRC enabled filesystem has been done and we are
2557 * replaying it. We need to walk the BMBT of the appropriate fork and change the
2558 * owners of it.
2559 *
2560 * The complexity here is that we don't have an inode context to work with, so
2561 * after we've replayed the inode we need to instantiate one. This is where the
2562 * fun begins.
2563 *
2564 * We are in the middle of log recovery, so we can't run transactions. That
2565 * means we cannot use cache coherent inode instantiation via xfs_iget(), as
2566 * that will result in the corresponding iput() running the inode through
2567 * xfs_inactive(). If we've just replayed an inode core that changes the link
2568 * count to zero (i.e. it's been unlinked), then xfs_inactive() will run
2569 * transactions (bad!).
2570 *
2571 * So, to avoid this, we instantiate an inode directly from the inode core we've
2572 * just recovered. We have the buffer still locked, and all we really need to
2573 * instantiate is the inode core and the forks being modified. We can do this
2574 * manually, then run the inode btree owner change, and then tear down the
2575 * xfs_inode without having to run any transactions at all.
2576 *
2577  * Also, because we don't have a transaction context available here but need
2578  * to gather all the buffers we modify for writeback, we pass the buffer_list
2579  * to the operation instead.
2580 */
2581
2582 STATIC int
2583 xfs_recover_inode_owner_change(
2584 struct xfs_mount *mp,
2585 struct xfs_dinode *dip,
2586 struct xfs_inode_log_format *in_f,
2587 struct list_head *buffer_list)
2588 {
2589 struct xfs_inode *ip;
2590 int error;
2591
2592 ASSERT(in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER));
2593
2594 ip = xfs_inode_alloc(mp, in_f->ilf_ino);
2595 if (!ip)
2596 return ENOMEM;
2597
2598 /* instantiate the inode */
2599 xfs_dinode_from_disk(&ip->i_d, dip);
2600 ASSERT(ip->i_d.di_version >= 3);
2601
2602 error = xfs_iformat_fork(ip, dip);
2603 if (error)
2604 goto out_free_ip;
2605
2606
2607 if (in_f->ilf_fields & XFS_ILOG_DOWNER) {
2608 ASSERT(in_f->ilf_fields & XFS_ILOG_DBROOT);
2609 error = xfs_bmbt_change_owner(NULL, ip, XFS_DATA_FORK,
2610 ip->i_ino, buffer_list);
2611 if (error)
2612 goto out_free_ip;
2613 }
2614
2615 if (in_f->ilf_fields & XFS_ILOG_AOWNER) {
2616 ASSERT(in_f->ilf_fields & XFS_ILOG_ABROOT);
2617 error = xfs_bmbt_change_owner(NULL, ip, XFS_ATTR_FORK,
2618 ip->i_ino, buffer_list);
2619 if (error)
2620 goto out_free_ip;
2621 }
2622
2623 out_free_ip:
2624 xfs_inode_free(ip);
2625 return error;
2626 }
2627
2628 STATIC int
2629 xlog_recover_inode_pass2(
2630 struct xlog *log,
2631 struct list_head *buffer_list,
2632 struct xlog_recover_item *item,
2633 xfs_lsn_t current_lsn)
2634 {
2635 xfs_inode_log_format_t *in_f;
2636 xfs_mount_t *mp = log->l_mp;
2637 xfs_buf_t *bp;
2638 xfs_dinode_t *dip;
2639 int len;
2640 xfs_caddr_t src;
2641 xfs_caddr_t dest;
2642 int error;
2643 int attr_index;
2644 uint fields;
2645 xfs_icdinode_t *dicp;
2646 uint isize;
2647 int need_free = 0;
2648
2649 if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) {
2650 in_f = item->ri_buf[0].i_addr;
2651 } else {
2652 in_f = kmem_alloc(sizeof(xfs_inode_log_format_t), KM_SLEEP);
2653 need_free = 1;
2654 error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
2655 if (error)
2656 goto error;
2657 }
2658
2659 /*
2660 	 * Inode buffers can be freed; look out for that case
2661 	 * and do not replay the inode.
2662 */
2663 if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno,
2664 in_f->ilf_len, 0)) {
2665 error = 0;
2666 trace_xfs_log_recover_inode_cancel(log, in_f);
2667 goto error;
2668 }
2669 trace_xfs_log_recover_inode_recover(log, in_f);
2670
2671 bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, 0,
2672 &xfs_inode_buf_ops);
2673 if (!bp) {
2674 error = ENOMEM;
2675 goto error;
2676 }
2677 error = bp->b_error;
2678 if (error) {
2679 xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#2)");
2680 goto out_release;
2681 }
2682 ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
2683 dip = (xfs_dinode_t *)xfs_buf_offset(bp, in_f->ilf_boffset);
2684
2685 /*
2686 * Make sure the place we're flushing out to really looks
2687 * like an inode!
2688 */
2689 if (unlikely(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))) {
2690 xfs_alert(mp,
2691 "%s: Bad inode magic number, dip = 0x%p, dino bp = 0x%p, ino = %Ld",
2692 __func__, dip, bp, in_f->ilf_ino);
2693 XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)",
2694 XFS_ERRLEVEL_LOW, mp);
2695 error = EFSCORRUPTED;
2696 goto out_release;
2697 }
2698 dicp = item->ri_buf[1].i_addr;
2699 if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) {
2700 xfs_alert(mp,
2701 "%s: Bad inode log record, rec ptr 0x%p, ino %Ld",
2702 __func__, item, in_f->ilf_ino);
2703 XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)",
2704 XFS_ERRLEVEL_LOW, mp);
2705 error = EFSCORRUPTED;
2706 goto out_release;
2707 }
2708
2709 /*
2710 * If the inode has an LSN in it, recover the inode only if it's less
2711 * than the lsn of the transaction we are replaying. Note: we still
2712 * need to replay an owner change even though the inode is more recent
2713 * than the transaction as there is no guarantee that all the btree
2714 * blocks are more recent than this transaction, too.
2715 */
2716 if (dip->di_version >= 3) {
2717 xfs_lsn_t lsn = be64_to_cpu(dip->di_lsn);
2718
2719 if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
2720 trace_xfs_log_recover_inode_skip(log, in_f);
2721 error = 0;
2722 goto out_owner_change;
2723 }
2724 }
2725
2726 /*
2727 * di_flushiter is only valid for v1/2 inodes. All changes for v3 inodes
2728 * are transactional and if ordering is necessary we can determine that
2729 * more accurately by the LSN field in the V3 inode core. Don't trust
2730 	 * the inode versions as we might be changing them here - use the
2731 	 * superblock flag to determine whether we need to look at di_flushiter
2732 	 * to skip replay when the on disk inode is newer than the log one.
2733 */
2734 if (!xfs_sb_version_hascrc(&mp->m_sb) &&
2735 dicp->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
2736 /*
2737 * Deal with the wrap case, DI_MAX_FLUSH is less
2738 * than smaller numbers
2739 */
2740 if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
2741 dicp->di_flushiter < (DI_MAX_FLUSH >> 1)) {
2742 /* do nothing */
2743 } else {
2744 trace_xfs_log_recover_inode_skip(log, in_f);
2745 error = 0;
2746 goto out_release;
2747 }
2748 }
2749
2750 /* Take the opportunity to reset the flush iteration count */
2751 dicp->di_flushiter = 0;
2752
2753 if (unlikely(S_ISREG(dicp->di_mode))) {
2754 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2755 (dicp->di_format != XFS_DINODE_FMT_BTREE)) {
2756 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
2757 XFS_ERRLEVEL_LOW, mp, dicp);
2758 xfs_alert(mp,
2759 "%s: Bad regular inode log record, rec ptr 0x%p, "
2760 "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2761 __func__, item, dip, bp, in_f->ilf_ino);
2762 error = EFSCORRUPTED;
2763 goto out_release;
2764 }
2765 } else if (unlikely(S_ISDIR(dicp->di_mode))) {
2766 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2767 (dicp->di_format != XFS_DINODE_FMT_BTREE) &&
2768 (dicp->di_format != XFS_DINODE_FMT_LOCAL)) {
2769 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
2770 XFS_ERRLEVEL_LOW, mp, dicp);
2771 xfs_alert(mp,
2772 "%s: Bad dir inode log record, rec ptr 0x%p, "
2773 "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2774 __func__, item, dip, bp, in_f->ilf_ino);
2775 error = EFSCORRUPTED;
2776 goto out_release;
2777 }
2778 }
2779 if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)){
2780 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
2781 XFS_ERRLEVEL_LOW, mp, dicp);
2782 xfs_alert(mp,
2783 "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
2784 "dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
2785 __func__, item, dip, bp, in_f->ilf_ino,
2786 dicp->di_nextents + dicp->di_anextents,
2787 dicp->di_nblocks);
2788 error = EFSCORRUPTED;
2789 goto out_release;
2790 }
2791 if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) {
2792 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
2793 XFS_ERRLEVEL_LOW, mp, dicp);
2794 xfs_alert(mp,
2795 "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
2796 "dino bp 0x%p, ino %Ld, forkoff 0x%x", __func__,
2797 item, dip, bp, in_f->ilf_ino, dicp->di_forkoff);
2798 error = EFSCORRUPTED;
2799 goto out_release;
2800 }
2801 isize = xfs_icdinode_size(dicp->di_version);
2802 if (unlikely(item->ri_buf[1].i_len > isize)) {
2803 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
2804 XFS_ERRLEVEL_LOW, mp, dicp);
2805 xfs_alert(mp,
2806 "%s: Bad inode log record length %d, rec ptr 0x%p",
2807 __func__, item->ri_buf[1].i_len, item);
2808 error = EFSCORRUPTED;
2809 goto out_release;
2810 }
2811
2812 /* The core is in in-core format */
2813 xfs_dinode_to_disk(dip, dicp);
2814
2815 /* the rest is in on-disk format */
2816 if (item->ri_buf[1].i_len > isize) {
2817 memcpy((char *)dip + isize,
2818 item->ri_buf[1].i_addr + isize,
2819 item->ri_buf[1].i_len - isize);
2820 }
2821
2822 fields = in_f->ilf_fields;
2823 switch (fields & (XFS_ILOG_DEV | XFS_ILOG_UUID)) {
2824 case XFS_ILOG_DEV:
2825 xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev);
2826 break;
2827 case XFS_ILOG_UUID:
2828 memcpy(XFS_DFORK_DPTR(dip),
2829 &in_f->ilf_u.ilfu_uuid,
2830 sizeof(uuid_t));
2831 break;
2832 }
2833
2834 if (in_f->ilf_size == 2)
2835 goto out_owner_change;
2836 len = item->ri_buf[2].i_len;
2837 src = item->ri_buf[2].i_addr;
2838 ASSERT(in_f->ilf_size <= 4);
2839 ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
2840 ASSERT(!(fields & XFS_ILOG_DFORK) ||
2841 (len == in_f->ilf_dsize));
2842
2843 switch (fields & XFS_ILOG_DFORK) {
2844 case XFS_ILOG_DDATA:
2845 case XFS_ILOG_DEXT:
2846 memcpy(XFS_DFORK_DPTR(dip), src, len);
2847 break;
2848
2849 case XFS_ILOG_DBROOT:
2850 xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len,
2851 (xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip),
2852 XFS_DFORK_DSIZE(dip, mp));
2853 break;
2854
2855 default:
2856 /*
2857 * There are no data fork flags set.
2858 */
2859 ASSERT((fields & XFS_ILOG_DFORK) == 0);
2860 break;
2861 }
2862
2863 /*
2864 * If we logged any attribute data, recover it. There may or
2865 * may not have been any other non-core data logged in this
2866 * transaction.
2867 */
2868 if (in_f->ilf_fields & XFS_ILOG_AFORK) {
2869 if (in_f->ilf_fields & XFS_ILOG_DFORK) {
2870 attr_index = 3;
2871 } else {
2872 attr_index = 2;
2873 }
2874 len = item->ri_buf[attr_index].i_len;
2875 src = item->ri_buf[attr_index].i_addr;
2876 ASSERT(len == in_f->ilf_asize);
2877
2878 switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
2879 case XFS_ILOG_ADATA:
2880 case XFS_ILOG_AEXT:
2881 dest = XFS_DFORK_APTR(dip);
2882 ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
2883 memcpy(dest, src, len);
2884 break;
2885
2886 case XFS_ILOG_ABROOT:
2887 dest = XFS_DFORK_APTR(dip);
2888 xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src,
2889 len, (xfs_bmdr_block_t*)dest,
2890 XFS_DFORK_ASIZE(dip, mp));
2891 break;
2892
2893 default:
2894 xfs_warn(log->l_mp, "%s: Invalid flag", __func__);
2895 ASSERT(0);
2896 error = EIO;
2897 goto out_release;
2898 }
2899 }
2900
2901 out_owner_change:
2902 if (in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER))
2903 error = xfs_recover_inode_owner_change(mp, dip, in_f,
2904 buffer_list);
2905 /* re-generate the checksum. */
2906 xfs_dinode_calc_crc(log->l_mp, dip);
2907
2908 ASSERT(bp->b_target->bt_mount == mp);
2909 bp->b_iodone = xlog_recover_iodone;
2910 xfs_buf_delwri_queue(bp, buffer_list);
2911
2912 out_release:
2913 xfs_buf_relse(bp);
2914 error:
2915 if (need_free)
2916 kmem_free(in_f);
2917 return XFS_ERROR(error);
2918 }
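
/*
 * Sketch of the v1/v2 di_flushiter wrap test used above (hypothetical
 * helper): the on-disk counter may wrap at DI_MAX_FLUSH, so a log
 * value that looks "older" must still be replayed in the wrap case.
 * Returns nonzero when replay of the inode core should be skipped.
 */
static inline int
xlog_sketch_flushiter_skip(__uint16_t disk_iter, __uint16_t log_iter)
{
	if (log_iter >= disk_iter)
		return 0;			/* log is newer: replay */
	if (disk_iter == DI_MAX_FLUSH && log_iter < (DI_MAX_FLUSH >> 1))
		return 0;			/* counter wrapped: replay */
	return 1;				/* on-disk newer: skip */
}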
2919
2920 /*
2921 * Recover QUOTAOFF records. We simply make a note of it in the xlog
2922  * structure, so that we know not to do any dquot item or dquot buffer recovery
2923  * of that type.
2924 */
2925 STATIC int
2926 xlog_recover_quotaoff_pass1(
2927 struct xlog *log,
2928 struct xlog_recover_item *item)
2929 {
2930 xfs_qoff_logformat_t *qoff_f = item->ri_buf[0].i_addr;
2931 ASSERT(qoff_f);
2932
2933 /*
2934 * The logitem format's flag tells us if this was user quotaoff,
2935 * group/project quotaoff or both.
2936 */
2937 if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
2938 log->l_quotaoffs_flag |= XFS_DQ_USER;
2939 if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
2940 log->l_quotaoffs_flag |= XFS_DQ_PROJ;
2941 if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
2942 log->l_quotaoffs_flag |= XFS_DQ_GROUP;
2943
2944 return (0);
2945 }
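
/*
 * Sketch of how pass 2 consumes the flags recorded above (hypothetical
 * helper): a dquot whose type was quotaoff'd is simply not recovered.
 */
static inline int
xlog_sketch_dquot_skipped(struct xlog *log, uint type)
{
	/* type is a mask of XFS_DQ_USER / XFS_DQ_PROJ / XFS_DQ_GROUP */
	return (log->l_quotaoffs_flag & type) != 0;
}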
2946
2947 /*
2948 * Recover a dquot record
2949 */
2950 STATIC int
2951 xlog_recover_dquot_pass2(
2952 struct xlog *log,
2953 struct list_head *buffer_list,
2954 struct xlog_recover_item *item,
2955 xfs_lsn_t current_lsn)
2956 {
2957 xfs_mount_t *mp = log->l_mp;
2958 xfs_buf_t *bp;
2959 struct xfs_disk_dquot *ddq, *recddq;
2960 int error;
2961 xfs_dq_logformat_t *dq_f;
2962 uint type;
2963
2964
2965 /*
2966 * Filesystems are required to send in quota flags at mount time.
2967 */
2968 if (mp->m_qflags == 0)
2969 return (0);
2970
2971 recddq = item->ri_buf[1].i_addr;
2972 if (recddq == NULL) {
2973 xfs_alert(log->l_mp, "NULL dquot in %s.", __func__);
2974 return XFS_ERROR(EIO);
2975 }
2976 if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) {
2977 xfs_alert(log->l_mp, "dquot too small (%d) in %s.",
2978 item->ri_buf[1].i_len, __func__);
2979 return XFS_ERROR(EIO);
2980 }
2981
2982 /*
2983 * This type of quotas was turned off, so ignore this record.
2984 */
2985 type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
2986 ASSERT(type);
2987 if (log->l_quotaoffs_flag & type)
2988 return (0);
2989
2990 /*
2991 * At this point we know that quota was _not_ turned off.
2992 * Since the mount flags are not indicating to us otherwise, this
2993 * must mean that quota is on, and the dquot needs to be replayed.
2994 * Remember that we may not have fully recovered the superblock yet,
2995 * so we can't do the usual trick of looking at the SB quota bits.
2996 *
2997 * The other possibility, of course, is that the quota subsystem was
2998 * removed since the last mount - ENOSYS.
2999 */
3000 dq_f = item->ri_buf[0].i_addr;
3001 ASSERT(dq_f);
3002 error = xfs_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
3003 "xlog_recover_dquot_pass2 (log copy)");
3004 if (error)
3005 return XFS_ERROR(EIO);
3006 ASSERT(dq_f->qlf_len == 1);
3007
3008 error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dq_f->qlf_blkno,
3009 XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp,
3010 NULL);
3011 if (error)
3012 return error;
3013
3014 ASSERT(bp);
3015 ddq = (xfs_disk_dquot_t *)xfs_buf_offset(bp, dq_f->qlf_boffset);
3016
3017 /*
3018 * At least the magic num portion should be on disk because this
3019 * was among a chunk of dquots created earlier, and we did some
3020 * minimal initialization then.
3021 */
3022 error = xfs_dqcheck(mp, ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
3023 "xlog_recover_dquot_pass2");
3024 if (error) {
3025 xfs_buf_relse(bp);
3026 return XFS_ERROR(EIO);
3027 }
3028
3029 /*
3030 * If the dquot has an LSN in it, recover the dquot only if it's less
3031 * than the lsn of the transaction we are replaying.
3032 */
3033 if (xfs_sb_version_hascrc(&mp->m_sb)) {
3034 struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddq;
3035 xfs_lsn_t lsn = be64_to_cpu(dqb->dd_lsn);
3036
3037 if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
3038 goto out_release;
3039 }
3040 }
3041
3042 memcpy(ddq, recddq, item->ri_buf[1].i_len);
3043 if (xfs_sb_version_hascrc(&mp->m_sb)) {
3044 xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk),
3045 XFS_DQUOT_CRC_OFF);
3046 }
3047
3048 ASSERT(dq_f->qlf_size == 2);
3049 ASSERT(bp->b_target->bt_mount == mp);
3050 bp->b_iodone = xlog_recover_iodone;
3051 xfs_buf_delwri_queue(bp, buffer_list);
3052
3053 out_release:
3054 xfs_buf_relse(bp);
3055 return 0;
3056 }
3057
3058 /*
3059 * This routine is called to create an in-core extent free intent
3060 * item from the efi format structure which was logged on disk.
3061 * It allocates an in-core efi, copies the extents from the format
3062 * structure into it, and adds the efi to the AIL with the given
3063 * LSN.
3064 */
3065 STATIC int
3066 xlog_recover_efi_pass2(
3067 struct xlog *log,
3068 struct xlog_recover_item *item,
3069 xfs_lsn_t lsn)
3070 {
3071 int error;
3072 xfs_mount_t *mp = log->l_mp;
3073 xfs_efi_log_item_t *efip;
3074 xfs_efi_log_format_t *efi_formatp;
3075
3076 efi_formatp = item->ri_buf[0].i_addr;
3077
3078 efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
3079 if ((error = xfs_efi_copy_format(&(item->ri_buf[0]),
3080 &(efip->efi_format)))) {
3081 xfs_efi_item_free(efip);
3082 return error;
3083 }
3084 atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents);
3085
3086 spin_lock(&log->l_ailp->xa_lock);
3087 /*
3088 * xfs_trans_ail_update() drops the AIL lock.
3089 */
3090 xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn);
3091 return 0;
3092 }
3093
3094
3095 /*
3096 * This routine is called when an efd format structure is found in
3097  * a committed transaction in the log. Its purpose is to cancel
3098 * the corresponding efi if it was still in the log. To do this
3099 * it searches the AIL for the efi with an id equal to that in the
3100 * efd format structure. If we find it, we remove the efi from the
3101 * AIL and free it.
3102 */
3103 STATIC int
3104 xlog_recover_efd_pass2(
3105 struct xlog *log,
3106 struct xlog_recover_item *item)
3107 {
3108 xfs_efd_log_format_t *efd_formatp;
3109 xfs_efi_log_item_t *efip = NULL;
3110 xfs_log_item_t *lip;
3111 __uint64_t efi_id;
3112 struct xfs_ail_cursor cur;
3113 struct xfs_ail *ailp = log->l_ailp;
3114
3115 efd_formatp = item->ri_buf[0].i_addr;
3116 ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
3117 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
3118 (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
3119 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
3120 efi_id = efd_formatp->efd_efi_id;
3121
3122 /*
3123 * Search for the efi with the id in the efd format structure
3124 * in the AIL.
3125 */
3126 spin_lock(&ailp->xa_lock);
3127 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3128 while (lip != NULL) {
3129 if (lip->li_type == XFS_LI_EFI) {
3130 efip = (xfs_efi_log_item_t *)lip;
3131 if (efip->efi_format.efi_id == efi_id) {
3132 /*
3133 * xfs_trans_ail_delete() drops the
3134 * AIL lock.
3135 */
3136 xfs_trans_ail_delete(ailp, lip,
3137 SHUTDOWN_CORRUPT_INCORE);
3138 xfs_efi_item_free(efip);
3139 spin_lock(&ailp->xa_lock);
3140 break;
3141 }
3142 }
3143 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3144 }
3145 xfs_trans_ail_cursor_done(ailp, &cur);
3146 spin_unlock(&ailp->xa_lock);
3147
3148 return 0;
3149 }
3150
3151 /*
3152 * This routine is called when an inode create format structure is found in a
3153  * committed transaction in the log. Its purpose is to initialise the inodes
3154  * being allocated on disk. This requires us to get inode cluster buffers that
3155  * match the range to be initialised, stamp them with inode templates and
3156  * write them out by delayed write so that subsequent modifications will hit
3157  * the cached buffer and only need writing out at the end of recovery.
3158 */
3159 STATIC int
3160 xlog_recover_do_icreate_pass2(
3161 struct xlog *log,
3162 struct list_head *buffer_list,
3163 xlog_recover_item_t *item)
3164 {
3165 struct xfs_mount *mp = log->l_mp;
3166 struct xfs_icreate_log *icl;
3167 xfs_agnumber_t agno;
3168 xfs_agblock_t agbno;
3169 unsigned int count;
3170 unsigned int isize;
3171 xfs_agblock_t length;
3172
3173 icl = (struct xfs_icreate_log *)item->ri_buf[0].i_addr;
3174 if (icl->icl_type != XFS_LI_ICREATE) {
3175 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad type");
3176 return EINVAL;
3177 }
3178
3179 if (icl->icl_size != 1) {
3180 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad icl size");
3181 return EINVAL;
3182 }
3183
3184 agno = be32_to_cpu(icl->icl_ag);
3185 if (agno >= mp->m_sb.sb_agcount) {
3186 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agno");
3187 return EINVAL;
3188 }
3189 agbno = be32_to_cpu(icl->icl_agbno);
3190 if (!agbno || agbno == NULLAGBLOCK || agbno >= mp->m_sb.sb_agblocks) {
3191 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agbno");
3192 return EINVAL;
3193 }
3194 isize = be32_to_cpu(icl->icl_isize);
3195 if (isize != mp->m_sb.sb_inodesize) {
3196 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad isize");
3197 return EINVAL;
3198 }
3199 count = be32_to_cpu(icl->icl_count);
3200 if (!count) {
3201 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count");
3202 return EINVAL;
3203 }
3204 length = be32_to_cpu(icl->icl_length);
3205 if (!length || length >= mp->m_sb.sb_agblocks) {
3206 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad length");
3207 return EINVAL;
3208 }
3209
3210 /* existing allocation is fixed value */
3211 ASSERT(count == XFS_IALLOC_INODES(mp));
3212 ASSERT(length == XFS_IALLOC_BLOCKS(mp));
3213 if (count != XFS_IALLOC_INODES(mp) ||
3214 length != XFS_IALLOC_BLOCKS(mp)) {
3215 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count 2");
3216 return EINVAL;
3217 }
3218
3219 /*
3220 * Inode buffers can be freed. Do not replay the inode initialisation as
3221 * we could be overwriting something written after this inode buffer was
3222 * cancelled.
3223 *
3224 * XXX: we need to iterate all buffers and only init those that are not
3225 * cancelled. I think that a more fine grained factoring of
3226 * xfs_ialloc_inode_init may be appropriate here to enable this to be
3227 * done easily.
3228 */
3229 if (xlog_check_buffer_cancelled(log,
3230 XFS_AGB_TO_DADDR(mp, agno, agbno), length, 0))
3231 return 0;
3232
3233 xfs_ialloc_inode_init(mp, NULL, buffer_list, agno, agbno, length,
3234 be32_to_cpu(icl->icl_gen));
3235 return 0;
3236 }
3237
3238 /*
3239 * Free up any resources allocated by the transaction
3240 *
3241 * Remember that EFIs, EFDs, and IUNLINKs are handled later.
3242 */
3243 STATIC void
3244 xlog_recover_free_trans(
3245 struct xlog_recover *trans)
3246 {
3247 xlog_recover_item_t *item, *n;
3248 int i;
3249
3250 list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
3251 /* Free the regions in the item. */
3252 list_del(&item->ri_list);
3253 for (i = 0; i < item->ri_cnt; i++)
3254 kmem_free(item->ri_buf[i].i_addr);
3255 /* Free the item itself */
3256 kmem_free(item->ri_buf);
3257 kmem_free(item);
3258 }
3259 /* Free the transaction recover structure */
3260 kmem_free(trans);
3261 }
3262
3263 STATIC void
3264 xlog_recover_buffer_ra_pass2(
3265 struct xlog *log,
3266 struct xlog_recover_item *item)
3267 {
3268 struct xfs_buf_log_format *buf_f = item->ri_buf[0].i_addr;
3269 struct xfs_mount *mp = log->l_mp;
3270
3271 if (xlog_peek_buffer_cancelled(log, buf_f->blf_blkno,
3272 buf_f->blf_len, buf_f->blf_flags)) {
3273 return;
3274 }
3275
3276 xfs_buf_readahead(mp->m_ddev_targp, buf_f->blf_blkno,
3277 buf_f->blf_len, NULL);
3278 }
3279
3280 STATIC void
3281 xlog_recover_inode_ra_pass2(
3282 struct xlog *log,
3283 struct xlog_recover_item *item)
3284 {
3285 struct xfs_inode_log_format ilf_buf;
3286 struct xfs_inode_log_format *ilfp;
3287 struct xfs_mount *mp = log->l_mp;
3288 int error;
3289
3290 if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
3291 ilfp = item->ri_buf[0].i_addr;
3292 } else {
3293 ilfp = &ilf_buf;
3294 memset(ilfp, 0, sizeof(*ilfp));
3295 error = xfs_inode_item_format_convert(&item->ri_buf[0], ilfp);
3296 if (error)
3297 return;
3298 }
3299
3300 if (xlog_peek_buffer_cancelled(log, ilfp->ilf_blkno, ilfp->ilf_len, 0))
3301 return;
3302
3303 xfs_buf_readahead(mp->m_ddev_targp, ilfp->ilf_blkno,
3304 ilfp->ilf_len, &xfs_inode_buf_ra_ops);
3305 }
3306
3307 STATIC void
3308 xlog_recover_dquot_ra_pass2(
3309 struct xlog *log,
3310 struct xlog_recover_item *item)
3311 {
3312 struct xfs_mount *mp = log->l_mp;
3313 struct xfs_disk_dquot *recddq;
3314 struct xfs_dq_logformat *dq_f;
3315 uint type;
3316
3317
3318 if (mp->m_qflags == 0)
3319 return;
3320
3321 recddq = item->ri_buf[1].i_addr;
3322 if (recddq == NULL)
3323 return;
3324 if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot))
3325 return;
3326
3327 type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
3328 ASSERT(type);
3329 if (log->l_quotaoffs_flag & type)
3330 return;
3331
3332 dq_f = item->ri_buf[0].i_addr;
3333 ASSERT(dq_f);
3334 ASSERT(dq_f->qlf_len == 1);
3335
3336 xfs_buf_readahead(mp->m_ddev_targp, dq_f->qlf_blkno,
3337 XFS_FSB_TO_BB(mp, dq_f->qlf_len), NULL);
3338 }
3339
3340 STATIC void
3341 xlog_recover_ra_pass2(
3342 struct xlog *log,
3343 struct xlog_recover_item *item)
3344 {
3345 switch (ITEM_TYPE(item)) {
3346 case XFS_LI_BUF:
3347 xlog_recover_buffer_ra_pass2(log, item);
3348 break;
3349 case XFS_LI_INODE:
3350 xlog_recover_inode_ra_pass2(log, item);
3351 break;
3352 case XFS_LI_DQUOT:
3353 xlog_recover_dquot_ra_pass2(log, item);
3354 break;
3355 case XFS_LI_EFI:
3356 case XFS_LI_EFD:
3357 case XFS_LI_QUOTAOFF:
3358 default:
3359 break;
3360 }
3361 }
3362
3363 STATIC int
3364 xlog_recover_commit_pass1(
3365 struct xlog *log,
3366 struct xlog_recover *trans,
3367 struct xlog_recover_item *item)
3368 {
3369 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);
3370
3371 switch (ITEM_TYPE(item)) {
3372 case XFS_LI_BUF:
3373 return xlog_recover_buffer_pass1(log, item);
3374 case XFS_LI_QUOTAOFF:
3375 return xlog_recover_quotaoff_pass1(log, item);
3376 case XFS_LI_INODE:
3377 case XFS_LI_EFI:
3378 case XFS_LI_EFD:
3379 case XFS_LI_DQUOT:
3380 case XFS_LI_ICREATE:
3381 /* nothing to do in pass 1 */
3382 return 0;
3383 default:
3384 xfs_warn(log->l_mp, "%s: invalid item type (%d)",
3385 __func__, ITEM_TYPE(item));
3386 ASSERT(0);
3387 return XFS_ERROR(EIO);
3388 }
3389 }
3390
3391 STATIC int
3392 xlog_recover_commit_pass2(
3393 struct xlog *log,
3394 struct xlog_recover *trans,
3395 struct list_head *buffer_list,
3396 struct xlog_recover_item *item)
3397 {
3398 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);
3399
3400 switch (ITEM_TYPE(item)) {
3401 case XFS_LI_BUF:
3402 return xlog_recover_buffer_pass2(log, buffer_list, item,
3403 trans->r_lsn);
3404 case XFS_LI_INODE:
3405 return xlog_recover_inode_pass2(log, buffer_list, item,
3406 trans->r_lsn);
3407 case XFS_LI_EFI:
3408 return xlog_recover_efi_pass2(log, item, trans->r_lsn);
3409 case XFS_LI_EFD:
3410 return xlog_recover_efd_pass2(log, item);
3411 case XFS_LI_DQUOT:
3412 return xlog_recover_dquot_pass2(log, buffer_list, item,
3413 trans->r_lsn);
3414 case XFS_LI_ICREATE:
3415 return xlog_recover_do_icreate_pass2(log, buffer_list, item);
3416 case XFS_LI_QUOTAOFF:
3417 /* nothing to do in pass2 */
3418 return 0;
3419 default:
3420 xfs_warn(log->l_mp, "%s: invalid item type (%d)",
3421 __func__, ITEM_TYPE(item));
3422 ASSERT(0);
3423 return XFS_ERROR(EIO);
3424 }
3425 }
3426
3427 STATIC int
3428 xlog_recover_items_pass2(
3429 struct xlog *log,
3430 struct xlog_recover *trans,
3431 struct list_head *buffer_list,
3432 struct list_head *item_list)
3433 {
3434 struct xlog_recover_item *item;
3435 int error = 0;
3436
3437 list_for_each_entry(item, item_list, ri_list) {
3438 error = xlog_recover_commit_pass2(log, trans,
3439 buffer_list, item);
3440 if (error)
3441 return error;
3442 }
3443
3444 return error;
3445 }
3446
3447 /*
3448 * Perform the transaction.
3449 *
3450 * If the transaction modifies a buffer or inode, do it now. Otherwise,
3451 * EFIs and EFDs get queued up by adding entries into the AIL for them.
3452 */
3453 STATIC int
3454 xlog_recover_commit_trans(
3455 struct xlog *log,
3456 struct xlog_recover *trans,
3457 int pass)
3458 {
3459 int error = 0;
3460 int error2;
3461 int items_queued = 0;
3462 struct xlog_recover_item *item;
3463 struct xlog_recover_item *next;
3464 LIST_HEAD (buffer_list);
3465 LIST_HEAD (ra_list);
3466 LIST_HEAD (done_list);
3467
3468 #define XLOG_RECOVER_COMMIT_QUEUE_MAX 100
3469
3470 hlist_del(&trans->r_list);
3471
3472 error = xlog_recover_reorder_trans(log, trans, pass);
3473 if (error)
3474 return error;
3475
3476 list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) {
3477 switch (pass) {
3478 case XLOG_RECOVER_PASS1:
3479 error = xlog_recover_commit_pass1(log, trans, item);
3480 break;
3481 case XLOG_RECOVER_PASS2:
3482 xlog_recover_ra_pass2(log, item);
3483 list_move_tail(&item->ri_list, &ra_list);
3484 items_queued++;
3485 if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {
3486 error = xlog_recover_items_pass2(log, trans,
3487 &buffer_list, &ra_list);
3488 list_splice_tail_init(&ra_list, &done_list);
3489 items_queued = 0;
3490 }
3491
3492 break;
3493 default:
3494 ASSERT(0);
3495 }
3496
3497 if (error)
3498 goto out;
3499 }
3500
3501 out:
3502 if (!list_empty(&ra_list)) {
3503 if (!error)
3504 error = xlog_recover_items_pass2(log, trans,
3505 &buffer_list, &ra_list);
3506 list_splice_tail_init(&ra_list, &done_list);
3507 }
3508
3509 if (!list_empty(&done_list))
3510 list_splice_init(&done_list, &trans->r_itemq);
3511
3512 xlog_recover_free_trans(trans);
3513
3514 error2 = xfs_buf_delwri_submit(&buffer_list);
3515 return error ? error : error2;
3516 }
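
/*
 * Sketch of the batching pattern above (illustration only; the
 * threshold mirrors XLOG_RECOVER_COMMIT_QUEUE_MAX, which is defined
 * locally inside xlog_recover_commit_trans): readahead is issued as
 * items are queued and the queue is drained in batches, giving the
 * reads time to complete before replay needs the buffers.
 */
static inline int
xlog_sketch_batch_full(int items_queued)
{
	enum { XLOG_SKETCH_QUEUE_MAX = 100 };	/* mirrors the above */

	return items_queued >= XLOG_SKETCH_QUEUE_MAX;
}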
3517
3518 STATIC int
3519 xlog_recover_unmount_trans(
3520 struct xlog *log,
3521 struct xlog_recover *trans)
3522 {
3523 /* Do nothing now */
3524 xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
3525 return 0;
3526 }
3527
3528 /*
3529 * There are two valid states of the r_state field. 0 indicates that the
3530 * transaction structure is in a normal state. We have either seen the
3531 * start of the transaction or the last operation we added was not a partial
3532 * operation. If the last operation we added to the transaction was a
3533 * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
3534 *
3535 * NOTE: skip LRs with 0 data length.
3536 */
3537 STATIC int
3538 xlog_recover_process_data(
3539 struct xlog *log,
3540 struct hlist_head rhash[],
3541 struct xlog_rec_header *rhead,
3542 xfs_caddr_t dp,
3543 int pass)
3544 {
3545 xfs_caddr_t lp;
3546 int num_logops;
3547 xlog_op_header_t *ohead;
3548 xlog_recover_t *trans;
3549 xlog_tid_t tid;
3550 int error;
3551 unsigned long hash;
3552 uint flags;
3553
3554 lp = dp + be32_to_cpu(rhead->h_len);
3555 num_logops = be32_to_cpu(rhead->h_num_logops);
3556
3557 /* check the log format matches our own - else we can't recover */
3558 if (xlog_header_check_recover(log->l_mp, rhead))
3559 return (XFS_ERROR(EIO));
3560
3561 while ((dp < lp) && num_logops) {
3562 ASSERT(dp + sizeof(xlog_op_header_t) <= lp);
3563 ohead = (xlog_op_header_t *)dp;
3564 dp += sizeof(xlog_op_header_t);
3565 if (ohead->oh_clientid != XFS_TRANSACTION &&
3566 ohead->oh_clientid != XFS_LOG) {
3567 xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
3568 __func__, ohead->oh_clientid);
3569 ASSERT(0);
3570 return (XFS_ERROR(EIO));
3571 }
3572 tid = be32_to_cpu(ohead->oh_tid);
3573 hash = XLOG_RHASH(tid);
3574 trans = xlog_recover_find_tid(&rhash[hash], tid);
3575 if (trans == NULL) { /* not found; add new tid */
3576 if (ohead->oh_flags & XLOG_START_TRANS)
3577 xlog_recover_new_tid(&rhash[hash], tid,
3578 be64_to_cpu(rhead->h_lsn));
3579 } else {
3580 if (dp + be32_to_cpu(ohead->oh_len) > lp) {
3581 xfs_warn(log->l_mp, "%s: bad length 0x%x",
3582 __func__, be32_to_cpu(ohead->oh_len));
3583 WARN_ON(1);
3584 return (XFS_ERROR(EIO));
3585 }
3586 flags = ohead->oh_flags & ~XLOG_END_TRANS;
3587 if (flags & XLOG_WAS_CONT_TRANS)
3588 flags &= ~XLOG_CONTINUE_TRANS;
3589 switch (flags) {
3590 case XLOG_COMMIT_TRANS:
3591 error = xlog_recover_commit_trans(log,
3592 trans, pass);
3593 break;
3594 case XLOG_UNMOUNT_TRANS:
3595 error = xlog_recover_unmount_trans(log, trans);
3596 break;
3597 case XLOG_WAS_CONT_TRANS:
3598 error = xlog_recover_add_to_cont_trans(log,
3599 trans, dp,
3600 be32_to_cpu(ohead->oh_len));
3601 break;
3602 case XLOG_START_TRANS:
3603 xfs_warn(log->l_mp, "%s: bad transaction",
3604 __func__);
3605 ASSERT(0);
3606 error = XFS_ERROR(EIO);
3607 break;
3608 case 0:
3609 case XLOG_CONTINUE_TRANS:
3610 error = xlog_recover_add_to_trans(log, trans,
3611 dp, be32_to_cpu(ohead->oh_len));
3612 break;
3613 default:
3614 xfs_warn(log->l_mp, "%s: bad flag 0x%x",
3615 __func__, flags);
3616 ASSERT(0);
3617 error = XFS_ERROR(EIO);
3618 break;
3619 }
3620 if (error) {
3621 xlog_recover_free_trans(trans);
3622 return error;
3623 }
3624 }
3625 dp += be32_to_cpu(ohead->oh_len);
3626 num_logops--;
3627 }
3628 return 0;
3629 }
3630
3631 /*
3632 * Process an extent free intent item that was recovered from
3633 * the log. We need to free the extents that it describes.
3634 */
3635 STATIC int
3636 xlog_recover_process_efi(
3637 xfs_mount_t *mp,
3638 xfs_efi_log_item_t *efip)
3639 {
3640 xfs_efd_log_item_t *efdp;
3641 xfs_trans_t *tp;
3642 int i;
3643 int error = 0;
3644 xfs_extent_t *extp;
3645 xfs_fsblock_t startblock_fsb;
3646
3647 ASSERT(!test_bit(XFS_EFI_RECOVERED, &efip->efi_flags));
3648
3649 /*
3650 * First check the validity of the extents described by the
3651 * EFI. If any are bad, then assume that all are bad and
3652 * just toss the EFI.
3653 */
3654 for (i = 0; i < efip->efi_format.efi_nextents; i++) {
3655 extp = &(efip->efi_format.efi_extents[i]);
3656 startblock_fsb = XFS_BB_TO_FSB(mp,
3657 XFS_FSB_TO_DADDR(mp, extp->ext_start));
3658 if ((startblock_fsb == 0) ||
3659 (extp->ext_len == 0) ||
3660 (startblock_fsb >= mp->m_sb.sb_dblocks) ||
3661 (extp->ext_len >= mp->m_sb.sb_agblocks)) {
3662 /*
3663 * This will pull the EFI from the AIL and
3664 * free the memory associated with it.
3665 */
3666 set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
3667 xfs_efi_release(efip, efip->efi_format.efi_nextents);
3668 return XFS_ERROR(EIO);
3669 }
3670 }
3671
3672 tp = xfs_trans_alloc(mp, 0);
3673 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
3674 if (error)
3675 goto abort_error;
3676 efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
3677
3678 for (i = 0; i < efip->efi_format.efi_nextents; i++) {
3679 extp = &(efip->efi_format.efi_extents[i]);
3680 error = xfs_free_extent(tp, extp->ext_start, extp->ext_len);
3681 if (error)
3682 goto abort_error;
3683 xfs_trans_log_efd_extent(tp, efdp, extp->ext_start,
3684 extp->ext_len);
3685 }
3686
3687 set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
3688 error = xfs_trans_commit(tp, 0);
3689 return error;
3690
3691 abort_error:
3692 xfs_trans_cancel(tp, XFS_TRANS_ABORT);
3693 return error;
3694 }
3695
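/*
 * Illustrative sketch of the intent/done pairing resolved above (disabled;
 * user-space model with hypothetical types, not the real EFI/EFD format):
 * an intent that never saw its matching done record is redone at recovery,
 * then marked done so a second crash cannot replay it again.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

struct toy_intent {
	unsigned long long	start;	/* extent start block */
	unsigned long		len;	/* extent length in blocks */
	bool			done;	/* matching done record seen? */
};

static void toy_recover(struct toy_intent *tbl, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (tbl[i].done)
			continue;
		printf("free extent [%llu, +%lu], then log done\n",
		       tbl[i].start, tbl[i].len);
		tbl[i].done = true;	/* done record commits with the free */
	}
}

int main(void)
{
	struct toy_intent tbl[] = {
		{ 100, 8, true },	/* finished before the crash */
		{ 200, 4, false },	/* intent logged, free never done */
	};

	toy_recover(tbl, 2);
	return 0;
}
#endif
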
3696 /*
3697 * When this is called, all of the EFIs which did not have
3698 * corresponding EFDs should be in the AIL. What we do now
3699 * is free the extents associated with each one.
3700 *
3701 * Since we process the EFIs in normal transactions, they
3702 * will be removed at some point after the commit. This prevents
3703 * us from just walking down the list processing each one.
3704 * We'll use a flag in the EFI to skip those that we've already
3705 * processed and use the AIL iteration mechanism's generation
3706 * count to try to speed this up at least a bit.
3707 *
3708 * When we start, we know that the EFIs are the only things in
3709 * the AIL. As we process them, however, other items are added
3710 * to the AIL. Since everything added to the AIL must come after
3711 * everything already in the AIL, we stop processing as soon as
3712 * we see something other than an EFI in the AIL.
3713 */
3714 STATIC int
3715 xlog_recover_process_efis(
3716 struct xlog *log)
3717 {
3718 xfs_log_item_t *lip;
3719 xfs_efi_log_item_t *efip;
3720 int error = 0;
3721 struct xfs_ail_cursor cur;
3722 struct xfs_ail *ailp;
3723
3724 ailp = log->l_ailp;
3725 spin_lock(&ailp->xa_lock);
3726 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3727 while (lip != NULL) {
3728 /*
3729 * We're done when we see something other than an EFI.
3730 * There should be no EFIs left in the AIL now.
3731 */
3732 if (lip->li_type != XFS_LI_EFI) {
3733 #ifdef DEBUG
3734 for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
3735 ASSERT(lip->li_type != XFS_LI_EFI);
3736 #endif
3737 break;
3738 }
3739
3740 /*
3741 * Skip EFIs that we've already processed.
3742 */
3743 efip = (xfs_efi_log_item_t *)lip;
3744 if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags)) {
3745 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3746 continue;
3747 }
3748
3749 spin_unlock(&ailp->xa_lock);
3750 error = xlog_recover_process_efi(log->l_mp, efip);
3751 spin_lock(&ailp->xa_lock);
3752 if (error)
3753 goto out;
3754 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3755 }
3756 out:
3757 xfs_trans_ail_cursor_done(ailp, &cur);
3758 spin_unlock(&ailp->xa_lock);
3759 return error;
3760 }
3761
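/*
 * Illustrative sketch of the bounded AIL walk above (disabled; user-space
 * model with a hypothetical item array standing in for the LSN-ordered
 * AIL): new items always sort after the pre-existing EFIs, so the scan
 * may stop at the first non-EFI it meets.
 */
#if 0
#include <stdio.h>

enum toy_type { TOY_EFI, TOY_OTHER };

struct toy_item {
	enum toy_type	type;
	int		recovered;	/* stand-in for XFS_EFI_RECOVERED */
};

int main(void)
{
	struct toy_item ail[] = {
		{ TOY_EFI, 0 }, { TOY_EFI, 1 }, { TOY_OTHER, 0 },
	};
	int i;

	for (i = 0; i < 3; i++) {
		if (ail[i].type != TOY_EFI)
			break;		/* everything past here is newer */
		if (ail[i].recovered)
			continue;	/* already handled on this pass */
		printf("process EFI at slot %d\n", i);
		ail[i].recovered = 1;
	}
	return 0;
}
#endif
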
3762 /*
3763 * This routine performs a transaction to null out a bad inode pointer
3764 * in an agi unlinked inode hash bucket.
3765 */
3766 STATIC void
3767 xlog_recover_clear_agi_bucket(
3768 xfs_mount_t *mp,
3769 xfs_agnumber_t agno,
3770 int bucket)
3771 {
3772 xfs_trans_t *tp;
3773 xfs_agi_t *agi;
3774 xfs_buf_t *agibp;
3775 int offset;
3776 int error;
3777
3778 tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET);
3779 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_clearagi, 0, 0);
3780 if (error)
3781 goto out_abort;
3782
3783 error = xfs_read_agi(mp, tp, agno, &agibp);
3784 if (error)
3785 goto out_abort;
3786
3787 agi = XFS_BUF_TO_AGI(agibp);
3788 agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
3789 offset = offsetof(xfs_agi_t, agi_unlinked) +
3790 (sizeof(xfs_agino_t) * bucket);
3791 xfs_trans_log_buf(tp, agibp, offset,
3792 (offset + sizeof(xfs_agino_t) - 1));
3793
3794 error = xfs_trans_commit(tp, 0);
3795 if (error)
3796 goto out_error;
3797 return;
3798
3799 out_abort:
3800 xfs_trans_cancel(tp, XFS_TRANS_ABORT);
3801 out_error:
3802 xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
3803 return;
3804 }
3805
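/*
 * Illustrative sketch of the offsetof() arithmetic used above (disabled;
 * hypothetical struct, not the real AGI layout): logging only the byte
 * range of the one bucket that changed keeps the dirty region minimal.
 */
#if 0
#include <stddef.h>
#include <stdio.h>

struct toy_agi {
	unsigned int	magic;
	unsigned int	unlinked[64];	/* unlinked inode hash buckets */
};

int main(void)
{
	int bucket = 5;
	size_t first = offsetof(struct toy_agi, unlinked) +
		       sizeof(unsigned int) * bucket;
	size_t last = first + sizeof(unsigned int) - 1;

	/* these offsets bound the region handed to the buffer log item */
	printf("log bytes %zu..%zu of the buffer\n", first, last);
	return 0;
}
#endif
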
3806 STATIC xfs_agino_t
3807 xlog_recover_process_one_iunlink(
3808 struct xfs_mount *mp,
3809 xfs_agnumber_t agno,
3810 xfs_agino_t agino,
3811 int bucket)
3812 {
3813 struct xfs_buf *ibp;
3814 struct xfs_dinode *dip;
3815 struct xfs_inode *ip;
3816 xfs_ino_t ino;
3817 int error;
3818
3819 ino = XFS_AGINO_TO_INO(mp, agno, agino);
3820 error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
3821 if (error)
3822 goto fail;
3823
3824 /*
3825 * Get the on disk inode to find the next inode in the bucket.
3826 */
3827 error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0, 0);
3828 if (error)
3829 goto fail_iput;
3830
3831 ASSERT(ip->i_d.di_nlink == 0);
3832 ASSERT(ip->i_d.di_mode != 0);
3833
3834 /* setup for the next pass */
3835 agino = be32_to_cpu(dip->di_next_unlinked);
3836 xfs_buf_relse(ibp);
3837
3838 /*
3839 * Prevent any DMAPI event from being sent when the reference on
3840 * the inode is dropped.
3841 */
3842 ip->i_d.di_dmevmask = 0;
3843
3844 IRELE(ip);
3845 return agino;
3846
3847 fail_iput:
3848 IRELE(ip);
3849 fail:
3850 /*
3851 * We can't read in the inode this bucket points to, or this inode
3852 * is messed up. Just ditch this bucket of inodes. We will lose
3853 * some inodes and space, but at least we won't hang.
3854 *
3855 * Call xlog_recover_clear_agi_bucket() to perform a transaction to
3856 * clear the inode pointer in the bucket.
3857 */
3858 xlog_recover_clear_agi_bucket(mp, agno, bucket);
3859 return NULLAGINO;
3860 }
3861
3862 /*
3863 * xlog_iunlink_recover
3864 *
3865 * This is called during recovery to process any inodes which
3866 * we unlinked but not freed when the system crashed. These
3867 * inodes will be on the lists in the AGI blocks. What we do
3868 * here is scan all the AGIs and fully truncate and free any
3869 * inodes found on the lists. Each inode is removed from the
3870 * lists when it has been fully truncated and is freed. The
3871 * freeing of the inode and its removal from the list must be
3872 * atomic.
3873 */
3874 STATIC void
3875 xlog_recover_process_iunlinks(
3876 struct xlog *log)
3877 {
3878 xfs_mount_t *mp;
3879 xfs_agnumber_t agno;
3880 xfs_agi_t *agi;
3881 xfs_buf_t *agibp;
3882 xfs_agino_t agino;
3883 int bucket;
3884 int error;
3885 uint mp_dmevmask;
3886
3887 mp = log->l_mp;
3888
3889 /*
3890 * Prevent any DMAPI event from being sent while in this function.
3891 */
3892 mp_dmevmask = mp->m_dmevmask;
3893 mp->m_dmevmask = 0;
3894
3895 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
3896 /*
3897 * Find the agi for this ag.
3898 */
3899 error = xfs_read_agi(mp, NULL, agno, &agibp);
3900 if (error) {
3901 /*
3902 				 * The AGI is corrupted. Don't process it.
3903 				 *
3904 				 * We should probably mark the filesystem as corrupt
3905 				 * after we've recovered all the AGs we can....
3906 */
3907 continue;
3908 }
3909 /*
3910 * Unlock the buffer so that it can be acquired in the normal
3911 * course of the transaction to truncate and free each inode.
3912 * Because we are not racing with anyone else here for the AGI
3913 * buffer, we don't even need to hold it locked to read the
3914 		 * initial unlinked bucket entries out of the buffer. We keep a
3915 		 * buffer reference, though, so that it stays pinned in memory
3916 * while we need the buffer.
3917 */
3918 agi = XFS_BUF_TO_AGI(agibp);
3919 xfs_buf_unlock(agibp);
3920
3921 for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
3922 agino = be32_to_cpu(agi->agi_unlinked[bucket]);
3923 while (agino != NULLAGINO) {
3924 agino = xlog_recover_process_one_iunlink(mp,
3925 agno, agino, bucket);
3926 }
3927 }
3928 xfs_buf_rele(agibp);
3929 }
3930
3931 mp->m_dmevmask = mp_dmevmask;
3932 }
3933
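/*
 * Illustrative sketch of the bucket walk above (disabled; user-space model
 * with a hypothetical in-memory "disk"): each AGI bucket heads a singly
 * linked list threaded through the inodes' next-unlinked field and
 * terminated by NULLAGINO, so recovery just chases the chain.
 */
#if 0
#include <stdio.h>

#define TOY_NULLAGINO	((unsigned int)-1)

/* toy_next[i] plays the role of inode i's di_next_unlinked field */
static unsigned int toy_next[8] = {
	3, TOY_NULLAGINO, TOY_NULLAGINO, 6,
	TOY_NULLAGINO, TOY_NULLAGINO, TOY_NULLAGINO, TOY_NULLAGINO,
};

int main(void)
{
	unsigned int agino = 0;		/* as read from agi_unlinked[b] */

	while (agino != TOY_NULLAGINO) {
		printf("truncate and free inode %u\n", agino);
		agino = toy_next[agino];
	}
	return 0;
}
#endif
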
3934 /*
3935  * Unpack the log buffer data and CRC check it. If the check fails, issue a
3936 * warning if and only if the CRC in the header is non-zero. This makes the
3937 * check an advisory warning, and the zero CRC check will prevent failure
3938 * warnings from being emitted when upgrading the kernel from one that does not
3939 * add CRCs by default.
3940 *
3941 * When filesystems are CRC enabled, this CRC mismatch becomes a fatal log
3942  * corruption failure.
3943 */
3944 STATIC int
3945 xlog_unpack_data_crc(
3946 struct xlog_rec_header *rhead,
3947 xfs_caddr_t dp,
3948 struct xlog *log)
3949 {
3950 __le32 crc;
3951
3952 crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
3953 if (crc != rhead->h_crc) {
3954 if (rhead->h_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
3955 xfs_alert(log->l_mp,
3956 "log record CRC mismatch: found 0x%x, expected 0x%x.",
3957 le32_to_cpu(rhead->h_crc),
3958 le32_to_cpu(crc));
3959 xfs_hex_dump(dp, 32);
3960 }
3961
3962 /*
3963 * If we've detected a log record corruption, then we can't
3964 * recover past this point. Abort recovery if we are enforcing
3965 * CRC protection by punting an error back up the stack.
3966 */
3967 if (xfs_sb_version_hascrc(&log->l_mp->m_sb))
3968 return EFSCORRUPTED;
3969 }
3970
3971 return 0;
3972 }
3973
3974 STATIC int
3975 xlog_unpack_data(
3976 struct xlog_rec_header *rhead,
3977 xfs_caddr_t dp,
3978 struct xlog *log)
3979 {
3980 int i, j, k;
3981 int error;
3982
3983 error = xlog_unpack_data_crc(rhead, dp, log);
3984 if (error)
3985 return error;
3986
3987 for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
3988 i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
3989 *(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
3990 dp += BBSIZE;
3991 }
3992
3993 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3994 xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
3995 for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
3996 j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3997 k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3998 *(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
3999 dp += BBSIZE;
4000 }
4001 }
4002
4003 return 0;
4004 }
4005
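/*
 * Illustrative sketch of the restore loop above (disabled; user-space
 * model with hypothetical sizes): at write time the first 32 bits of
 * every 512-byte block in the record body are overwritten with the cycle
 * number so torn writes are detectable, and the displaced words are
 * stashed in the header's cycle-data array. Recovery puts them back.
 */
#if 0
#include <stdint.h>
#include <string.h>

#define TOY_BBSIZE	512
#define TOY_NBLKS	4

struct toy_rhead {
	uint32_t cycle_data[TOY_NBLKS];	/* saved first-words, one per block */
};

static void toy_unpack(const struct toy_rhead *rhead, unsigned char *body)
{
	int i;

	for (i = 0; i < TOY_NBLKS; i++)
		memcpy(body + (size_t)i * TOY_BBSIZE,
		       &rhead->cycle_data[i], sizeof(uint32_t));
}

int main(void)
{
	static unsigned char body[TOY_NBLKS * TOY_BBSIZE];
	struct toy_rhead rhead = { { 1, 2, 3, 4 } };

	toy_unpack(&rhead, body);	/* each block gets its word back */
	return 0;
}
#endif
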
4006 STATIC int
4007 xlog_valid_rec_header(
4008 struct xlog *log,
4009 struct xlog_rec_header *rhead,
4010 xfs_daddr_t blkno)
4011 {
4012 int hlen;
4013
4014 if (unlikely(rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) {
4015 XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
4016 XFS_ERRLEVEL_LOW, log->l_mp);
4017 return XFS_ERROR(EFSCORRUPTED);
4018 }
4019 if (unlikely(
4020 (!rhead->h_version ||
4021 (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
4022 xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
4023 __func__, be32_to_cpu(rhead->h_version));
4024 return XFS_ERROR(EIO);
4025 }
4026
4027 /* LR body must have data or it wouldn't have been written */
4028 hlen = be32_to_cpu(rhead->h_len);
4029 	if (unlikely(hlen <= 0 || hlen > INT_MAX)) {
4030 XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
4031 XFS_ERRLEVEL_LOW, log->l_mp);
4032 return XFS_ERROR(EFSCORRUPTED);
4033 }
4034 	if (unlikely(blkno > log->l_logBBsize || blkno > INT_MAX)) {
4035 XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
4036 XFS_ERRLEVEL_LOW, log->l_mp);
4037 return XFS_ERROR(EFSCORRUPTED);
4038 }
4039 return 0;
4040 }
4041
4042 /*
4043 * Read the log from tail to head and process the log records found.
4044 * Handle the two cases where the tail and head are in the same cycle
4045 * and where the active portion of the log wraps around the end of
4046 * the physical log separately. The pass parameter is passed through
4047 * to the routines called to process the data and is not looked at
4048 * here.
4049 */
4050 STATIC int
4051 xlog_do_recovery_pass(
4052 struct xlog *log,
4053 xfs_daddr_t head_blk,
4054 xfs_daddr_t tail_blk,
4055 int pass)
4056 {
4057 xlog_rec_header_t *rhead;
4058 xfs_daddr_t blk_no;
4059 xfs_caddr_t offset;
4060 xfs_buf_t *hbp, *dbp;
4061 int error = 0, h_size;
4062 int bblks, split_bblks;
4063 int hblks, split_hblks, wrapped_hblks;
4064 struct hlist_head rhash[XLOG_RHASH_SIZE];
4065
4066 ASSERT(head_blk != tail_blk);
4067
4068 /*
4069 * Read the header of the tail block and get the iclog buffer size from
4070 * h_size. Use this to tell how many sectors make up the log header.
4071 */
4072 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
4073 /*
4074 * When using variable length iclogs, read first sector of
4075 * iclog header and extract the header size from it. Get a
4076 * new hbp that is the correct size.
4077 */
4078 hbp = xlog_get_bp(log, 1);
4079 if (!hbp)
4080 return ENOMEM;
4081
4082 error = xlog_bread(log, tail_blk, 1, hbp, &offset);
4083 if (error)
4084 goto bread_err1;
4085
4086 rhead = (xlog_rec_header_t *)offset;
4087 error = xlog_valid_rec_header(log, rhead, tail_blk);
4088 if (error)
4089 goto bread_err1;
4090 h_size = be32_to_cpu(rhead->h_size);
4091 if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
4092 (h_size > XLOG_HEADER_CYCLE_SIZE)) {
4093 hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
4094 if (h_size % XLOG_HEADER_CYCLE_SIZE)
4095 hblks++;
4096 xlog_put_bp(hbp);
4097 hbp = xlog_get_bp(log, hblks);
4098 } else {
4099 hblks = 1;
4100 }
4101 } else {
4102 ASSERT(log->l_sectBBsize == 1);
4103 hblks = 1;
4104 hbp = xlog_get_bp(log, 1);
4105 h_size = XLOG_BIG_RECORD_BSIZE;
4106 }
4107
4108 if (!hbp)
4109 return ENOMEM;
4110 dbp = xlog_get_bp(log, BTOBB(h_size));
4111 if (!dbp) {
4112 xlog_put_bp(hbp);
4113 return ENOMEM;
4114 }
4115
4116 memset(rhash, 0, sizeof(rhash));
4117 if (tail_blk <= head_blk) {
4118 for (blk_no = tail_blk; blk_no < head_blk; ) {
4119 error = xlog_bread(log, blk_no, hblks, hbp, &offset);
4120 if (error)
4121 goto bread_err2;
4122
4123 rhead = (xlog_rec_header_t *)offset;
4124 error = xlog_valid_rec_header(log, rhead, blk_no);
4125 if (error)
4126 goto bread_err2;
4127
4128 /* blocks in data section */
4129 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
4130 error = xlog_bread(log, blk_no + hblks, bblks, dbp,
4131 &offset);
4132 if (error)
4133 goto bread_err2;
4134
4135 error = xlog_unpack_data(rhead, offset, log);
4136 if (error)
4137 goto bread_err2;
4138
4139 error = xlog_recover_process_data(log,
4140 rhash, rhead, offset, pass);
4141 if (error)
4142 goto bread_err2;
4143 blk_no += bblks + hblks;
4144 }
4145 } else {
4146 /*
4147 * Perform recovery around the end of the physical log.
4148 * When the head is not on the same cycle number as the tail,
4149 * we can't do a sequential recovery as above.
4150 */
4151 blk_no = tail_blk;
4152 while (blk_no < log->l_logBBsize) {
4153 /*
4154 * Check for header wrapping around physical end-of-log
4155 */
4156 offset = hbp->b_addr;
4157 split_hblks = 0;
4158 wrapped_hblks = 0;
4159 if (blk_no + hblks <= log->l_logBBsize) {
4160 /* Read header in one read */
4161 error = xlog_bread(log, blk_no, hblks, hbp,
4162 &offset);
4163 if (error)
4164 goto bread_err2;
4165 } else {
4166 /* This LR is split across physical log end */
4167 if (blk_no != log->l_logBBsize) {
4168 /* some data before physical log end */
4169 ASSERT(blk_no <= INT_MAX);
4170 split_hblks = log->l_logBBsize - (int)blk_no;
4171 ASSERT(split_hblks > 0);
4172 error = xlog_bread(log, blk_no,
4173 split_hblks, hbp,
4174 &offset);
4175 if (error)
4176 goto bread_err2;
4177 }
4178
4179 /*
4180 * Note: this black magic still works with
4181 * large sector sizes (non-512) only because:
4182 * - we increased the buffer size originally
4183 * by 1 sector giving us enough extra space
4184 * for the second read;
4185 * - the log start is guaranteed to be sector
4186 * aligned;
4187 * - we read the log end (LR header start)
4188 * _first_, then the log start (LR header end)
4189 * - order is important.
4190 */
4191 wrapped_hblks = hblks - split_hblks;
4192 error = xlog_bread_offset(log, 0,
4193 wrapped_hblks, hbp,
4194 offset + BBTOB(split_hblks));
4195 if (error)
4196 goto bread_err2;
4197 }
4198 rhead = (xlog_rec_header_t *)offset;
4199 error = xlog_valid_rec_header(log, rhead,
4200 split_hblks ? blk_no : 0);
4201 if (error)
4202 goto bread_err2;
4203
4204 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
4205 blk_no += hblks;
4206
4207 /* Read in data for log record */
4208 if (blk_no + bblks <= log->l_logBBsize) {
4209 error = xlog_bread(log, blk_no, bblks, dbp,
4210 &offset);
4211 if (error)
4212 goto bread_err2;
4213 } else {
4214 /* This log record is split across the
4215 * physical end of log */
4216 offset = dbp->b_addr;
4217 split_bblks = 0;
4218 if (blk_no != log->l_logBBsize) {
4219 /* some data is before the physical
4220 * end of log */
4221 ASSERT(!wrapped_hblks);
4222 ASSERT(blk_no <= INT_MAX);
4223 split_bblks =
4224 log->l_logBBsize - (int)blk_no;
4225 ASSERT(split_bblks > 0);
4226 error = xlog_bread(log, blk_no,
4227 split_bblks, dbp,
4228 &offset);
4229 if (error)
4230 goto bread_err2;
4231 }
4232
4233 /*
4234 * Note: this black magic still works with
4235 * large sector sizes (non-512) only because:
4236 * - we increased the buffer size originally
4237 * by 1 sector giving us enough extra space
4238 * for the second read;
4239 * - the log start is guaranteed to be sector
4240 * aligned;
4241 * - we read the log end (LR header start)
4242 * _first_, then the log start (LR header end)
4243 * - order is important.
4244 */
4245 error = xlog_bread_offset(log, 0,
4246 bblks - split_bblks, dbp,
4247 offset + BBTOB(split_bblks));
4248 if (error)
4249 goto bread_err2;
4250 }
4251
4252 error = xlog_unpack_data(rhead, offset, log);
4253 if (error)
4254 goto bread_err2;
4255
4256 error = xlog_recover_process_data(log, rhash,
4257 rhead, offset, pass);
4258 if (error)
4259 goto bread_err2;
4260 blk_no += bblks;
4261 }
4262
4263 ASSERT(blk_no >= log->l_logBBsize);
4264 blk_no -= log->l_logBBsize;
4265
4266 /* read first part of physical log */
4267 while (blk_no < head_blk) {
4268 error = xlog_bread(log, blk_no, hblks, hbp, &offset);
4269 if (error)
4270 goto bread_err2;
4271
4272 rhead = (xlog_rec_header_t *)offset;
4273 error = xlog_valid_rec_header(log, rhead, blk_no);
4274 if (error)
4275 goto bread_err2;
4276
4277 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
4278 error = xlog_bread(log, blk_no+hblks, bblks, dbp,
4279 &offset);
4280 if (error)
4281 goto bread_err2;
4282
4283 error = xlog_unpack_data(rhead, offset, log);
4284 if (error)
4285 goto bread_err2;
4286
4287 error = xlog_recover_process_data(log, rhash,
4288 rhead, offset, pass);
4289 if (error)
4290 goto bread_err2;
4291 blk_no += bblks + hblks;
4292 }
4293 }
4294
4295 bread_err2:
4296 xlog_put_bp(dbp);
4297 bread_err1:
4298 xlog_put_bp(hbp);
4299 return error;
4300 }
4301
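/*
 * Illustrative sketch of the split reads above (disabled; user-space model
 * with a hypothetical toy_read() standing in for xlog_bread() and
 * xlog_bread_offset()): a region that wraps the physical end of the log
 * is assembled with two reads into one buffer, end of the log first, then
 * the wrapped remainder at the matching byte offset.
 */
#if 0
#include <string.h>

#define TOY_BBSIZE	512

/* pretend device read: copy nblks blocks starting at blk into dst */
static void toy_read(const unsigned char *dev, long blk, int nblks,
		     unsigned char *dst)
{
	memcpy(dst, dev + blk * TOY_BBSIZE, (size_t)nblks * TOY_BBSIZE);
}

static void toy_read_wrapped(const unsigned char *dev, long log_size,
			     long blk, int nblks, unsigned char *dst)
{
	int split;

	if (blk + nblks <= log_size) {		/* no wrap: one read */
		toy_read(dev, blk, nblks, dst);
		return;
	}
	split = (int)(log_size - blk);		/* blocks before the end */
	toy_read(dev, blk, split, dst);		/* end of the log first */
	toy_read(dev, 0, nblks - split,		/* then the wrapped part */
		 dst + (size_t)split * TOY_BBSIZE);
}

int main(void)
{
	static unsigned char dev[16 * TOY_BBSIZE], buf[4 * TOY_BBSIZE];

	/* a 4-block record starting 2 blocks before the 16-block end */
	toy_read_wrapped(dev, 16, 14, 4, buf);
	return 0;
}
#endif
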
4302 /*
4303 * Do the recovery of the log. We actually do this in two phases.
4304 * The two passes are necessary in order to implement the function
4305 * of cancelling a record written into the log. The first pass
4306 * determines those things which have been cancelled, and the
4307 * second pass replays log items normally except for those which
4308 * have been cancelled. The handling of the replay and cancellations
4309 * takes place in the log item type specific routines.
4310 *
4311 * The table of items which have cancel records in the log is allocated
4312 * and freed at this level, since only here do we know when all of
4313 * the log recovery has been completed.
4314 */
4315 STATIC int
4316 xlog_do_log_recovery(
4317 struct xlog *log,
4318 xfs_daddr_t head_blk,
4319 xfs_daddr_t tail_blk)
4320 {
4321 int error, i;
4322
4323 ASSERT(head_blk != tail_blk);
4324
4325 /*
4326 * First do a pass to find all of the cancelled buf log items.
4327 * Store them in the buf_cancel_table for use in the second pass.
4328 */
4329 log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
4330 sizeof(struct list_head),
4331 KM_SLEEP);
4332 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
4333 INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
4334
4335 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
4336 XLOG_RECOVER_PASS1);
4337 if (error != 0) {
4338 kmem_free(log->l_buf_cancel_table);
4339 log->l_buf_cancel_table = NULL;
4340 return error;
4341 }
4342 /*
4343 * Then do a second pass to actually recover the items in the log.
4344 * When it is complete free the table of buf cancel items.
4345 */
4346 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
4347 XLOG_RECOVER_PASS2);
4348 #ifdef DEBUG
4349 if (!error) {
4350 int i;
4351
4352 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
4353 ASSERT(list_empty(&log->l_buf_cancel_table[i]));
4354 }
4355 #endif /* DEBUG */
4356
4357 kmem_free(log->l_buf_cancel_table);
4358 log->l_buf_cancel_table = NULL;
4359
4360 return error;
4361 }
4362
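/*
 * Illustrative sketch of the two-pass scheme above (disabled; user-space
 * model with hypothetical record types): pass one only records which
 * buffers were cancelled, pass two replays every buffer record whose
 * target was not cancelled.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

struct toy_rec {
	long	blkno;
	bool	cancel;		/* stand-in for a buf-cancel log item */
};

static bool toy_cancelled(const struct toy_rec *recs, int n, long blkno)
{
	int i;

	for (i = 0; i < n; i++)	/* pass one, reduced to a lookup */
		if (recs[i].cancel && recs[i].blkno == blkno)
			return true;
	return false;
}

int main(void)
{
	struct toy_rec recs[] = {
		{ 10, false }, { 20, true }, { 10, false }, { 20, false },
	};
	int n = sizeof(recs) / sizeof(recs[0]);
	int i;

	/* pass two: skip both the cancel records and their victims */
	for (i = 0; i < n; i++)
		if (!recs[i].cancel && !toy_cancelled(recs, n, recs[i].blkno))
			printf("replay write to buffer %ld\n", recs[i].blkno);
	return 0;
}
#endif
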
4363 /*
4364 * Do the actual recovery
4365 */
4366 STATIC int
4367 xlog_do_recover(
4368 struct xlog *log,
4369 xfs_daddr_t head_blk,
4370 xfs_daddr_t tail_blk)
4371 {
4372 int error;
4373 xfs_buf_t *bp;
4374 xfs_sb_t *sbp;
4375
4376 /*
4377 * First replay the images in the log.
4378 */
4379 error = xlog_do_log_recovery(log, head_blk, tail_blk);
4380 if (error)
4381 return error;
4382
4383 /*
4384 * If IO errors happened during recovery, bail out.
4385 */
4386 if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
4387 		return EIO;
4388 }
4389
4390 /*
4391 * We now update the tail_lsn since much of the recovery has completed
4392 	 * and there may be space available to use. If there were no extent
4393 	 * frees or iunlinks, we can free up the entire log and set the tail_lsn to
4394 * be the last_sync_lsn. This was set in xlog_find_tail to be the
4395 * lsn of the last known good LR on disk. If there are extent frees
4396 * or iunlinks they will have some entries in the AIL; so we look at
4397 * the AIL to determine how to set the tail_lsn.
4398 */
4399 xlog_assign_tail_lsn(log->l_mp);
4400
4401 /*
4402 * Now that we've finished replaying all buffer and inode
4403 * updates, re-read in the superblock and reverify it.
4404 */
4405 bp = xfs_getsb(log->l_mp, 0);
4406 XFS_BUF_UNDONE(bp);
4407 ASSERT(!(XFS_BUF_ISWRITE(bp)));
4408 XFS_BUF_READ(bp);
4409 XFS_BUF_UNASYNC(bp);
4410 bp->b_ops = &xfs_sb_buf_ops;
4411 xfsbdstrat(log->l_mp, bp);
4412 error = xfs_buf_iowait(bp);
4413 if (error) {
4414 xfs_buf_ioerror_alert(bp, __func__);
4415 ASSERT(0);
4416 xfs_buf_relse(bp);
4417 return error;
4418 }
4419
4420 /* Convert superblock from on-disk format */
4421 sbp = &log->l_mp->m_sb;
4422 xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
4423 ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC);
4424 ASSERT(xfs_sb_good_version(sbp));
4425 xfs_buf_relse(bp);
4426
4427 /* We've re-read the superblock so re-initialize per-cpu counters */
4428 xfs_icsb_reinit_counters(log->l_mp);
4429
4430 xlog_recover_check_summary(log);
4431
4432 /* Normal transactions can now occur */
4433 log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
4434 return 0;
4435 }
4436
4437 /*
4438 * Perform recovery and re-initialize some log variables in xlog_find_tail.
4439 *
4440 * Return error or zero.
4441 */
4442 int
4443 xlog_recover(
4444 struct xlog *log)
4445 {
4446 xfs_daddr_t head_blk, tail_blk;
4447 int error;
4448
4449 /* find the tail of the log */
4450 if ((error = xlog_find_tail(log, &head_blk, &tail_blk)))
4451 return error;
4452
4453 if (tail_blk != head_blk) {
4454 /* There used to be a comment here:
4455 *
4456 * disallow recovery on read-only mounts. note -- mount
4457 * checks for ENOSPC and turns it into an intelligent
4458 * error message.
4459 * ...but this is no longer true. Now, unless you specify
4460 * NORECOVERY (in which case this function would never be
4461 * called), we just go ahead and recover. We do this all
4462 * under the vfs layer, so we can get away with it unless
4463 * the device itself is read-only, in which case we fail.
4464 */
4465 if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
4466 return error;
4467 }
4468
4469 /*
4470 * Version 5 superblock log feature mask validation. We know the
4471 * log is dirty so check if there are any unknown log features
4472 * in what we need to recover. If there are unknown features
4473 		 * (e.g. unsupported transactions), then simply reject the
4474 * attempt at recovery before touching anything.
4475 */
4476 if (XFS_SB_VERSION_NUM(&log->l_mp->m_sb) == XFS_SB_VERSION_5 &&
4477 xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
4478 XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
4479 xfs_warn(log->l_mp,
4480 "Superblock has unknown incompatible log features (0x%x) enabled.\n"
4481 "The log can not be fully and/or safely recovered by this kernel.\n"
4482 "Please recover the log on a kernel that supports the unknown features.",
4483 (log->l_mp->m_sb.sb_features_log_incompat &
4484 XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
4485 return EINVAL;
4486 }
4487
4488 xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
4489 log->l_mp->m_logname ? log->l_mp->m_logname
4490 : "internal");
4491
4492 error = xlog_do_recover(log, head_blk, tail_blk);
4493 log->l_flags |= XLOG_RECOVERY_NEEDED;
4494 }
4495 return error;
4496 }
4497
4498 /*
4499 * In the first part of recovery we replay inodes and buffers and build
4500 * up the list of extent free items which need to be processed. Here
4501 * we process the extent free items and clean up the on disk unlinked
4502 * inode lists. This is separated from the first part of recovery so
4503 * that the root and real-time bitmap inodes can be read in from disk in
4504 * between the two stages. This is necessary so that we can free space
4505 * in the real-time portion of the file system.
4506 */
4507 int
4508 xlog_recover_finish(
4509 struct xlog *log)
4510 {
4511 /*
4512 * Now we're ready to do the transactions needed for the
4513 * rest of recovery. Start with completing all the extent
4514 * free intent records and then process the unlinked inode
4515 * lists. At this point, we essentially run in normal mode
4516 * except that we're still performing recovery actions
4517 * rather than accepting new requests.
4518 */
4519 if (log->l_flags & XLOG_RECOVERY_NEEDED) {
4520 int error;
4521 error = xlog_recover_process_efis(log);
4522 if (error) {
4523 xfs_alert(log->l_mp, "Failed to recover EFIs");
4524 return error;
4525 }
4526 /*
4527 * Sync the log to get all the EFIs out of the AIL.
4528 * This isn't absolutely necessary, but it helps in
4529 * case the unlink transactions would have problems
4530 * pushing the EFIs out of the way.
4531 */
4532 xfs_log_force(log->l_mp, XFS_LOG_SYNC);
4533
4534 xlog_recover_process_iunlinks(log);
4535
4536 xlog_recover_check_summary(log);
4537
4538 xfs_notice(log->l_mp, "Ending recovery (logdev: %s)",
4539 log->l_mp->m_logname ? log->l_mp->m_logname
4540 : "internal");
4541 log->l_flags &= ~XLOG_RECOVERY_NEEDED;
4542 } else {
4543 xfs_info(log->l_mp, "Ending clean mount");
4544 }
4545 return 0;
4546 }
4547
4548
4549 #if defined(DEBUG)
4550 /*
4551 * Read all of the agf and agi counters and check that they
4552 * are consistent with the superblock counters.
4553 */
4554 void
4555 xlog_recover_check_summary(
4556 struct xlog *log)
4557 {
4558 xfs_mount_t *mp;
4559 xfs_agf_t *agfp;
4560 xfs_buf_t *agfbp;
4561 xfs_buf_t *agibp;
4562 xfs_agnumber_t agno;
4563 __uint64_t freeblks;
4564 __uint64_t itotal;
4565 __uint64_t ifree;
4566 int error;
4567
4568 mp = log->l_mp;
4569
4570 freeblks = 0LL;
4571 itotal = 0LL;
4572 ifree = 0LL;
4573 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
4574 error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
4575 if (error) {
4576 xfs_alert(mp, "%s agf read failed agno %d error %d",
4577 __func__, agno, error);
4578 } else {
4579 agfp = XFS_BUF_TO_AGF(agfbp);
4580 freeblks += be32_to_cpu(agfp->agf_freeblks) +
4581 be32_to_cpu(agfp->agf_flcount);
4582 xfs_buf_relse(agfbp);
4583 }
4584
4585 error = xfs_read_agi(mp, NULL, agno, &agibp);
4586 if (error) {
4587 xfs_alert(mp, "%s agi read failed agno %d error %d",
4588 __func__, agno, error);
4589 } else {
4590 struct xfs_agi *agi = XFS_BUF_TO_AGI(agibp);
4591
4592 itotal += be32_to_cpu(agi->agi_count);
4593 ifree += be32_to_cpu(agi->agi_freecount);
4594 xfs_buf_relse(agibp);
4595 }
4596 }
4597 }
4598 #endif /* DEBUG */