xfs: Remove the macro XFS_BUF_ERROR and family
fs/xfs/xfs_log_recover.c
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_log_priv.h"
#include "xfs_buf_item.h"
#include "xfs_log_recover.h"
#include "xfs_extfree_item.h"
#include "xfs_trans_priv.h"
#include "xfs_quota.h"
#include "xfs_rw.h"
#include "xfs_utils.h"
#include "xfs_trace.h"

STATIC int      xlog_find_zeroed(xlog_t *, xfs_daddr_t *);
STATIC int      xlog_clear_stale_blocks(xlog_t *, xfs_lsn_t);
#if defined(DEBUG)
STATIC void     xlog_recover_check_summary(xlog_t *);
#else
#define xlog_recover_check_summary(log)
#endif

/*
 * This structure is used during recovery to record the buf log items which
 * have been canceled and should not be replayed.
 */
struct xfs_buf_cancel {
        xfs_daddr_t             bc_blkno;
        uint                    bc_len;
        int                     bc_refcount;
        struct list_head        bc_list;
};

/*
 * Sector aligned buffer routines for buffer create/read/write/access
 */

/*
 * Verify that the given count of basic blocks is a valid number of blocks
 * to specify for an operation involving the given XFS log buffer.
 * Returns nonzero if the count is valid, 0 otherwise.
 */

static inline int
xlog_buf_bbcount_valid(
        xlog_t          *log,
        int             bbcount)
{
        return bbcount > 0 && bbcount <= log->l_logBBsize;
}

/*
 * Allocate a buffer to hold log data.  The buffer needs to be able
 * to map to a range of nbblks basic blocks at any valid (basic
 * block) offset within the log.
 */
STATIC xfs_buf_t *
xlog_get_bp(
        xlog_t          *log,
        int             nbblks)
{
        struct xfs_buf  *bp;

        if (!xlog_buf_bbcount_valid(log, nbblks)) {
                xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
                        nbblks);
                XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
                return NULL;
        }

        /*
         * We do log I/O in units of log sectors (a power-of-2
         * multiple of the basic block size), so we round up the
         * requested size to accommodate the basic blocks required
         * for complete log sectors.
         *
         * In addition, the buffer may be used for a non-sector-
         * aligned block offset, in which case an I/O of the
         * requested size could extend beyond the end of the
         * buffer.  If the requested size is only 1 basic block it
         * will never straddle a sector boundary, so this won't be
         * an issue.  Nor will this be a problem if the log I/O is
         * done in basic blocks (sector size 1).  But otherwise we
         * extend the buffer by one extra log sector to ensure
         * there's space to accommodate this possibility.
         */
        if (nbblks > 1 && log->l_sectBBsize > 1)
                nbblks += log->l_sectBBsize;
        nbblks = round_up(nbblks, log->l_sectBBsize);

        bp = xfs_buf_get_uncached(log->l_mp->m_logdev_targp, BBTOB(nbblks), 0);
        if (bp)
                xfs_buf_unlock(bp);
        return bp;
}
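
/*
 * Worked example (editor's illustration, not part of the original file):
 * with l_sectBBsize = 8, a request for nbblks = 5 is first padded to 13
 * by the extra-sector rule above and then rounded up to 16 basic blocks,
 * i.e. BBTOB(16) = 8192 bytes of buffer space.  A single-block request
 * skips the padding and simply rounds up to one full 8 block sector.
 */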

STATIC void
xlog_put_bp(
        xfs_buf_t       *bp)
{
        xfs_buf_free(bp);
}

/*
 * Return the address of the start of the given block number's data
 * in a log buffer.  The buffer covers a log sector-aligned region.
 */
STATIC xfs_caddr_t
xlog_align(
        xlog_t          *log,
        xfs_daddr_t     blk_no,
        int             nbblks,
        xfs_buf_t       *bp)
{
        xfs_daddr_t     offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1);

        ASSERT(BBTOB(offset + nbblks) <= XFS_BUF_SIZE(bp));
        return XFS_BUF_PTR(bp) + BBTOB(offset);
}
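
/*
 * Worked example (editor's illustration): with l_sectBBsize = 8, a read
 * of blk_no = 21 is rounded down to block 16 by xlog_bread_noalign(), so
 * the caller's data begins offset = 21 & 7 = 5 basic blocks into the
 * buffer, i.e. at XFS_BUF_PTR(bp) + BBTOB(5), which is exactly what this
 * routine returns.
 */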


/*
 * nbblks should be uint, but oh well.  Just want to catch that 32-bit length.
 */
STATIC int
xlog_bread_noalign(
        xlog_t          *log,
        xfs_daddr_t     blk_no,
        int             nbblks,
        xfs_buf_t       *bp)
{
        int             error;

        if (!xlog_buf_bbcount_valid(log, nbblks)) {
                xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
                        nbblks);
                XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
                return EFSCORRUPTED;
        }

        blk_no = round_down(blk_no, log->l_sectBBsize);
        nbblks = round_up(nbblks, log->l_sectBBsize);

        ASSERT(nbblks > 0);
        ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp));

        XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
        XFS_BUF_READ(bp);
        XFS_BUF_BUSY(bp);
        XFS_BUF_SET_COUNT(bp, BBTOB(nbblks));
        XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp);

        xfsbdstrat(log->l_mp, bp);
        error = xfs_buf_iowait(bp);
        if (error)
                xfs_ioerror_alert("xlog_bread", log->l_mp,
                                  bp, XFS_BUF_ADDR(bp));
        return error;
}

STATIC int
xlog_bread(
        xlog_t          *log,
        xfs_daddr_t     blk_no,
        int             nbblks,
        xfs_buf_t       *bp,
        xfs_caddr_t     *offset)
{
        int             error;

        error = xlog_bread_noalign(log, blk_no, nbblks, bp);
        if (error)
                return error;

        *offset = xlog_align(log, blk_no, nbblks, bp);
        return 0;
}
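
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): the read pattern used throughout this file.  The helper name
 * xlog_read_cycle_example is hypothetical; it only exercises the real
 * xlog_get_bp/xlog_bread/xlog_put_bp routines defined above.
 */
#if 0
STATIC uint
xlog_read_cycle_example(
        xlog_t          *log,
        xfs_daddr_t     blk_no)
{
        xfs_buf_t       *bp;
        xfs_caddr_t     offset;
        uint            cycle = 0;

        bp = xlog_get_bp(log, 1);               /* one basic block */
        if (!bp)
                return 0;
        if (!xlog_bread(log, blk_no, 1, bp, &offset))
                cycle = xlog_get_cycle(offset); /* offset already aligned */
        xlog_put_bp(bp);
        return cycle;
}
#endif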

/*
 * Read at an offset into the buffer.  Returns with the buffer in its original
 * state regardless of the result of the read.
 */
STATIC int
xlog_bread_offset(
        xlog_t          *log,
        xfs_daddr_t     blk_no,         /* block to read from */
        int             nbblks,         /* blocks to read */
        xfs_buf_t       *bp,
        xfs_caddr_t     offset)
{
        xfs_caddr_t     orig_offset = XFS_BUF_PTR(bp);
        int             orig_len = bp->b_buffer_length;
        int             error, error2;

        error = XFS_BUF_SET_PTR(bp, offset, BBTOB(nbblks));
        if (error)
                return error;

        error = xlog_bread_noalign(log, blk_no, nbblks, bp);

        /* must reset buffer pointer even on error */
        error2 = XFS_BUF_SET_PTR(bp, orig_offset, orig_len);
        if (error)
                return error;
        return error2;
}

/*
 * Write out the buffer at the given block for the given number of blocks.
 * The buffer is kept locked across the write and is returned locked.
 * This can only be used for synchronous log writes.
 */
STATIC int
xlog_bwrite(
        xlog_t          *log,
        xfs_daddr_t     blk_no,
        int             nbblks,
        xfs_buf_t       *bp)
{
        int             error;

        if (!xlog_buf_bbcount_valid(log, nbblks)) {
                xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
                        nbblks);
                XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
                return EFSCORRUPTED;
        }

        blk_no = round_down(blk_no, log->l_sectBBsize);
        nbblks = round_up(nbblks, log->l_sectBBsize);

        ASSERT(nbblks > 0);
        ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp));

        XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
        XFS_BUF_ZEROFLAGS(bp);
        XFS_BUF_BUSY(bp);
        XFS_BUF_HOLD(bp);
        xfs_buf_lock(bp);
        XFS_BUF_SET_COUNT(bp, BBTOB(nbblks));
        XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp);

        if ((error = xfs_bwrite(log->l_mp, bp)))
                xfs_ioerror_alert("xlog_bwrite", log->l_mp,
                                  bp, XFS_BUF_ADDR(bp));
        return error;
}

#ifdef DEBUG
/*
 * dump debug superblock and log record information
 */
STATIC void
xlog_header_check_dump(
        xfs_mount_t             *mp,
        xlog_rec_header_t       *head)
{
        xfs_debug(mp, "%s: SB : uuid = %pU, fmt = %d\n",
                __func__, &mp->m_sb.sb_uuid, XLOG_FMT);
        xfs_debug(mp, "    log : uuid = %pU, fmt = %d\n",
                &head->h_fs_uuid, be32_to_cpu(head->h_fmt));
}
#else
#define xlog_header_check_dump(mp, head)
#endif

/*
 * check log record header for recovery
 */
STATIC int
xlog_header_check_recover(
        xfs_mount_t             *mp,
        xlog_rec_header_t       *head)
{
        ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

        /*
         * IRIX doesn't write the h_fmt field and leaves it zeroed
         * (XLOG_FMT_UNKNOWN).  This stops us from trying to recover
         * a dirty log created in IRIX.
         */
        if (unlikely(head->h_fmt != cpu_to_be32(XLOG_FMT))) {
                xfs_warn(mp,
        "dirty log written in incompatible format - can't recover");
                xlog_header_check_dump(mp, head);
                XFS_ERROR_REPORT("xlog_header_check_recover(1)",
                                 XFS_ERRLEVEL_HIGH, mp);
                return XFS_ERROR(EFSCORRUPTED);
        } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
                xfs_warn(mp,
        "dirty log entry has mismatched uuid - can't recover");
                xlog_header_check_dump(mp, head);
                XFS_ERROR_REPORT("xlog_header_check_recover(2)",
                                 XFS_ERRLEVEL_HIGH, mp);
                return XFS_ERROR(EFSCORRUPTED);
        }
        return 0;
}

/*
 * read the head block of the log and check the header
 */
STATIC int
xlog_header_check_mount(
        xfs_mount_t             *mp,
        xlog_rec_header_t       *head)
{
        ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

        if (uuid_is_nil(&head->h_fs_uuid)) {
                /*
                 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
                 * h_fs_uuid is nil, we assume this log was last mounted
                 * by IRIX and continue.
                 */
                xfs_warn(mp, "nil uuid in log - IRIX style log");
        } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
                xfs_warn(mp, "log has mismatched uuid - can't recover");
                xlog_header_check_dump(mp, head);
                XFS_ERROR_REPORT("xlog_header_check_mount",
                                 XFS_ERRLEVEL_HIGH, mp);
                return XFS_ERROR(EFSCORRUPTED);
        }
        return 0;
}

STATIC void
xlog_recover_iodone(
        struct xfs_buf  *bp)
{
        if (bp->b_error) {
                /*
                 * We're not going to bother about retrying
                 * this during recovery. One strike!
                 */
                xfs_ioerror_alert("xlog_recover_iodone",
                                        bp->b_target->bt_mount, bp,
                                        XFS_BUF_ADDR(bp));
                xfs_force_shutdown(bp->b_target->bt_mount,
                                        SHUTDOWN_META_IO_ERROR);
        }
        bp->b_iodone = NULL;
        xfs_buf_ioend(bp, 0);
}

/*
 * This routine finds (to an approximation) the first block in the physical
 * log which contains the given cycle.  It uses a binary search algorithm.
 * Note that the algorithm cannot be perfect because the disk will not
 * necessarily be perfect.
 */
STATIC int
xlog_find_cycle_start(
        xlog_t          *log,
        xfs_buf_t       *bp,
        xfs_daddr_t     first_blk,
        xfs_daddr_t     *last_blk,
        uint            cycle)
{
        xfs_caddr_t     offset;
        xfs_daddr_t     mid_blk;
        xfs_daddr_t     end_blk;
        uint            mid_cycle;
        int             error;

        end_blk = *last_blk;
        mid_blk = BLK_AVG(first_blk, end_blk);
        while (mid_blk != first_blk && mid_blk != end_blk) {
                error = xlog_bread(log, mid_blk, 1, bp, &offset);
                if (error)
                        return error;
                mid_cycle = xlog_get_cycle(offset);
                if (mid_cycle == cycle)
                        end_blk = mid_blk;   /* last_half_cycle == mid_cycle */
                else
                        first_blk = mid_blk; /* first_half_cycle == mid_cycle */
                mid_blk = BLK_AVG(first_blk, end_blk);
        }
        ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
               (mid_blk == end_blk && mid_blk-1 == first_blk));

        *last_blk = end_blk;

        return 0;
}
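
/*
 * Worked example (editor's illustration): for a log whose cycles read
 * 10 10 10 9 9 9 9 9 (first_blk = 0, *last_blk = 7, cycle = 9), the
 * search narrows as mid_blk = 3 (cycle 9, end_blk = 3), mid_blk = 1
 * (cycle 10, first_blk = 1), mid_blk = 2 (cycle 10, first_blk = 2),
 * and terminates with *last_blk = 3, the first block of cycle 9.
 */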

/*
 * Check that a range of blocks does not contain stop_on_cycle_no.
 * Fill in *new_blk with the block offset where such a block is
 * found, or with -1 (an invalid block number) if there is no such
 * block in the range.  The scan needs to occur from front to back
 * and the pointer into the region must be updated since a later
 * routine will need to perform another test.
 */
STATIC int
xlog_find_verify_cycle(
        xlog_t          *log,
        xfs_daddr_t     start_blk,
        int             nbblks,
        uint            stop_on_cycle_no,
        xfs_daddr_t     *new_blk)
{
        xfs_daddr_t     i, j;
        uint            cycle;
        xfs_buf_t       *bp;
        xfs_daddr_t     bufblks;
        xfs_caddr_t     buf = NULL;
        int             error = 0;

        /*
         * Greedily allocate a buffer big enough to handle the full
         * range of basic blocks we'll be examining.  If that fails,
         * try a smaller size.  We need to be able to read at least
         * a log sector, or we're out of luck.
         */
        bufblks = 1 << ffs(nbblks);
        while (!(bp = xlog_get_bp(log, bufblks))) {
                bufblks >>= 1;
                if (bufblks < log->l_sectBBsize)
                        return ENOMEM;
        }

        for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
                int     bcount;

                bcount = min(bufblks, (start_blk + nbblks - i));

                error = xlog_bread(log, i, bcount, bp, &buf);
                if (error)
                        goto out;

                for (j = 0; j < bcount; j++) {
                        cycle = xlog_get_cycle(buf);
                        if (cycle == stop_on_cycle_no) {
                                *new_blk = i+j;
                                goto out;
                        }

                        buf += BBSIZE;
                }
        }

        *new_blk = -1;

out:
        xlog_put_bp(bp);
        return error;
}
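
/*
 * Worked example (editor's illustration): for nbblks = 12, ffs(12) = 3,
 * so the first attempt is a 1 << 3 = 8 block buffer, and the loop above
 * then walks the 12 block range in chunks of at most 8 blocks.  Only if
 * the allocation itself fails is the size halved, and once it would fall
 * below one log sector we give up with ENOMEM.
 */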

/*
 * Potentially backup over partial log record write.
 *
 * In the typical case, last_blk is the number of the block directly after
 * a good log record.  Therefore, we subtract one to get the block number
 * of the last block in the given buffer.  extra_bblks contains the number
 * of blocks we would have read on a previous read.  This happens when the
 * last log record is split over the end of the physical log.
 *
 * extra_bblks is the number of blocks potentially verified on a previous
 * call to this routine.
 */
STATIC int
xlog_find_verify_log_record(
        xlog_t                  *log,
        xfs_daddr_t             start_blk,
        xfs_daddr_t             *last_blk,
        int                     extra_bblks)
{
        xfs_daddr_t             i;
        xfs_buf_t               *bp;
        xfs_caddr_t             offset = NULL;
        xlog_rec_header_t       *head = NULL;
        int                     error = 0;
        int                     smallmem = 0;
        int                     num_blks = *last_blk - start_blk;
        int                     xhdrs;

        ASSERT(start_blk != 0 || *last_blk != start_blk);

        if (!(bp = xlog_get_bp(log, num_blks))) {
                if (!(bp = xlog_get_bp(log, 1)))
                        return ENOMEM;
                smallmem = 1;
        } else {
                error = xlog_bread(log, start_blk, num_blks, bp, &offset);
                if (error)
                        goto out;
                offset += ((num_blks - 1) << BBSHIFT);
        }

        for (i = (*last_blk) - 1; i >= 0; i--) {
                if (i < start_blk) {
                        /* valid log record not found */
                        xfs_warn(log->l_mp,
                "Log inconsistent (didn't find previous header)");
                        ASSERT(0);
                        error = XFS_ERROR(EIO);
                        goto out;
                }

                if (smallmem) {
                        error = xlog_bread(log, i, 1, bp, &offset);
                        if (error)
                                goto out;
                }

                head = (xlog_rec_header_t *)offset;

                if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
                        break;

                if (!smallmem)
                        offset -= BBSIZE;
        }

        /*
         * We hit the beginning of the physical log & still no header.  Return
         * to caller.  If caller can handle a return of -1, then this routine
         * will be called again for the end of the physical log.
         */
        if (i == -1) {
                error = -1;
                goto out;
        }

        /*
         * We have the final block of the good log (the first block
         * of the log record _before_ the head).  So we check the uuid.
         */
        if ((error = xlog_header_check_mount(log->l_mp, head)))
                goto out;

        /*
         * We may have found a log record header before we expected one.
         * last_blk will be the 1st block # with a given cycle #.  We may end
         * up reading an entire log record.  In this case, we don't want to
         * reset last_blk.  Only when last_blk points in the middle of a log
         * record do we update last_blk.
         */
        if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
                uint    h_size = be32_to_cpu(head->h_size);

                xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
                if (h_size % XLOG_HEADER_CYCLE_SIZE)
                        xhdrs++;
        } else {
                xhdrs = 1;
        }

        if (*last_blk - i + extra_bblks !=
            BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
                *last_blk = i;

out:
        xlog_put_bp(bp);
        return error;
}

/*
 * Head is defined to be the point of the log where the next log write
 * could go.  This means that incomplete LR writes at the end are
 * eliminated when calculating the head.  We aren't guaranteed that previous
 * LR have complete transactions.  We only know that a cycle number of
 * current cycle number -1 won't be present in the log if we start writing
 * from our current block number.
 *
 * last_blk contains the block number of the first block with a given
 * cycle number.
 *
 * Return: zero if normal, non-zero if error.
 */
STATIC int
xlog_find_head(
        xlog_t          *log,
        xfs_daddr_t     *return_head_blk)
{
        xfs_buf_t       *bp;
        xfs_caddr_t     offset;
        xfs_daddr_t     new_blk, first_blk, start_blk, last_blk, head_blk;
        int             num_scan_bblks;
        uint            first_half_cycle, last_half_cycle;
        uint            stop_on_cycle;
        int             error, log_bbnum = log->l_logBBsize;

        /* Is the end of the log device zeroed? */
        if ((error = xlog_find_zeroed(log, &first_blk)) == -1) {
                *return_head_blk = first_blk;

                /* Is the whole lot zeroed? */
                if (!first_blk) {
                        /* Linux XFS shouldn't generate totally zeroed logs -
                         * mkfs etc write a dummy unmount record to a fresh
                         * log so we can store the uuid in there
                         */
                        xfs_warn(log->l_mp, "totally zeroed log");
                }

                return 0;
        } else if (error) {
                xfs_warn(log->l_mp, "empty log check failed");
                return error;
        }

        first_blk = 0;                  /* get cycle # of 1st block */
        bp = xlog_get_bp(log, 1);
        if (!bp)
                return ENOMEM;

        error = xlog_bread(log, 0, 1, bp, &offset);
        if (error)
                goto bp_err;

        first_half_cycle = xlog_get_cycle(offset);

        last_blk = head_blk = log_bbnum - 1;    /* get cycle # of last block */
        error = xlog_bread(log, last_blk, 1, bp, &offset);
        if (error)
                goto bp_err;

        last_half_cycle = xlog_get_cycle(offset);
        ASSERT(last_half_cycle != 0);

        /*
         * If the 1st half cycle number is equal to the last half cycle number,
         * then the entire log is stamped with the same cycle number.  In this
         * case, head_blk can't be set to zero (which makes sense).  The below
         * math doesn't work out properly with head_blk equal to zero.  Instead,
         * we set it to log_bbnum which is an invalid block number, but this
         * value makes the math correct.  If head_blk doesn't change through
         * all the tests below, *head_blk is set to zero at the very end rather
         * than log_bbnum.  In a sense, log_bbnum and zero are the same block
         * in a circular file.
         */
        if (first_half_cycle == last_half_cycle) {
                /*
                 * In this case we believe that the entire log should have
                 * cycle number last_half_cycle.  We need to scan backwards
                 * from the end verifying that there are no holes still
                 * containing last_half_cycle - 1.  If we find such a hole,
                 * then the start of that hole will be the new head.  The
                 * simple case looks like
                 *        x | x ... | x - 1 | x
                 * Another case that fits this picture would be
                 *        x | x + 1 | x ... | x
                 * In this case the head really is somewhere at the end of the
                 * log, as one of the latest writes at the beginning was
                 * incomplete.
                 * One more case is
                 *        x | x + 1 | x ... | x - 1 | x
                 * This is really the combination of the above two cases, and
                 * the head has to end up at the start of the x-1 hole at the
                 * end of the log.
                 *
                 * In the 256k log case, we will read from the beginning to the
                 * end of the log and search for cycle numbers equal to x-1.
                 * We don't worry about the x+1 blocks that we encounter,
                 * because we know that they cannot be the head since the log
                 * started with x.
                 */
                head_blk = log_bbnum;
                stop_on_cycle = last_half_cycle - 1;
        } else {
                /*
                 * In this case we want to find the first block with cycle
                 * number matching last_half_cycle.  We expect the log to be
                 * some variation on
                 *        x + 1 ... | x ... | x
                 * The first block with cycle number x (last_half_cycle) will
                 * be where the new head belongs.  First we do a binary search
                 * for the first occurrence of last_half_cycle.  The binary
                 * search may not be totally accurate, so then we scan back
                 * from there looking for occurrences of last_half_cycle before
                 * us.  If that backwards scan wraps around the beginning of
                 * the log, then we look for occurrences of last_half_cycle - 1
                 * at the end of the log.  The cases we're looking for look
                 * like
                 *                               v binary search stopped here
                 *        x + 1 ... | x | x + 1 | x ... | x
                 *                   ^ but we want to locate this spot
                 * or
                 *        <---------> less than scan distance
                 *        x + 1 ... | x ... | x - 1 | x
                 *                           ^ we want to locate this spot
                 */
                stop_on_cycle = last_half_cycle;
                if ((error = xlog_find_cycle_start(log, bp, first_blk,
                                                &head_blk, last_half_cycle)))
                        goto bp_err;
        }

        /*
         * Now validate the answer.  Scan back some number of maximum possible
         * blocks and make sure each one has the expected cycle number.  The
         * maximum is determined by the total possible amount of buffering
         * in the in-core log.  The following number can be made tighter if
         * we actually look at the block size of the filesystem.
         */
        num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
        if (head_blk >= num_scan_bblks) {
                /*
                 * We are guaranteed that the entire check can be performed
                 * in one buffer.
                 */
                start_blk = head_blk - num_scan_bblks;
                if ((error = xlog_find_verify_cycle(log,
                                                start_blk, num_scan_bblks,
                                                stop_on_cycle, &new_blk)))
                        goto bp_err;
                if (new_blk != -1)
                        head_blk = new_blk;
        } else {                /* need to read 2 parts of log */
                /*
                 * We are going to scan backwards in the log in two parts.
                 * First we scan the physical end of the log.  In this part
                 * of the log, we are looking for blocks with cycle number
                 * last_half_cycle - 1.
                 * If we find one, then we know that the log starts there, as
                 * we've found a hole that didn't get written in going around
                 * the end of the physical log.  The simple case for this is
                 *        x + 1 ... | x ... | x - 1 | x
                 *        <---------> less than scan distance
                 * If all of the blocks at the end of the log have cycle number
                 * last_half_cycle, then we check the blocks at the start of
                 * the log looking for occurrences of last_half_cycle.  If we
                 * find one, then our current estimate for the location of the
                 * first occurrence of last_half_cycle is wrong and we move
                 * back to the hole we've found.  This case looks like
                 *        x + 1 ... | x | x + 1 | x ...
                 *                       ^ binary search stopped here
                 * Another case we need to handle that only occurs in 256k
                 * logs is
                 *        x + 1 ... | x ... | x+1 | x ...
                 *                   ^ binary search stops here
                 * In a 256k log, the scan at the end of the log will see the
                 * x + 1 blocks.  We need to skip past those since that is
                 * certainly not the head of the log.  By searching for
                 * last_half_cycle-1 we accomplish that.
                 */
                ASSERT(head_blk <= INT_MAX &&
                        (xfs_daddr_t) num_scan_bblks >= head_blk);
                start_blk = log_bbnum - (num_scan_bblks - head_blk);
                if ((error = xlog_find_verify_cycle(log, start_blk,
                                        num_scan_bblks - (int)head_blk,
                                        (stop_on_cycle - 1), &new_blk)))
                        goto bp_err;
                if (new_blk != -1) {
                        head_blk = new_blk;
                        goto validate_head;
                }

                /*
                 * Scan beginning of log now.  The last part of the physical
                 * log is good.  This scan needs to verify that it doesn't find
                 * the last_half_cycle.
                 */
                start_blk = 0;
                ASSERT(head_blk <= INT_MAX);
                if ((error = xlog_find_verify_cycle(log,
                                        start_blk, (int)head_blk,
                                        stop_on_cycle, &new_blk)))
                        goto bp_err;
                if (new_blk != -1)
                        head_blk = new_blk;
        }

validate_head:
        /*
         * Now we need to make sure head_blk is not pointing to a block in
         * the middle of a log record.
         */
        num_scan_bblks = XLOG_REC_SHIFT(log);
        if (head_blk >= num_scan_bblks) {
                start_blk = head_blk - num_scan_bblks; /* don't read head_blk */

                /* start ptr at last block ptr before head_blk */
                if ((error = xlog_find_verify_log_record(log, start_blk,
                                                        &head_blk, 0)) == -1) {
                        error = XFS_ERROR(EIO);
                        goto bp_err;
                } else if (error)
                        goto bp_err;
        } else {
                start_blk = 0;
                ASSERT(head_blk <= INT_MAX);
                if ((error = xlog_find_verify_log_record(log, start_blk,
                                                        &head_blk, 0)) == -1) {
                        /* We hit the beginning of the log during our search */
                        start_blk = log_bbnum - (num_scan_bblks - head_blk);
                        new_blk = log_bbnum;
                        ASSERT(start_blk <= INT_MAX &&
                                (xfs_daddr_t) log_bbnum-start_blk >= 0);
                        ASSERT(head_blk <= INT_MAX);
                        if ((error = xlog_find_verify_log_record(log,
                                                        start_blk, &new_blk,
                                                        (int)head_blk)) == -1) {
                                error = XFS_ERROR(EIO);
                                goto bp_err;
                        } else if (error)
                                goto bp_err;
                        if (new_blk != log_bbnum)
                                head_blk = new_blk;
                } else if (error)
                        goto bp_err;
        }

        xlog_put_bp(bp);
        if (head_blk == log_bbnum)
                *return_head_blk = 0;
        else
                *return_head_blk = head_blk;
        /*
         * When returning here, we have a good block number.  Bad block
         * means that during a previous crash, we didn't have a clean break
         * from cycle number N to cycle number N-1.  In this case, we need
         * to find the first block with cycle number N-1.
         */
        return 0;

bp_err:
        xlog_put_bp(bp);

        if (error)
                xfs_warn(log->l_mp, "failed to find log head");
        return error;
}
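
/*
 * Worked example (editor's illustration): in a 1000 block log with
 * head_blk = 100 and num_scan_bblks = 512, the two-part scan above first
 * checks blocks 588..999 for stop_on_cycle - 1 and then blocks 0..99 for
 * stop_on_cycle itself, together covering the full 512 block window that
 * in-flight log I/O could have dirtied before the crash.
 */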

/*
 * Find the sync block number or the tail of the log.
 *
 * This will be the block number of the last record to have its
 * associated buffers synced to disk.  Every log record header has
 * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
 * to get a sync block number.  The only concern is to figure out which
 * log record header to believe.
 *
 * The following algorithm uses the log record header with the largest
 * lsn.  The entire log record does not need to be valid.  We only care
 * that the header is valid.
 *
 * We could speed up search by using current head_blk buffer, but it is not
 * available.
 */
STATIC int
xlog_find_tail(
        xlog_t                  *log,
        xfs_daddr_t             *head_blk,
        xfs_daddr_t             *tail_blk)
{
        xlog_rec_header_t       *rhead;
        xlog_op_header_t        *op_head;
        xfs_caddr_t             offset = NULL;
        xfs_buf_t               *bp;
        int                     error, i, found;
        xfs_daddr_t             umount_data_blk;
        xfs_daddr_t             after_umount_blk;
        xfs_lsn_t               tail_lsn;
        int                     hblks;

        found = 0;

        /*
         * Find previous log record
         */
        if ((error = xlog_find_head(log, head_blk)))
                return error;

        bp = xlog_get_bp(log, 1);
        if (!bp)
                return ENOMEM;
        if (*head_blk == 0) {                           /* special case */
                error = xlog_bread(log, 0, 1, bp, &offset);
                if (error)
                        goto done;

                if (xlog_get_cycle(offset) == 0) {
                        *tail_blk = 0;
                        /* leave all other log inited values alone */
                        goto done;
                }
        }

        /*
         * Search backwards looking for log record header block
         */
        ASSERT(*head_blk < INT_MAX);
        for (i = (int)(*head_blk) - 1; i >= 0; i--) {
                error = xlog_bread(log, i, 1, bp, &offset);
                if (error)
                        goto done;

                if (*(__be32 *)offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
                        found = 1;
                        break;
                }
        }
        /*
         * If we haven't found the log record header block, start looking
         * again from the end of the physical log.  XXXmiken: There should be
         * a check here to make sure we didn't search more than N blocks in
         * the previous code.
         */
        if (!found) {
                for (i = log->l_logBBsize - 1; i >= (int)(*head_blk); i--) {
                        error = xlog_bread(log, i, 1, bp, &offset);
                        if (error)
                                goto done;

                        if (*(__be32 *)offset ==
                            cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
                                found = 2;
                                break;
                        }
                }
        }
        if (!found) {
                xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
                ASSERT(0);
                return XFS_ERROR(EIO);
        }

        /* find blk_no of tail of log */
        rhead = (xlog_rec_header_t *)offset;
        *tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));

        /*
         * Reset log values according to the state of the log when we
         * crashed.  In the case where head_blk == 0, we bump curr_cycle
         * one because the next write starts a new cycle rather than
         * continuing the cycle of the last good log record.  At this
         * point we have guaranteed that all partial log records have been
         * accounted for.  Therefore, we know that the last good log record
         * written was complete and ended exactly on the end boundary
         * of the physical log.
         */
        log->l_prev_block = i;
        log->l_curr_block = (int)*head_blk;
        log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
        if (found == 2)
                log->l_curr_cycle++;
        atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
        atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
        xlog_assign_grant_head(&log->l_grant_reserve_head, log->l_curr_cycle,
                                        BBTOB(log->l_curr_block));
        xlog_assign_grant_head(&log->l_grant_write_head, log->l_curr_cycle,
                                        BBTOB(log->l_curr_block));

        /*
         * Look for unmount record.  If we find it, then we know there
         * was a clean unmount.  Since 'i' could be the last block in
         * the physical log, we convert to a log block before comparing
         * to the head_blk.
         *
         * Save the current tail lsn to use to pass to
         * xlog_clear_stale_blocks() below.  We won't want to clear the
         * unmount record if there is one, so we pass the lsn of the
         * unmount record rather than the block after it.
         */
        if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
                int     h_size = be32_to_cpu(rhead->h_size);
                int     h_version = be32_to_cpu(rhead->h_version);

                if ((h_version & XLOG_VERSION_2) &&
                    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
                        hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
                        if (h_size % XLOG_HEADER_CYCLE_SIZE)
                                hblks++;
                } else {
                        hblks = 1;
                }
        } else {
                hblks = 1;
        }
        after_umount_blk = (i + hblks + (int)
                BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize;
        tail_lsn = atomic64_read(&log->l_tail_lsn);
        if (*head_blk == after_umount_blk &&
            be32_to_cpu(rhead->h_num_logops) == 1) {
                umount_data_blk = (i + hblks) % log->l_logBBsize;
                error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
                if (error)
                        goto done;

                op_head = (xlog_op_header_t *)offset;
                if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
                        /*
                         * Set tail and last sync so that newly written
                         * log records will point recovery to after the
                         * current unmount record.
                         */
                        xlog_assign_atomic_lsn(&log->l_tail_lsn,
                                        log->l_curr_cycle, after_umount_blk);
                        xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
                                        log->l_curr_cycle, after_umount_blk);
                        *tail_blk = after_umount_blk;

                        /*
                         * Note that the unmount was clean. If the unmount
                         * was not clean, we need to know this to rebuild the
                         * superblock counters from the perag headers if we
                         * have a filesystem using non-persistent counters.
                         */
                        log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
                }
        }

        /*
         * Make sure that there are no blocks in front of the head
         * with the same cycle number as the head.  This can happen
         * because we allow multiple outstanding log writes concurrently,
         * and the later writes might make it out before earlier ones.
         *
         * We use the lsn from before modifying it so that we'll never
         * overwrite the unmount record after a clean unmount.
         *
         * Do this only if we are going to recover the filesystem
         *
         * NOTE: This used to say "if (!readonly)"
         * However on Linux, we can & do recover a read-only filesystem.
         * We only skip recovery if NORECOVERY is specified on mount,
         * in which case we would not be here.
         *
         * But... if the -device- itself is readonly, just skip this.
         * We can't recover this device anyway, so it won't matter.
         */
        if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp))
                error = xlog_clear_stale_blocks(log, tail_lsn);

done:
        xlog_put_bp(bp);

        if (error)
                xfs_warn(log->l_mp, "failed to locate log tail");
        return error;
}
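
/*
 * Worked example (editor's illustration): suppose the record header was
 * found at block i = 90 with hblks = 1 and h_len = 3584 bytes, so that
 * after_umount_blk = (90 + 1 + BTOBB(3584)) % l_logBBsize = 98.  If the
 * head is also at block 98 and the record holds exactly one log
 * operation, block 91 is read back and checked for XLOG_UNMOUNT_TRANS.
 */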

/*
 * Is the log zeroed at all?
 *
 * The last binary search should be changed to perform an X block read
 * once X becomes small enough.  You can then search linearly through
 * the X blocks.  This will cut down on the number of reads we need to do.
 *
 * If the log is partially zeroed, this routine will pass back the blkno
 * of the first block with cycle number 0.  It won't have a complete LR
 * preceding it.
 *
 * Return:
 *      0  => the log is completely written to
 *      -1 => use *blk_no as the first block of the log
 *      >0 => error has occurred
 */
STATIC int
xlog_find_zeroed(
        xlog_t          *log,
        xfs_daddr_t     *blk_no)
{
        xfs_buf_t       *bp;
        xfs_caddr_t     offset;
        uint            first_cycle, last_cycle;
        xfs_daddr_t     new_blk, last_blk, start_blk;
        xfs_daddr_t     num_scan_bblks;
        int             error, log_bbnum = log->l_logBBsize;

        *blk_no = 0;

        /* check totally zeroed log */
        bp = xlog_get_bp(log, 1);
        if (!bp)
                return ENOMEM;
        error = xlog_bread(log, 0, 1, bp, &offset);
        if (error)
                goto bp_err;

        first_cycle = xlog_get_cycle(offset);
        if (first_cycle == 0) {         /* completely zeroed log */
                *blk_no = 0;
                xlog_put_bp(bp);
                return -1;
        }

        /* check partially zeroed log */
        error = xlog_bread(log, log_bbnum-1, 1, bp, &offset);
        if (error)
                goto bp_err;

        last_cycle = xlog_get_cycle(offset);
        if (last_cycle != 0) {          /* log completely written to */
                xlog_put_bp(bp);
                return 0;
        } else if (first_cycle != 1) {
                /*
                 * If the cycle of the last block is zero, the cycle of
                 * the first block must be 1. If it's not, maybe we're
                 * not looking at a log... Bail out.
                 */
                xfs_warn(log->l_mp,
                        "Log inconsistent or not a log (last==0, first!=1)");
                return XFS_ERROR(EINVAL);
        }

        /* we have a partially zeroed log */
        last_blk = log_bbnum-1;
        if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0)))
                goto bp_err;

        /*
         * Validate the answer.  Because there is no way to guarantee that
         * the entire log is made up of log records which are the same size,
         * we scan over the defined maximum blocks.  At this point, the maximum
         * is not chosen to mean anything special.   XXXmiken
         */
        num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
        ASSERT(num_scan_bblks <= INT_MAX);

        if (last_blk < num_scan_bblks)
                num_scan_bblks = last_blk;
        start_blk = last_blk - num_scan_bblks;

        /*
         * We search for any instances of cycle number 0 that occur before
         * our current estimate of the head.  What we're trying to detect is
         *        1 ... | 0 | 1 | 0...
         *                       ^ binary search ends here
         */
        if ((error = xlog_find_verify_cycle(log, start_blk,
                                         (int)num_scan_bblks, 0, &new_blk)))
                goto bp_err;
        if (new_blk != -1)
                last_blk = new_blk;

        /*
         * Potentially backup over partial log record write.  We don't need
         * to search the end of the log because we know it is zero.
         */
        if ((error = xlog_find_verify_log_record(log, start_blk,
                                &last_blk, 0)) == -1) {
                error = XFS_ERROR(EIO);
                goto bp_err;
        } else if (error)
                goto bp_err;

        *blk_no = last_blk;
bp_err:
        xlog_put_bp(bp);
        if (error)
                return error;
        return -1;
}

/*
 * These are simple subroutines used by xlog_clear_stale_blocks() below
 * to initialize a buffer full of empty log record headers and write
 * them into the log.
 */
STATIC void
xlog_add_record(
        xlog_t                  *log,
        xfs_caddr_t             buf,
        int                     cycle,
        int                     block,
        int                     tail_cycle,
        int                     tail_block)
{
        xlog_rec_header_t       *recp = (xlog_rec_header_t *)buf;

        memset(buf, 0, BBSIZE);
        recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
        recp->h_cycle = cpu_to_be32(cycle);
        recp->h_version = cpu_to_be32(
                        xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
        recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
        recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
        recp->h_fmt = cpu_to_be32(XLOG_FMT);
        memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
}

STATIC int
xlog_write_log_records(
        xlog_t          *log,
        int             cycle,
        int             start_block,
        int             blocks,
        int             tail_cycle,
        int             tail_block)
{
        xfs_caddr_t     offset;
        xfs_buf_t       *bp;
        int             balign, ealign;
        int             sectbb = log->l_sectBBsize;
        int             end_block = start_block + blocks;
        int             bufblks;
        int             error = 0;
        int             i, j = 0;

        /*
         * Greedily allocate a buffer big enough to handle the full
         * range of basic blocks to be written.  If that fails, try
         * a smaller size.  We need to be able to write at least a
         * log sector, or we're out of luck.
         */
        bufblks = 1 << ffs(blocks);
        while (!(bp = xlog_get_bp(log, bufblks))) {
                bufblks >>= 1;
                if (bufblks < sectbb)
                        return ENOMEM;
        }

        /* We may need to do a read at the start to fill in part of
         * the buffer in the starting sector not covered by the first
         * write below.
         */
        balign = round_down(start_block, sectbb);
        if (balign != start_block) {
                error = xlog_bread_noalign(log, start_block, 1, bp);
                if (error)
                        goto out_put_bp;

                j = start_block - balign;
        }

        for (i = start_block; i < end_block; i += bufblks) {
                int             bcount, endcount;

                bcount = min(bufblks, end_block - start_block);
                endcount = bcount - j;

                /* We may need to do a read at the end to fill in part of
                 * the buffer in the final sector not covered by the write.
                 * If this is the same sector as the above read, skip it.
                 */
                ealign = round_down(end_block, sectbb);
                if (j == 0 && (start_block + endcount > ealign)) {
                        offset = XFS_BUF_PTR(bp) + BBTOB(ealign - start_block);
                        error = xlog_bread_offset(log, ealign, sectbb,
                                                        bp, offset);
                        if (error)
                                break;

                }

                offset = xlog_align(log, start_block, endcount, bp);
                for (; j < endcount; j++) {
                        xlog_add_record(log, offset, cycle, i+j,
                                        tail_cycle, tail_block);
                        offset += BBSIZE;
                }
                error = xlog_bwrite(log, start_block, endcount, bp);
                if (error)
                        break;
                start_block += endcount;
                j = 0;
        }

 out_put_bp:
        xlog_put_bp(bp);
        return error;
}
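
/*
 * Worked example (editor's illustration): with sectbb = 8 and
 * start_block = 100, balign = 96, so one read first preserves the live
 * contents of blocks 96..99 in the buffer and j starts at 4; the record
 * headers are then stamped from the buffer position of block 100 onward
 * and written back sector-aligned, leaving the leading partial sector
 * intact.  With 512 byte sectors (sectbb = 1) both fill-in reads are
 * skipped, since every block boundary is also a sector boundary.
 */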

/*
 * This routine is called to blow away any incomplete log writes out
 * in front of the log head.  We do this so that we won't become confused
 * if we come up, write only a little bit more, and then crash again.
 * If we leave the partial log records out there, this situation could
 * cause us to think those partial writes are valid blocks since they
 * have the current cycle number.  We get rid of them by overwriting them
 * with empty log records with the old cycle number rather than the
 * current one.
 *
 * The tail lsn is passed in rather than taken from
 * the log so that we will not write over the unmount record after a
 * clean unmount in a 512 block log.  Doing so would leave the log without
 * any valid log records in it until a new one was written.  If we crashed
 * during that time we would not be able to recover.
 */
STATIC int
xlog_clear_stale_blocks(
        xlog_t          *log,
        xfs_lsn_t       tail_lsn)
{
        int             tail_cycle, head_cycle;
        int             tail_block, head_block;
        int             tail_distance, max_distance;
        int             distance;
        int             error;

        tail_cycle = CYCLE_LSN(tail_lsn);
        tail_block = BLOCK_LSN(tail_lsn);
        head_cycle = log->l_curr_cycle;
        head_block = log->l_curr_block;

        /*
         * Figure out the distance between the new head of the log
         * and the tail.  We want to write over any blocks beyond the
         * head that we may have written just before the crash, but
         * we don't want to overwrite the tail of the log.
         */
        if (head_cycle == tail_cycle) {
                /*
                 * The tail is behind the head in the physical log,
                 * so the distance from the head to the tail is the
                 * distance from the head to the end of the log plus
                 * the distance from the beginning of the log to the
                 * tail.
                 */
                if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
                        XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
                                         XFS_ERRLEVEL_LOW, log->l_mp);
                        return XFS_ERROR(EFSCORRUPTED);
                }
                tail_distance = tail_block + (log->l_logBBsize - head_block);
        } else {
                /*
                 * The head is behind the tail in the physical log,
                 * so the distance from the head to the tail is just
                 * the tail block minus the head block.
                 */
                if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
                        XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
                                         XFS_ERRLEVEL_LOW, log->l_mp);
                        return XFS_ERROR(EFSCORRUPTED);
                }
                tail_distance = tail_block - head_block;
        }

        /*
         * If the head is right up against the tail, we can't clear
         * anything.
         */
        if (tail_distance <= 0) {
                ASSERT(tail_distance == 0);
                return 0;
        }

        max_distance = XLOG_TOTAL_REC_SHIFT(log);
        /*
         * Take the smaller of the maximum amount of outstanding I/O
         * we could have and the distance to the tail to clear out.
         * We take the smaller so that we don't overwrite the tail and
         * we don't waste all day writing from the head to the tail
         * for no reason.
         */
        max_distance = MIN(max_distance, tail_distance);

        if ((head_block + max_distance) <= log->l_logBBsize) {
                /*
                 * We can stomp all the blocks we need to without
                 * wrapping around the end of the log.  Just do it
                 * in a single write.  Use the cycle number of the
                 * current cycle minus one so that the log will look like:
                 *     n ... | n - 1 ...
                 */
                error = xlog_write_log_records(log, (head_cycle - 1),
                                head_block, max_distance, tail_cycle,
                                tail_block);
                if (error)
                        return error;
        } else {
                /*
                 * We need to wrap around the end of the physical log in
                 * order to clear all the blocks.  Do it in two separate
                 * I/Os.  The first write should be from the head to the
                 * end of the physical log, and it should use the current
                 * cycle number minus one just like above.
                 */
                distance = log->l_logBBsize - head_block;
                error = xlog_write_log_records(log, (head_cycle - 1),
                                head_block, distance, tail_cycle,
                                tail_block);

                if (error)
                        return error;

                /*
                 * Now write the blocks at the start of the physical log.
                 * This writes the remainder of the blocks we want to clear.
                 * It uses the current cycle number since we're now on the
                 * same cycle as the head so that we get:
                 *    n ... n ... | n - 1 ...
                 *    ^^^^^ blocks we're writing
                 */
                distance = max_distance - (log->l_logBBsize - head_block);
                error = xlog_write_log_records(log, head_cycle, 0, distance,
                                tail_cycle, tail_block);
                if (error)
                        return error;
        }

        return 0;
}
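
/*
 * Worked example (editor's illustration): in a 1000 block log with
 * head_cycle = 7, head_block = 900 and max_distance = 200 after
 * clamping, the clearing wraps: the first call stamps blocks 900..999
 * with cycle 6 headers and the second stamps blocks 0..99 with cycle 7,
 * so a later head search cannot mistake stale blocks for new writes.
 */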

/******************************************************************************
 *
 *              Log recover routines
 *
 ******************************************************************************
 */

STATIC xlog_recover_t *
xlog_recover_find_tid(
        struct hlist_head       *head,
        xlog_tid_t              tid)
{
        xlog_recover_t          *trans;
        struct hlist_node       *n;

        hlist_for_each_entry(trans, n, head, r_list) {
                if (trans->r_log_tid == tid)
                        return trans;
        }
        return NULL;
}

STATIC void
xlog_recover_new_tid(
        struct hlist_head       *head,
        xlog_tid_t              tid,
        xfs_lsn_t               lsn)
{
        xlog_recover_t          *trans;

        trans = kmem_zalloc(sizeof(xlog_recover_t), KM_SLEEP);
        trans->r_log_tid = tid;
        trans->r_lsn = lsn;
        INIT_LIST_HEAD(&trans->r_itemq);

        INIT_HLIST_NODE(&trans->r_list);
        hlist_add_head(&trans->r_list, head);
}

STATIC void
xlog_recover_add_item(
        struct list_head        *head)
{
        xlog_recover_item_t     *item;

        item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
        INIT_LIST_HEAD(&item->ri_list);
        list_add_tail(&item->ri_list, head);
}

STATIC int
xlog_recover_add_to_cont_trans(
        struct log              *log,
        xlog_recover_t          *trans,
        xfs_caddr_t             dp,
        int                     len)
{
        xlog_recover_item_t     *item;
        xfs_caddr_t             ptr, old_ptr;
        int                     old_len;

        if (list_empty(&trans->r_itemq)) {
                /* finish copying rest of trans header */
                xlog_recover_add_item(&trans->r_itemq);
                ptr = (xfs_caddr_t) &trans->r_theader +
                                sizeof(xfs_trans_header_t) - len;
                memcpy(ptr, dp, len); /* d, s, l */
                return 0;
        }
        /* take the tail entry */
        item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);

        old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
        old_len = item->ri_buf[item->ri_cnt-1].i_len;

        ptr = kmem_realloc(old_ptr, len+old_len, old_len, 0u);
        memcpy(&ptr[old_len], dp, len); /* d, s, l */
        item->ri_buf[item->ri_cnt-1].i_len += len;
        item->ri_buf[item->ri_cnt-1].i_addr = ptr;
        trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
        return 0;
}

/*
 * The next region to add is the start of a new region.  It could be
 * a whole region or it could be the first part of a new region.  Because
 * of this, the assumption here is that the type and size fields of all
 * format structures fit into the first 32 bits of the structure.
 *
 * This works because all regions must be 32 bit aligned.  Therefore, we
 * either have both fields or we have neither field.  In the case we have
 * neither field, the data part of the region is zero length.  We only have
 * a log_op_header and can throw away the header since a new one will appear
 * later.  If we have at least 4 bytes, then we can determine how many regions
 * will appear in the current log item.
 */
STATIC int
xlog_recover_add_to_trans(
        struct log              *log,
        xlog_recover_t          *trans,
        xfs_caddr_t             dp,
        int                     len)
{
        xfs_inode_log_format_t  *in_f;                  /* any will do */
        xlog_recover_item_t     *item;
        xfs_caddr_t             ptr;

        if (!len)
                return 0;
        if (list_empty(&trans->r_itemq)) {
                /* we need to catch log corruptions here */
                if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
                        xfs_warn(log->l_mp, "%s: bad header magic number",
                                __func__);
                        ASSERT(0);
                        return XFS_ERROR(EIO);
                }
                if (len == sizeof(xfs_trans_header_t))
                        xlog_recover_add_item(&trans->r_itemq);
                memcpy(&trans->r_theader, dp, len); /* d, s, l */
                return 0;
        }

        ptr = kmem_alloc(len, KM_SLEEP);
        memcpy(ptr, dp, len);
        in_f = (xfs_inode_log_format_t *)ptr;

        /* take the tail entry */
        item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
        if (item->ri_total != 0 &&
             item->ri_total == item->ri_cnt) {
                /* tail item is in use, get a new one */
                xlog_recover_add_item(&trans->r_itemq);
                item = list_entry(trans->r_itemq.prev,
                                        xlog_recover_item_t, ri_list);
        }

        if (item->ri_total == 0) {              /* first region to be added */
                if (in_f->ilf_size == 0 ||
                    in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
                        xfs_warn(log->l_mp,
                "bad number of regions (%d) in inode log format",
                                  in_f->ilf_size);
                        ASSERT(0);
                        return XFS_ERROR(EIO);
                }

                item->ri_total = in_f->ilf_size;
                item->ri_buf =
                        kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
                                    KM_SLEEP);
        }
        ASSERT(item->ri_total > item->ri_cnt);
        /* Description region is ri_buf[0] */
        item->ri_buf[item->ri_cnt].i_addr = ptr;
        item->ri_buf[item->ri_cnt].i_len = len;
        item->ri_cnt++;
        trace_xfs_log_recover_item_add(log, trans, item, 0);
        return 0;
}
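
/*
 * Illustrative sketch (editor's addition, not part of the original
 * file): why peeking at the first 32 bits is safe.  Every log item
 * format structure begins with a 16 bit type and a 16 bit size, as in
 * the inode case used above, so any region fragment of at least 4
 * bytes is enough to learn how many regions the item will carry.  The
 * name "peek" is hypothetical:
 */
#if 0
        struct {
                __uint16_t      type;   /* e.g. XFS_LI_INODE */
                __uint16_t      size;   /* regions in this item */
        } *peek = (void *)dp;           /* first 4 bytes of the region */
#endif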

/*
 * Sort the log items in the transaction.  Cancelled buffers need
 * to be put first so they are processed before any items that might
 * modify the buffers.  If they are cancelled, then the modifications
 * don't need to be replayed.
 */
STATIC int
xlog_recover_reorder_trans(
        struct log              *log,
        xlog_recover_t          *trans,
        int                     pass)
{
        xlog_recover_item_t     *item, *n;
        LIST_HEAD(sort_list);

        list_splice_init(&trans->r_itemq, &sort_list);
        list_for_each_entry_safe(item, n, &sort_list, ri_list) {
                xfs_buf_log_format_t    *buf_f = item->ri_buf[0].i_addr;

                switch (ITEM_TYPE(item)) {
                case XFS_LI_BUF:
                        if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
                                trace_xfs_log_recover_item_reorder_head(log,
                                                        trans, item, pass);
                                list_move(&item->ri_list, &trans->r_itemq);
                                break;
                        }
                case XFS_LI_INODE:
                case XFS_LI_DQUOT:
                case XFS_LI_QUOTAOFF:
                case XFS_LI_EFD:
                case XFS_LI_EFI:
                        trace_xfs_log_recover_item_reorder_tail(log,
                                                        trans, item, pass);
                        list_move_tail(&item->ri_list, &trans->r_itemq);
                        break;
                default:
                        xfs_warn(log->l_mp,
                                "%s: unrecognized type of log operation",
                                __func__);
                        ASSERT(0);
                        return XFS_ERROR(EIO);
                }
        }
        ASSERT(list_empty(&sort_list));
        return 0;
}

/*
 * Build up the table of buf cancel records so that we don't replay
 * cancelled data in the second pass.  For buffer records that are
 * not cancel records, there is nothing to do here so we just return.
 *
 * If we get a cancel record which is already in the table, this indicates
 * that the buffer was cancelled multiple times.  In order to ensure
 * that during pass 2 we keep the record in the table until we reach its
 * last occurrence in the log, we keep a reference count in the cancel
 * record in the table to tell us how many times we expect to see this
 * record during the second pass.
 */
STATIC int
xlog_recover_buffer_pass1(
        struct log              *log,
        xlog_recover_item_t     *item)
{
        xfs_buf_log_format_t    *buf_f = item->ri_buf[0].i_addr;
        struct list_head        *bucket;
        struct xfs_buf_cancel   *bcp;

        /*
         * If this isn't a cancel buffer item, then just return.
         */
        if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
                trace_xfs_log_recover_buf_not_cancel(log, buf_f);
                return 0;
        }

        /*
         * Insert an xfs_buf_cancel record into the hash table of them.
         * If there is already an identical record, bump its reference count.
         */
        bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno);
        list_for_each_entry(bcp, bucket, bc_list) {
                if (bcp->bc_blkno == buf_f->blf_blkno &&
                    bcp->bc_len == buf_f->blf_len) {
                        bcp->bc_refcount++;
                        trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
                        return 0;
                }
        }

        bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP);
        bcp->bc_blkno = buf_f->blf_blkno;
        bcp->bc_len = buf_f->blf_len;
        bcp->bc_refcount = 1;
        list_add_tail(&bcp->bc_list, bucket);

        trace_xfs_log_recover_buf_cancel_add(log, buf_f);
        return 0;
}

/*
 * Check to see whether the buffer being recovered has a corresponding
 * entry in the buffer cancel record table. If it does then return 1
 * so that it will be cancelled, otherwise return 0. If the buffer is
 * actually a buffer cancel item (XFS_BLF_CANCEL is set), then decrement
 * the refcount on the entry in the table and remove it from the table
 * if this is the last reference.
 *
 * We remove the cancel record from the table when we encounter its
 * last occurrence in the log so that if the same buffer is re-used
 * again after its last cancellation we actually replay the changes
 * made at that point.
 */
STATIC int
xlog_check_buffer_cancelled(
        struct log              *log,
        xfs_daddr_t             blkno,
        uint                    len,
        ushort                  flags)
{
        struct list_head        *bucket;
        struct xfs_buf_cancel   *bcp;

        if (log->l_buf_cancel_table == NULL) {
                /*
                 * There is nothing in the table built in pass one,
                 * so this buffer must not be cancelled.
                 */
                ASSERT(!(flags & XFS_BLF_CANCEL));
                return 0;
        }

        /*
         * Search for an entry in the cancel table that matches our buffer.
         */
        bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
        list_for_each_entry(bcp, bucket, bc_list) {
                if (bcp->bc_blkno == blkno && bcp->bc_len == len)
                        goto found;
        }

        /*
         * We didn't find a corresponding entry in the table, so return 0 so
         * that the buffer is NOT cancelled.
         */
        ASSERT(!(flags & XFS_BLF_CANCEL));
        return 0;

found:
        /*
         * We've got a match, so return 1 so that the recovery of this buffer
         * is cancelled.  If this buffer is actually a buffer cancel log
         * item, then decrement the refcount on the one in the table and
         * remove it if this is the last reference.
         */
        if (flags & XFS_BLF_CANCEL) {
                if (--bcp->bc_refcount == 0) {
                        list_del(&bcp->bc_list);
                        kmem_free(bcp);
                }
        }
        return 1;
}
1748
1749 /*
1750 * Perform recovery for a buffer full of inodes. In these buffers, the only
1751 * data which should be recovered is that which corresponds to the
1752 * di_next_unlinked pointers in the on disk inode structures. The rest of the
1753 * data for the inodes is always logged through the inodes themselves rather
1754 * than the inode buffer and is recovered in xlog_recover_inode_pass2().
1755 *
1756 * The only time when buffers full of inodes are fully recovered is when the
1757 * buffer is full of newly allocated inodes. In this case the buffer will
1758 * not be marked as an inode buffer and so will be sent to
1759 * xlog_recover_do_reg_buffer() below during recovery.
1760 */
1761 STATIC int
1762 xlog_recover_do_inode_buffer(
1763 struct xfs_mount *mp,
1764 xlog_recover_item_t *item,
1765 struct xfs_buf *bp,
1766 xfs_buf_log_format_t *buf_f)
1767 {
1768 int i;
1769 int item_index = 0;
1770 int bit = 0;
1771 int nbits = 0;
1772 int reg_buf_offset = 0;
1773 int reg_buf_bytes = 0;
1774 int next_unlinked_offset;
1775 int inodes_per_buf;
1776 xfs_agino_t *logged_nextp;
1777 xfs_agino_t *buffer_nextp;
1778
1779 trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
1780
1781 inodes_per_buf = XFS_BUF_COUNT(bp) >> mp->m_sb.sb_inodelog;
1782 for (i = 0; i < inodes_per_buf; i++) {
1783 next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
1784 offsetof(xfs_dinode_t, di_next_unlinked);
1785
1786 while (next_unlinked_offset >=
1787 (reg_buf_offset + reg_buf_bytes)) {
1788 /*
1789 * The next di_next_unlinked field is beyond
1790 * the current logged region. Find the next
1791 * logged region that contains or is beyond
1792 * the current di_next_unlinked field.
1793 */
1794 bit += nbits;
1795 bit = xfs_next_bit(buf_f->blf_data_map,
1796 buf_f->blf_map_size, bit);
1797
1798 /*
1799 * If there are no more logged regions in the
1800 * buffer, then we're done.
1801 */
1802 if (bit == -1)
1803 return 0;
1804
1805 nbits = xfs_contig_bits(buf_f->blf_data_map,
1806 buf_f->blf_map_size, bit);
1807 ASSERT(nbits > 0);
1808 reg_buf_offset = bit << XFS_BLF_SHIFT;
1809 reg_buf_bytes = nbits << XFS_BLF_SHIFT;
1810 item_index++;
1811 }
1812
1813 /*
1814 * If the current logged region starts after the current
1815 * di_next_unlinked field, then move on to the next
1816 * di_next_unlinked field.
1817 */
1818 if (next_unlinked_offset < reg_buf_offset)
1819 continue;
1820
1821 ASSERT(item->ri_buf[item_index].i_addr != NULL);
1822 ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
1823 ASSERT((reg_buf_offset + reg_buf_bytes) <= XFS_BUF_COUNT(bp));
1824
1825 /*
1826 * The current logged region contains a copy of the
1827 * current di_next_unlinked field. Extract its value
1828 * and copy it to the buffer copy.
1829 */
1830 logged_nextp = item->ri_buf[item_index].i_addr +
1831 next_unlinked_offset - reg_buf_offset;
1832 if (unlikely(*logged_nextp == 0)) {
1833 xfs_alert(mp,
1834 "Bad inode buffer log record (ptr = 0x%p, bp = 0x%p). "
1835 "Trying to replay bad (0) inode di_next_unlinked field.",
1836 item, bp);
1837 XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
1838 XFS_ERRLEVEL_LOW, mp);
1839 return XFS_ERROR(EFSCORRUPTED);
1840 }
1841
1842 buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp,
1843 next_unlinked_offset);
1844 *buffer_nextp = *logged_nextp;
1845 }
1846
1847 return 0;
1848 }
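
/*
 * A small sketch of the offset arithmetic in the loop above, assuming an
 * illustrative dinode layout: inode i's di_next_unlinked pointer lives at
 * i * inode_size plus the field's offset within the on-disk inode.
 */
#include <stddef.h>

struct dinode_sketch {
	unsigned char	core[8];	/* stand-in for the preceding fields */
	unsigned int	di_next_unlinked;
};

static size_t next_unlinked_offset(size_t inode_size, int i)
{
	return (size_t)i * inode_size +
	       offsetof(struct dinode_sketch, di_next_unlinked);
}
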
1849
1850 /*
1851 * Perform a 'normal' buffer recovery. Each logged region of the
1852 * buffer should be copied over the corresponding region in the
1853 * given buffer. The bitmap in the buf log format structure indicates
1854 * where to place the logged data.
1855 */
1856 STATIC void
1857 xlog_recover_do_reg_buffer(
1858 struct xfs_mount *mp,
1859 xlog_recover_item_t *item,
1860 struct xfs_buf *bp,
1861 xfs_buf_log_format_t *buf_f)
1862 {
1863 int i;
1864 int bit;
1865 int nbits;
1866 int error;
1867
1868 trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
1869
1870 bit = 0;
1871 i = 1; /* 0 is the buf format structure */
1872 while (1) {
1873 bit = xfs_next_bit(buf_f->blf_data_map,
1874 buf_f->blf_map_size, bit);
1875 if (bit == -1)
1876 break;
1877 nbits = xfs_contig_bits(buf_f->blf_data_map,
1878 buf_f->blf_map_size, bit);
1879 ASSERT(nbits > 0);
1880 ASSERT(item->ri_buf[i].i_addr != NULL);
1881 ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
1882 ASSERT(XFS_BUF_COUNT(bp) >=
1883 ((uint)bit << XFS_BLF_SHIFT)+(nbits<<XFS_BLF_SHIFT));
1884
1885 /*
1886 * Do a sanity check if this is a dquot buffer. Just checking
1887 	 * the first dquot in the buffer should do. XXX: this is
1888 * probably a good thing to do for other buf types also.
1889 */
1890 error = 0;
1891 if (buf_f->blf_flags &
1892 (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
1893 if (item->ri_buf[i].i_addr == NULL) {
1894 xfs_alert(mp,
1895 "XFS: NULL dquot in %s.", __func__);
1896 goto next;
1897 }
1898 if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
1899 xfs_alert(mp,
1900 "XFS: dquot too small (%d) in %s.",
1901 item->ri_buf[i].i_len, __func__);
1902 goto next;
1903 }
1904 error = xfs_qm_dqcheck(mp, item->ri_buf[i].i_addr,
1905 -1, 0, XFS_QMOPT_DOWARN,
1906 "dquot_buf_recover");
1907 if (error)
1908 goto next;
1909 }
1910
1911 memcpy(xfs_buf_offset(bp,
1912 (uint)bit << XFS_BLF_SHIFT), /* dest */
1913 item->ri_buf[i].i_addr, /* source */
1914 nbits<<XFS_BLF_SHIFT); /* length */
1915 next:
1916 i++;
1917 bit += nbits;
1918 }
1919
1920 /* Shouldn't be any more regions */
1921 ASSERT(i == item->ri_total);
1922 }
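
/*
 * A standalone sketch of the bitmap walk above, with stand-ins for
 * xfs_next_bit() and xfs_contig_bits(); each set bit describes one
 * 128-byte (1 << XFS_BLF_SHIFT) chunk of the buffer.
 */
#define CHUNK_SHIFT	7	/* assumed value of XFS_BLF_SHIFT */

/* First set bit at or after 'bit', or -1 if none remain. */
static int next_bit(const unsigned int *map, int nbits, int bit)
{
	for (; bit < nbits; bit++)
		if (map[bit / 32] & (1u << (bit % 32)))
			return bit;
	return -1;
}

/* Length of the run of set bits starting at 'bit'. */
static int contig_bits(const unsigned int *map, int nbits, int bit)
{
	int n = 0;

	while (bit + n < nbits &&
	       (map[(bit + n) / 32] & (1u << ((bit + n) % 32))))
		n++;
	return n;	/* copy n << CHUNK_SHIFT bytes at bit << CHUNK_SHIFT */
}
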
1923
1924 /*
1925 * Do some primitive error checking on ondisk dquot data structures.
1926 */
1927 int
1928 xfs_qm_dqcheck(
1929 struct xfs_mount *mp,
1930 xfs_disk_dquot_t *ddq,
1931 xfs_dqid_t id,
1932 	uint			type,	  /* used only when repairing (XFS_QMOPT_DQREPAIR) */
1933 uint flags,
1934 char *str)
1935 {
1936 xfs_dqblk_t *d = (xfs_dqblk_t *)ddq;
1937 int errs = 0;
1938
1939 /*
1940 * We can encounter an uninitialized dquot buffer for 2 reasons:
1941 * 1. If we crash while deleting the quotainode(s), and those blks got
1942 * used for user data. This is because we take the path of regular
1943 * file deletion; however, the size field of quotainodes is never
1944 * updated, so all the tricks that we play in itruncate_finish
1945 * don't quite matter.
1946 *
1947 	 * 2. We don't replay the quota buffers when there's a quotaoff logitem.
1948 * But the allocation will be replayed so we'll end up with an
1949 * uninitialized quota block.
1950 *
1951 * This is all fine; things are still consistent, and we haven't lost
1952 * any quota information. Just don't complain about bad dquot blks.
1953 */
1954 if (ddq->d_magic != cpu_to_be16(XFS_DQUOT_MAGIC)) {
1955 if (flags & XFS_QMOPT_DOWARN)
1956 xfs_alert(mp,
1957 "%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
1958 str, id, be16_to_cpu(ddq->d_magic), XFS_DQUOT_MAGIC);
1959 errs++;
1960 }
1961 if (ddq->d_version != XFS_DQUOT_VERSION) {
1962 if (flags & XFS_QMOPT_DOWARN)
1963 xfs_alert(mp,
1964 "%s : XFS dquot ID 0x%x, version 0x%x != 0x%x",
1965 str, id, ddq->d_version, XFS_DQUOT_VERSION);
1966 errs++;
1967 }
1968
1969 if (ddq->d_flags != XFS_DQ_USER &&
1970 ddq->d_flags != XFS_DQ_PROJ &&
1971 ddq->d_flags != XFS_DQ_GROUP) {
1972 if (flags & XFS_QMOPT_DOWARN)
1973 xfs_alert(mp,
1974 "%s : XFS dquot ID 0x%x, unknown flags 0x%x",
1975 str, id, ddq->d_flags);
1976 errs++;
1977 }
1978
1979 if (id != -1 && id != be32_to_cpu(ddq->d_id)) {
1980 if (flags & XFS_QMOPT_DOWARN)
1981 xfs_alert(mp,
1982 "%s : ondisk-dquot 0x%p, ID mismatch: "
1983 "0x%x expected, found id 0x%x",
1984 str, ddq, id, be32_to_cpu(ddq->d_id));
1985 errs++;
1986 }
1987
1988 if (!errs && ddq->d_id) {
1989 if (ddq->d_blk_softlimit &&
1990 be64_to_cpu(ddq->d_bcount) >=
1991 be64_to_cpu(ddq->d_blk_softlimit)) {
1992 if (!ddq->d_btimer) {
1993 if (flags & XFS_QMOPT_DOWARN)
1994 xfs_alert(mp,
1995 "%s : Dquot ID 0x%x (0x%p) BLK TIMER NOT STARTED",
1996 str, (int)be32_to_cpu(ddq->d_id), ddq);
1997 errs++;
1998 }
1999 }
2000 if (ddq->d_ino_softlimit &&
2001 be64_to_cpu(ddq->d_icount) >=
2002 be64_to_cpu(ddq->d_ino_softlimit)) {
2003 if (!ddq->d_itimer) {
2004 if (flags & XFS_QMOPT_DOWARN)
2005 xfs_alert(mp,
2006 "%s : Dquot ID 0x%x (0x%p) INODE TIMER NOT STARTED",
2007 str, (int)be32_to_cpu(ddq->d_id), ddq);
2008 errs++;
2009 }
2010 }
2011 if (ddq->d_rtb_softlimit &&
2012 be64_to_cpu(ddq->d_rtbcount) >=
2013 be64_to_cpu(ddq->d_rtb_softlimit)) {
2014 if (!ddq->d_rtbtimer) {
2015 if (flags & XFS_QMOPT_DOWARN)
2016 xfs_alert(mp,
2017 "%s : Dquot ID 0x%x (0x%p) RTBLK TIMER NOT STARTED",
2018 str, (int)be32_to_cpu(ddq->d_id), ddq);
2019 errs++;
2020 }
2021 }
2022 }
2023
2024 if (!errs || !(flags & XFS_QMOPT_DQREPAIR))
2025 return errs;
2026
2027 if (flags & XFS_QMOPT_DOWARN)
2028 xfs_notice(mp, "Re-initializing dquot ID 0x%x", id);
2029
2030 /*
2031 * Typically, a repair is only requested by quotacheck.
2032 */
2033 ASSERT(id != -1);
2034 ASSERT(flags & XFS_QMOPT_DQREPAIR);
2035 memset(d, 0, sizeof(xfs_dqblk_t));
2036
2037 d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
2038 d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
2039 d->dd_diskdq.d_flags = type;
2040 d->dd_diskdq.d_id = cpu_to_be32(id);
2041
2042 return errs;
2043 }
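
/*
 * The three grace-timer checks above share one shape; a hedged sketch:
 * exceeding a nonzero soft limit while the corresponding timer is unset
 * is flagged as an error.
 */
#include <stdbool.h>
#include <stdint.h>

static bool softlimit_timer_missing(uint64_t count, uint64_t softlimit,
				    uint32_t timer)
{
	return softlimit != 0 && count >= softlimit && timer == 0;
}
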
2044
2045 /*
2046 * Perform a dquot buffer recovery.
2047 * Simple algorithm: if we have found a QUOTAOFF logitem of the same type
2048  * (i.e. USR or GRP), then just toss this buffer away; don't recover it.
2049 * Else, treat it as a regular buffer and do recovery.
2050 */
2051 STATIC void
2052 xlog_recover_do_dquot_buffer(
2053 xfs_mount_t *mp,
2054 xlog_t *log,
2055 xlog_recover_item_t *item,
2056 xfs_buf_t *bp,
2057 xfs_buf_log_format_t *buf_f)
2058 {
2059 uint type;
2060
2061 trace_xfs_log_recover_buf_dquot_buf(log, buf_f);
2062
2063 /*
2064 * Filesystems are required to send in quota flags at mount time.
2065 */
2066 if (mp->m_qflags == 0) {
2067 return;
2068 }
2069
2070 type = 0;
2071 if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
2072 type |= XFS_DQ_USER;
2073 if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
2074 type |= XFS_DQ_PROJ;
2075 if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
2076 type |= XFS_DQ_GROUP;
2077 /*
2078 	 * If this type of quota was turned off, ignore this buffer
2079 */
2080 if (log->l_quotaoffs_flag & type)
2081 return;
2082
2083 xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2084 }
2085
2086 /*
2087 * This routine replays a modification made to a buffer at runtime.
2088 * There are actually two types of buffer, regular and inode, which
2089  * are handled differently. From inode buffers we only recover a
2090  * specific set of data, namely
2091 * the inode di_next_unlinked fields. This is because all other inode
2092 * data is actually logged via inode records and any data we replay
2093 * here which overlaps that may be stale.
2094 *
2095 * When meta-data buffers are freed at run time we log a buffer item
2096 * with the XFS_BLF_CANCEL bit set to indicate that previous copies
2097 * of the buffer in the log should not be replayed at recovery time.
2098 * This is so that if the blocks covered by the buffer are reused for
2099 * file data before we crash we don't end up replaying old, freed
2100 * meta-data into a user's file.
2101 *
2102 * To handle the cancellation of buffer log items, we make two passes
2103 * over the log during recovery. During the first we build a table of
2104 * those buffers which have been cancelled, and during the second we
2105 * only replay those buffers which do not have corresponding cancel
2106  * records in the table. See xlog_recover_buffer_pass[1,2]
2107 * for more details on the implementation of the table of cancel records.
2108 */
2109 STATIC int
2110 xlog_recover_buffer_pass2(
2111 xlog_t *log,
2112 xlog_recover_item_t *item)
2113 {
2114 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
2115 xfs_mount_t *mp = log->l_mp;
2116 xfs_buf_t *bp;
2117 int error;
2118 uint buf_flags;
2119
2120 /*
2121 	 * In this pass we only want to recover the buffers which have
2122 * not been cancelled and are not cancellation buffers themselves.
2123 */
2124 if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno,
2125 buf_f->blf_len, buf_f->blf_flags)) {
2126 trace_xfs_log_recover_buf_cancel(log, buf_f);
2127 return 0;
2128 }
2129
2130 trace_xfs_log_recover_buf_recover(log, buf_f);
2131
2132 buf_flags = XBF_LOCK;
2133 if (!(buf_f->blf_flags & XFS_BLF_INODE_BUF))
2134 buf_flags |= XBF_MAPPED;
2135
2136 bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
2137 buf_flags);
2138 error = xfs_buf_geterror(bp);
2139 if (error) {
2140 xfs_ioerror_alert("xlog_recover_do..(read#1)", mp,
2141 bp, buf_f->blf_blkno);
2142 xfs_buf_relse(bp);
2143 return error;
2144 }
2145
2146 if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
2147 error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
2148 } else if (buf_f->blf_flags &
2149 (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2150 xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
2151 } else {
2152 xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2153 }
2154 if (error)
2155 return XFS_ERROR(error);
2156
2157 /*
2158 * Perform delayed write on the buffer. Asynchronous writes will be
2159 * slower when taking into account all the buffers to be flushed.
2160 *
2161 * Also make sure that only inode buffers with good sizes stay in
2162 * the buffer cache. The kernel moves inodes in buffers of 1 block
2163 * or XFS_INODE_CLUSTER_SIZE bytes, whichever is bigger. The inode
2164 * buffers in the log can be a different size if the log was generated
2165 * by an older kernel using unclustered inode buffers or a newer kernel
2166 	 * running with a different inode cluster size. Regardless, if
2167 	 * the inode buffer size isn't MAX(blocksize, XFS_INODE_CLUSTER_SIZE)
2168 * for *our* value of XFS_INODE_CLUSTER_SIZE, then we need to keep
2169 * the buffer out of the buffer cache so that the buffer won't
2170 * overlap with future reads of those inodes.
2171 */
2172 if (XFS_DINODE_MAGIC ==
2173 be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
2174 (XFS_BUF_COUNT(bp) != MAX(log->l_mp->m_sb.sb_blocksize,
2175 (__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) {
2176 XFS_BUF_STALE(bp);
2177 error = xfs_bwrite(mp, bp);
2178 } else {
2179 ASSERT(bp->b_target->bt_mount == mp);
2180 bp->b_iodone = xlog_recover_iodone;
2181 xfs_bdwrite(mp, bp);
2182 }
2183
2184 return (error);
2185 }
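
/*
 * A sketch of the cache-exclusion test above, assuming the 0x494e ("IN")
 * dinode magic: a buffer that looks like inodes but whose size differs
 * from max(blocksize, inode cluster size) must be staled rather than
 * left in the cache.
 */
#include <stdbool.h>
#include <stdint.h>

#define DINODE_MAGIC_SKETCH	0x494e

static bool must_stale_inode_buffer(uint16_t magic, uint32_t buf_bytes,
				    uint32_t blocksize, uint32_t cluster_bytes)
{
	uint32_t want = blocksize > cluster_bytes ? blocksize : cluster_bytes;

	return magic == DINODE_MAGIC_SKETCH && buf_bytes != want;
}
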
2186
2187 STATIC int
2188 xlog_recover_inode_pass2(
2189 xlog_t *log,
2190 xlog_recover_item_t *item)
2191 {
2192 xfs_inode_log_format_t *in_f;
2193 xfs_mount_t *mp = log->l_mp;
2194 xfs_buf_t *bp;
2195 xfs_dinode_t *dip;
2196 int len;
2197 xfs_caddr_t src;
2198 xfs_caddr_t dest;
2199 int error;
2200 int attr_index;
2201 uint fields;
2202 xfs_icdinode_t *dicp;
2203 int need_free = 0;
2204
2205 if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) {
2206 in_f = item->ri_buf[0].i_addr;
2207 } else {
2208 in_f = kmem_alloc(sizeof(xfs_inode_log_format_t), KM_SLEEP);
2209 need_free = 1;
2210 error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
2211 if (error)
2212 goto error;
2213 }
2214
2215 /*
2216 	 * The inode's buffer may have been cancelled (freed);
2217 	 * look out for that case and do not replay the inode.
2218 */
2219 if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno,
2220 in_f->ilf_len, 0)) {
2221 error = 0;
2222 trace_xfs_log_recover_inode_cancel(log, in_f);
2223 goto error;
2224 }
2225 trace_xfs_log_recover_inode_recover(log, in_f);
2226
2227 bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len,
2228 XBF_LOCK);
2229 error = xfs_buf_geterror(bp);
2230 if (error) {
2231 xfs_ioerror_alert("xlog_recover_do..(read#2)", mp,
2232 bp, in_f->ilf_blkno);
2233 xfs_buf_relse(bp);
2234 goto error;
2235 }
2236 ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
2237 dip = (xfs_dinode_t *)xfs_buf_offset(bp, in_f->ilf_boffset);
2238
2239 /*
2240 * Make sure the place we're flushing out to really looks
2241 * like an inode!
2242 */
2243 if (unlikely(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))) {
2244 xfs_buf_relse(bp);
2245 xfs_alert(mp,
2246 "%s: Bad inode magic number, dip = 0x%p, dino bp = 0x%p, ino = %Ld",
2247 __func__, dip, bp, in_f->ilf_ino);
2248 XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)",
2249 XFS_ERRLEVEL_LOW, mp);
2250 error = EFSCORRUPTED;
2251 goto error;
2252 }
2253 dicp = item->ri_buf[1].i_addr;
2254 if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) {
2255 xfs_buf_relse(bp);
2256 xfs_alert(mp,
2257 "%s: Bad inode log record, rec ptr 0x%p, ino %Ld",
2258 __func__, item, in_f->ilf_ino);
2259 XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)",
2260 XFS_ERRLEVEL_LOW, mp);
2261 error = EFSCORRUPTED;
2262 goto error;
2263 }
2264
2265 /* Skip replay when the on disk inode is newer than the log one */
2266 if (dicp->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
2267 /*
2268 		 * Deal with the wrap case: an on-disk value of
2269 		 * DI_MAX_FLUSH is treated as older than small numbers
2270 */
2271 if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
2272 dicp->di_flushiter < (DI_MAX_FLUSH >> 1)) {
2273 /* do nothing */
2274 } else {
2275 xfs_buf_relse(bp);
2276 trace_xfs_log_recover_inode_skip(log, in_f);
2277 error = 0;
2278 goto error;
2279 }
2280 }
2281 /* Take the opportunity to reset the flush iteration count */
2282 dicp->di_flushiter = 0;
2283
2284 if (unlikely((dicp->di_mode & S_IFMT) == S_IFREG)) {
2285 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2286 (dicp->di_format != XFS_DINODE_FMT_BTREE)) {
2287 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
2288 XFS_ERRLEVEL_LOW, mp, dicp);
2289 xfs_buf_relse(bp);
2290 xfs_alert(mp,
2291 "%s: Bad regular inode log record, rec ptr 0x%p, "
2292 "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2293 __func__, item, dip, bp, in_f->ilf_ino);
2294 error = EFSCORRUPTED;
2295 goto error;
2296 }
2297 } else if (unlikely((dicp->di_mode & S_IFMT) == S_IFDIR)) {
2298 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2299 (dicp->di_format != XFS_DINODE_FMT_BTREE) &&
2300 (dicp->di_format != XFS_DINODE_FMT_LOCAL)) {
2301 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
2302 XFS_ERRLEVEL_LOW, mp, dicp);
2303 xfs_buf_relse(bp);
2304 xfs_alert(mp,
2305 "%s: Bad dir inode log record, rec ptr 0x%p, "
2306 "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2307 __func__, item, dip, bp, in_f->ilf_ino);
2308 error = EFSCORRUPTED;
2309 goto error;
2310 }
2311 }
2312 	if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)) {
2313 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
2314 XFS_ERRLEVEL_LOW, mp, dicp);
2315 xfs_buf_relse(bp);
2316 xfs_alert(mp,
2317 "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
2318 "dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
2319 __func__, item, dip, bp, in_f->ilf_ino,
2320 dicp->di_nextents + dicp->di_anextents,
2321 dicp->di_nblocks);
2322 error = EFSCORRUPTED;
2323 goto error;
2324 }
2325 if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) {
2326 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
2327 XFS_ERRLEVEL_LOW, mp, dicp);
2328 xfs_buf_relse(bp);
2329 xfs_alert(mp,
2330 "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
2331 "dino bp 0x%p, ino %Ld, forkoff 0x%x", __func__,
2332 item, dip, bp, in_f->ilf_ino, dicp->di_forkoff);
2333 error = EFSCORRUPTED;
2334 goto error;
2335 }
2336 if (unlikely(item->ri_buf[1].i_len > sizeof(struct xfs_icdinode))) {
2337 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
2338 XFS_ERRLEVEL_LOW, mp, dicp);
2339 xfs_buf_relse(bp);
2340 xfs_alert(mp,
2341 "%s: Bad inode log record length %d, rec ptr 0x%p",
2342 __func__, item->ri_buf[1].i_len, item);
2343 error = EFSCORRUPTED;
2344 goto error;
2345 }
2346
2347 /* The core is in in-core format */
2348 xfs_dinode_to_disk(dip, item->ri_buf[1].i_addr);
2349
2350 /* the rest is in on-disk format */
2351 if (item->ri_buf[1].i_len > sizeof(struct xfs_icdinode)) {
2352 memcpy((xfs_caddr_t) dip + sizeof(struct xfs_icdinode),
2353 item->ri_buf[1].i_addr + sizeof(struct xfs_icdinode),
2354 item->ri_buf[1].i_len - sizeof(struct xfs_icdinode));
2355 }
2356
2357 fields = in_f->ilf_fields;
2358 switch (fields & (XFS_ILOG_DEV | XFS_ILOG_UUID)) {
2359 case XFS_ILOG_DEV:
2360 xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev);
2361 break;
2362 case XFS_ILOG_UUID:
2363 memcpy(XFS_DFORK_DPTR(dip),
2364 &in_f->ilf_u.ilfu_uuid,
2365 sizeof(uuid_t));
2366 break;
2367 }
2368
2369 if (in_f->ilf_size == 2)
2370 goto write_inode_buffer;
2371 len = item->ri_buf[2].i_len;
2372 src = item->ri_buf[2].i_addr;
2373 ASSERT(in_f->ilf_size <= 4);
2374 ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
2375 ASSERT(!(fields & XFS_ILOG_DFORK) ||
2376 (len == in_f->ilf_dsize));
2377
2378 switch (fields & XFS_ILOG_DFORK) {
2379 case XFS_ILOG_DDATA:
2380 case XFS_ILOG_DEXT:
2381 memcpy(XFS_DFORK_DPTR(dip), src, len);
2382 break;
2383
2384 case XFS_ILOG_DBROOT:
2385 xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len,
2386 (xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip),
2387 XFS_DFORK_DSIZE(dip, mp));
2388 break;
2389
2390 default:
2391 /*
2392 * There are no data fork flags set.
2393 */
2394 ASSERT((fields & XFS_ILOG_DFORK) == 0);
2395 break;
2396 }
2397
2398 /*
2399 * If we logged any attribute data, recover it. There may or
2400 * may not have been any other non-core data logged in this
2401 * transaction.
2402 */
2403 if (in_f->ilf_fields & XFS_ILOG_AFORK) {
2404 if (in_f->ilf_fields & XFS_ILOG_DFORK) {
2405 attr_index = 3;
2406 } else {
2407 attr_index = 2;
2408 }
2409 len = item->ri_buf[attr_index].i_len;
2410 src = item->ri_buf[attr_index].i_addr;
2411 ASSERT(len == in_f->ilf_asize);
2412
2413 switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
2414 case XFS_ILOG_ADATA:
2415 case XFS_ILOG_AEXT:
2416 dest = XFS_DFORK_APTR(dip);
2417 ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
2418 memcpy(dest, src, len);
2419 break;
2420
2421 case XFS_ILOG_ABROOT:
2422 dest = XFS_DFORK_APTR(dip);
2423 xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src,
2424 len, (xfs_bmdr_block_t*)dest,
2425 XFS_DFORK_ASIZE(dip, mp));
2426 break;
2427
2428 default:
2429 xfs_warn(log->l_mp, "%s: Invalid flag", __func__);
2430 ASSERT(0);
2431 xfs_buf_relse(bp);
2432 error = EIO;
2433 goto error;
2434 }
2435 }
2436
2437 write_inode_buffer:
2438 ASSERT(bp->b_target->bt_mount == mp);
2439 bp->b_iodone = xlog_recover_iodone;
2440 xfs_bdwrite(mp, bp);
2441 error:
2442 if (need_free)
2443 kmem_free(in_f);
2444 return XFS_ERROR(error);
2445 }
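
/*
 * The flush-iteration comparison above, as a standalone sketch; 0xffff
 * stands in for DI_MAX_FLUSH. Replay is skipped when the on-disk inode
 * is newer, except in the wrap case, where an on-disk value of
 * DI_MAX_FLUSH is treated as older than small in-core values.
 */
#include <stdbool.h>
#include <stdint.h>

#define MAX_FLUSH	0xffff

static bool skip_inode_replay(uint16_t ondisk_iter, uint16_t incore_iter)
{
	if (incore_iter >= ondisk_iter)
		return false;			/* log copy is newer */
	if (ondisk_iter == MAX_FLUSH && incore_iter < (MAX_FLUSH >> 1))
		return false;			/* counter wrapped */
	return true;
}
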
2446
2447 /*
2448 * Recover QUOTAOFF records. We simply make a note of it in the xlog_t
2449  * structure, so that we know not to do any dquot item or dquot buffer
2450  * recovery of that type.
2451 */
2452 STATIC int
2453 xlog_recover_quotaoff_pass1(
2454 xlog_t *log,
2455 xlog_recover_item_t *item)
2456 {
2457 xfs_qoff_logformat_t *qoff_f = item->ri_buf[0].i_addr;
2458 ASSERT(qoff_f);
2459
2460 /*
2461 * The logitem format's flag tells us if this was user quotaoff,
2462 * group/project quotaoff or both.
2463 */
2464 if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
2465 log->l_quotaoffs_flag |= XFS_DQ_USER;
2466 if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
2467 log->l_quotaoffs_flag |= XFS_DQ_PROJ;
2468 if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
2469 log->l_quotaoffs_flag |= XFS_DQ_GROUP;
2470
2471 return (0);
2472 }
2473
2474 /*
2475 * Recover a dquot record
2476 */
2477 STATIC int
2478 xlog_recover_dquot_pass2(
2479 xlog_t *log,
2480 xlog_recover_item_t *item)
2481 {
2482 xfs_mount_t *mp = log->l_mp;
2483 xfs_buf_t *bp;
2484 struct xfs_disk_dquot *ddq, *recddq;
2485 int error;
2486 xfs_dq_logformat_t *dq_f;
2487 uint type;
2488
2489
2490 /*
2491 * Filesystems are required to send in quota flags at mount time.
2492 */
2493 if (mp->m_qflags == 0)
2494 return (0);
2495
2496 recddq = item->ri_buf[1].i_addr;
2497 if (recddq == NULL) {
2498 xfs_alert(log->l_mp, "NULL dquot in %s.", __func__);
2499 return XFS_ERROR(EIO);
2500 }
2501 if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) {
2502 xfs_alert(log->l_mp, "dquot too small (%d) in %s.",
2503 item->ri_buf[1].i_len, __func__);
2504 return XFS_ERROR(EIO);
2505 }
2506
2507 /*
2508 	 * If this type of quota was turned off, ignore this record.
2509 */
2510 type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
2511 ASSERT(type);
2512 if (log->l_quotaoffs_flag & type)
2513 return (0);
2514
2515 /*
2516 * At this point we know that quota was _not_ turned off.
2517 * Since the mount flags are not indicating to us otherwise, this
2518 * must mean that quota is on, and the dquot needs to be replayed.
2519 * Remember that we may not have fully recovered the superblock yet,
2520 * so we can't do the usual trick of looking at the SB quota bits.
2521 *
2522 * The other possibility, of course, is that the quota subsystem was
2523 * removed since the last mount - ENOSYS.
2524 */
2525 dq_f = item->ri_buf[0].i_addr;
2526 ASSERT(dq_f);
2527 error = xfs_qm_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
2528 "xlog_recover_dquot_pass2 (log copy)");
2529 if (error)
2530 return XFS_ERROR(EIO);
2531 ASSERT(dq_f->qlf_len == 1);
2532
2533 error = xfs_read_buf(mp, mp->m_ddev_targp,
2534 dq_f->qlf_blkno,
2535 XFS_FSB_TO_BB(mp, dq_f->qlf_len),
2536 0, &bp);
2537 if (error) {
2538 xfs_ioerror_alert("xlog_recover_do..(read#3)", mp,
2539 bp, dq_f->qlf_blkno);
2540 return error;
2541 }
2542 ASSERT(bp);
2543 ddq = (xfs_disk_dquot_t *)xfs_buf_offset(bp, dq_f->qlf_boffset);
2544
2545 /*
2546 * At least the magic num portion should be on disk because this
2547 * was among a chunk of dquots created earlier, and we did some
2548 * minimal initialization then.
2549 */
2550 error = xfs_qm_dqcheck(mp, ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
2551 "xlog_recover_dquot_pass2");
2552 if (error) {
2553 xfs_buf_relse(bp);
2554 return XFS_ERROR(EIO);
2555 }
2556
2557 memcpy(ddq, recddq, item->ri_buf[1].i_len);
2558
2559 ASSERT(dq_f->qlf_size == 2);
2560 ASSERT(bp->b_target->bt_mount == mp);
2561 bp->b_iodone = xlog_recover_iodone;
2562 xfs_bdwrite(mp, bp);
2563
2564 return (0);
2565 }
2566
2567 /*
2568 * This routine is called to create an in-core extent free intent
2569 * item from the efi format structure which was logged on disk.
2570 * It allocates an in-core efi, copies the extents from the format
2571 * structure into it, and adds the efi to the AIL with the given
2572 * LSN.
2573 */
2574 STATIC int
2575 xlog_recover_efi_pass2(
2576 xlog_t *log,
2577 xlog_recover_item_t *item,
2578 xfs_lsn_t lsn)
2579 {
2580 int error;
2581 xfs_mount_t *mp = log->l_mp;
2582 xfs_efi_log_item_t *efip;
2583 xfs_efi_log_format_t *efi_formatp;
2584
2585 efi_formatp = item->ri_buf[0].i_addr;
2586
2587 efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
2588 if ((error = xfs_efi_copy_format(&(item->ri_buf[0]),
2589 &(efip->efi_format)))) {
2590 xfs_efi_item_free(efip);
2591 return error;
2592 }
2593 atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents);
2594
2595 spin_lock(&log->l_ailp->xa_lock);
2596 /*
2597 * xfs_trans_ail_update() drops the AIL lock.
2598 */
2599 xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn);
2600 return 0;
2601 }
2602
2603
2604 /*
2605 * This routine is called when an efd format structure is found in
2606  * a committed transaction in the log. Its purpose is to cancel
2607 * the corresponding efi if it was still in the log. To do this
2608 * it searches the AIL for the efi with an id equal to that in the
2609 * efd format structure. If we find it, we remove the efi from the
2610 * AIL and free it.
2611 */
2612 STATIC int
2613 xlog_recover_efd_pass2(
2614 xlog_t *log,
2615 xlog_recover_item_t *item)
2616 {
2617 xfs_efd_log_format_t *efd_formatp;
2618 xfs_efi_log_item_t *efip = NULL;
2619 xfs_log_item_t *lip;
2620 __uint64_t efi_id;
2621 struct xfs_ail_cursor cur;
2622 struct xfs_ail *ailp = log->l_ailp;
2623
2624 efd_formatp = item->ri_buf[0].i_addr;
2625 ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
2626 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
2627 (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
2628 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
2629 efi_id = efd_formatp->efd_efi_id;
2630
2631 /*
2632 * Search for the efi with the id in the efd format structure
2633 * in the AIL.
2634 */
2635 spin_lock(&ailp->xa_lock);
2636 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
2637 while (lip != NULL) {
2638 if (lip->li_type == XFS_LI_EFI) {
2639 efip = (xfs_efi_log_item_t *)lip;
2640 if (efip->efi_format.efi_id == efi_id) {
2641 /*
2642 * xfs_trans_ail_delete() drops the
2643 * AIL lock.
2644 */
2645 xfs_trans_ail_delete(ailp, lip);
2646 xfs_efi_item_free(efip);
2647 spin_lock(&ailp->xa_lock);
2648 break;
2649 }
2650 }
2651 lip = xfs_trans_ail_cursor_next(ailp, &cur);
2652 }
2653 xfs_trans_ail_cursor_done(ailp, &cur);
2654 spin_unlock(&ailp->xa_lock);
2655
2656 return 0;
2657 }
2658
2659 /*
2660 * Free up any resources allocated by the transaction
2661 *
2662 * Remember that EFIs, EFDs, and IUNLINKs are handled later.
2663 */
2664 STATIC void
2665 xlog_recover_free_trans(
2666 struct xlog_recover *trans)
2667 {
2668 xlog_recover_item_t *item, *n;
2669 int i;
2670
2671 list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
2672 /* Free the regions in the item. */
2673 list_del(&item->ri_list);
2674 for (i = 0; i < item->ri_cnt; i++)
2675 kmem_free(item->ri_buf[i].i_addr);
2676 /* Free the item itself */
2677 kmem_free(item->ri_buf);
2678 kmem_free(item);
2679 }
2680 /* Free the transaction recover structure */
2681 kmem_free(trans);
2682 }
2683
2684 STATIC int
2685 xlog_recover_commit_pass1(
2686 struct log *log,
2687 struct xlog_recover *trans,
2688 xlog_recover_item_t *item)
2689 {
2690 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);
2691
2692 switch (ITEM_TYPE(item)) {
2693 case XFS_LI_BUF:
2694 return xlog_recover_buffer_pass1(log, item);
2695 case XFS_LI_QUOTAOFF:
2696 return xlog_recover_quotaoff_pass1(log, item);
2697 case XFS_LI_INODE:
2698 case XFS_LI_EFI:
2699 case XFS_LI_EFD:
2700 case XFS_LI_DQUOT:
2701 /* nothing to do in pass 1 */
2702 return 0;
2703 default:
2704 xfs_warn(log->l_mp, "%s: invalid item type (%d)",
2705 __func__, ITEM_TYPE(item));
2706 ASSERT(0);
2707 return XFS_ERROR(EIO);
2708 }
2709 }
2710
2711 STATIC int
2712 xlog_recover_commit_pass2(
2713 struct log *log,
2714 struct xlog_recover *trans,
2715 xlog_recover_item_t *item)
2716 {
2717 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);
2718
2719 switch (ITEM_TYPE(item)) {
2720 case XFS_LI_BUF:
2721 return xlog_recover_buffer_pass2(log, item);
2722 case XFS_LI_INODE:
2723 return xlog_recover_inode_pass2(log, item);
2724 case XFS_LI_EFI:
2725 return xlog_recover_efi_pass2(log, item, trans->r_lsn);
2726 case XFS_LI_EFD:
2727 return xlog_recover_efd_pass2(log, item);
2728 case XFS_LI_DQUOT:
2729 return xlog_recover_dquot_pass2(log, item);
2730 case XFS_LI_QUOTAOFF:
2731 /* nothing to do in pass2 */
2732 return 0;
2733 default:
2734 xfs_warn(log->l_mp, "%s: invalid item type (%d)",
2735 __func__, ITEM_TYPE(item));
2736 ASSERT(0);
2737 return XFS_ERROR(EIO);
2738 }
2739 }
2740
2741 /*
2742 * Perform the transaction.
2743 *
2744 * If the transaction modifies a buffer or inode, do it now. Otherwise,
2745 * EFIs and EFDs get queued up by adding entries into the AIL for them.
2746 */
2747 STATIC int
2748 xlog_recover_commit_trans(
2749 struct log *log,
2750 struct xlog_recover *trans,
2751 int pass)
2752 {
2753 int error = 0;
2754 xlog_recover_item_t *item;
2755
2756 hlist_del(&trans->r_list);
2757
2758 error = xlog_recover_reorder_trans(log, trans, pass);
2759 if (error)
2760 return error;
2761
2762 list_for_each_entry(item, &trans->r_itemq, ri_list) {
2763 if (pass == XLOG_RECOVER_PASS1)
2764 error = xlog_recover_commit_pass1(log, trans, item);
2765 else
2766 error = xlog_recover_commit_pass2(log, trans, item);
2767 if (error)
2768 return error;
2769 }
2770
2771 xlog_recover_free_trans(trans);
2772 return 0;
2773 }
2774
2775 STATIC int
2776 xlog_recover_unmount_trans(
2777 struct log *log,
2778 xlog_recover_t *trans)
2779 {
2780 /* Do nothing now */
2781 xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
2782 return 0;
2783 }
2784
2785 /*
2786 * There are two valid states of the r_state field. 0 indicates that the
2787 * transaction structure is in a normal state. We have either seen the
2788 * start of the transaction or the last operation we added was not a partial
2789 * operation. If the last operation we added to the transaction was a
2790 * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
2791 *
2792 * NOTE: skip LRs with 0 data length.
2793 */
2794 STATIC int
2795 xlog_recover_process_data(
2796 xlog_t *log,
2797 struct hlist_head rhash[],
2798 xlog_rec_header_t *rhead,
2799 xfs_caddr_t dp,
2800 int pass)
2801 {
2802 xfs_caddr_t lp;
2803 int num_logops;
2804 xlog_op_header_t *ohead;
2805 xlog_recover_t *trans;
2806 xlog_tid_t tid;
2807 int error;
2808 unsigned long hash;
2809 uint flags;
2810
2811 lp = dp + be32_to_cpu(rhead->h_len);
2812 num_logops = be32_to_cpu(rhead->h_num_logops);
2813
2814 /* check the log format matches our own - else we can't recover */
2815 if (xlog_header_check_recover(log->l_mp, rhead))
2816 return (XFS_ERROR(EIO));
2817
2818 while ((dp < lp) && num_logops) {
2819 ASSERT(dp + sizeof(xlog_op_header_t) <= lp);
2820 ohead = (xlog_op_header_t *)dp;
2821 dp += sizeof(xlog_op_header_t);
2822 if (ohead->oh_clientid != XFS_TRANSACTION &&
2823 ohead->oh_clientid != XFS_LOG) {
2824 xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
2825 __func__, ohead->oh_clientid);
2826 ASSERT(0);
2827 return (XFS_ERROR(EIO));
2828 }
2829 tid = be32_to_cpu(ohead->oh_tid);
2830 hash = XLOG_RHASH(tid);
2831 trans = xlog_recover_find_tid(&rhash[hash], tid);
2832 if (trans == NULL) { /* not found; add new tid */
2833 if (ohead->oh_flags & XLOG_START_TRANS)
2834 xlog_recover_new_tid(&rhash[hash], tid,
2835 be64_to_cpu(rhead->h_lsn));
2836 } else {
2837 if (dp + be32_to_cpu(ohead->oh_len) > lp) {
2838 xfs_warn(log->l_mp, "%s: bad length 0x%x",
2839 __func__, be32_to_cpu(ohead->oh_len));
2840 WARN_ON(1);
2841 return (XFS_ERROR(EIO));
2842 }
2843 flags = ohead->oh_flags & ~XLOG_END_TRANS;
2844 if (flags & XLOG_WAS_CONT_TRANS)
2845 flags &= ~XLOG_CONTINUE_TRANS;
2846 switch (flags) {
2847 case XLOG_COMMIT_TRANS:
2848 error = xlog_recover_commit_trans(log,
2849 trans, pass);
2850 break;
2851 case XLOG_UNMOUNT_TRANS:
2852 error = xlog_recover_unmount_trans(log, trans);
2853 break;
2854 case XLOG_WAS_CONT_TRANS:
2855 error = xlog_recover_add_to_cont_trans(log,
2856 trans, dp,
2857 be32_to_cpu(ohead->oh_len));
2858 break;
2859 case XLOG_START_TRANS:
2860 xfs_warn(log->l_mp, "%s: bad transaction",
2861 __func__);
2862 ASSERT(0);
2863 error = XFS_ERROR(EIO);
2864 break;
2865 case 0:
2866 case XLOG_CONTINUE_TRANS:
2867 error = xlog_recover_add_to_trans(log, trans,
2868 dp, be32_to_cpu(ohead->oh_len));
2869 break;
2870 default:
2871 xfs_warn(log->l_mp, "%s: bad flag 0x%x",
2872 __func__, flags);
2873 ASSERT(0);
2874 error = XFS_ERROR(EIO);
2875 break;
2876 }
2877 if (error)
2878 return error;
2879 }
2880 dp += be32_to_cpu(ohead->oh_len);
2881 num_logops--;
2882 }
2883 return 0;
2884 }
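
/*
 * A pared-down sketch of the record walk above. The real
 * xlog_op_header_t also carries a transaction id, client id and flags;
 * here only the payload length matters for advancing through the record.
 */
#include <stdint.h>
#include <string.h>

struct ophdr_sketch {
	uint32_t	oh_len;		/* payload bytes that follow */
};

/* Returns 0 on success, -1 when a header or payload overruns [dp, lp). */
static int walk_ops(const char *dp, const char *lp, int num_logops)
{
	while (num_logops-- && dp < lp) {
		struct ophdr_sketch oh;

		if ((size_t)(lp - dp) < sizeof(oh))
			return -1;
		memcpy(&oh, dp, sizeof(oh));
		dp += sizeof(oh);
		if ((size_t)(lp - dp) < oh.oh_len)
			return -1;	/* bad length, as checked above */
		dp += oh.oh_len;	/* payload dispatched by tx state */
	}
	return 0;
}
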
2885
2886 /*
2887 * Process an extent free intent item that was recovered from
2888 * the log. We need to free the extents that it describes.
2889 */
2890 STATIC int
2891 xlog_recover_process_efi(
2892 xfs_mount_t *mp,
2893 xfs_efi_log_item_t *efip)
2894 {
2895 xfs_efd_log_item_t *efdp;
2896 xfs_trans_t *tp;
2897 int i;
2898 int error = 0;
2899 xfs_extent_t *extp;
2900 xfs_fsblock_t startblock_fsb;
2901
2902 ASSERT(!test_bit(XFS_EFI_RECOVERED, &efip->efi_flags));
2903
2904 /*
2905 * First check the validity of the extents described by the
2906 * EFI. If any are bad, then assume that all are bad and
2907 * just toss the EFI.
2908 */
2909 for (i = 0; i < efip->efi_format.efi_nextents; i++) {
2910 extp = &(efip->efi_format.efi_extents[i]);
2911 startblock_fsb = XFS_BB_TO_FSB(mp,
2912 XFS_FSB_TO_DADDR(mp, extp->ext_start));
2913 if ((startblock_fsb == 0) ||
2914 (extp->ext_len == 0) ||
2915 (startblock_fsb >= mp->m_sb.sb_dblocks) ||
2916 (extp->ext_len >= mp->m_sb.sb_agblocks)) {
2917 /*
2918 * This will pull the EFI from the AIL and
2919 * free the memory associated with it.
2920 */
2921 xfs_efi_release(efip, efip->efi_format.efi_nextents);
2922 return XFS_ERROR(EIO);
2923 }
2924 }
2925
2926 tp = xfs_trans_alloc(mp, 0);
2927 error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, 0, 0);
2928 if (error)
2929 goto abort_error;
2930 efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
2931
2932 for (i = 0; i < efip->efi_format.efi_nextents; i++) {
2933 extp = &(efip->efi_format.efi_extents[i]);
2934 error = xfs_free_extent(tp, extp->ext_start, extp->ext_len);
2935 if (error)
2936 goto abort_error;
2937 xfs_trans_log_efd_extent(tp, efdp, extp->ext_start,
2938 extp->ext_len);
2939 }
2940
2941 set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
2942 error = xfs_trans_commit(tp, 0);
2943 return error;
2944
2945 abort_error:
2946 xfs_trans_cancel(tp, XFS_TRANS_ABORT);
2947 return error;
2948 }
2949
2950 /*
2951 * When this is called, all of the EFIs which did not have
2952 * corresponding EFDs should be in the AIL. What we do now
2953 * is free the extents associated with each one.
2954 *
2955 * Since we process the EFIs in normal transactions, they
2956 * will be removed at some point after the commit. This prevents
2957 * us from just walking down the list processing each one.
2958 * We'll use a flag in the EFI to skip those that we've already
2959 * processed and use the AIL iteration mechanism's generation
2960 * count to try to speed this up at least a bit.
2961 *
2962 * When we start, we know that the EFIs are the only things in
2963 * the AIL. As we process them, however, other items are added
2964 * to the AIL. Since everything added to the AIL must come after
2965 * everything already in the AIL, we stop processing as soon as
2966 * we see something other than an EFI in the AIL.
2967 */
2968 STATIC int
2969 xlog_recover_process_efis(
2970 xlog_t *log)
2971 {
2972 xfs_log_item_t *lip;
2973 xfs_efi_log_item_t *efip;
2974 int error = 0;
2975 struct xfs_ail_cursor cur;
2976 struct xfs_ail *ailp;
2977
2978 ailp = log->l_ailp;
2979 spin_lock(&ailp->xa_lock);
2980 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
2981 while (lip != NULL) {
2982 /*
2983 * We're done when we see something other than an EFI.
2984 * There should be no EFIs left in the AIL now.
2985 */
2986 if (lip->li_type != XFS_LI_EFI) {
2987 #ifdef DEBUG
2988 for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
2989 ASSERT(lip->li_type != XFS_LI_EFI);
2990 #endif
2991 break;
2992 }
2993
2994 /*
2995 * Skip EFIs that we've already processed.
2996 */
2997 efip = (xfs_efi_log_item_t *)lip;
2998 if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags)) {
2999 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3000 continue;
3001 }
3002
3003 spin_unlock(&ailp->xa_lock);
3004 error = xlog_recover_process_efi(log->l_mp, efip);
3005 spin_lock(&ailp->xa_lock);
3006 if (error)
3007 goto out;
3008 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3009 }
3010 out:
3011 xfs_trans_ail_cursor_done(ailp, &cur);
3012 spin_unlock(&ailp->xa_lock);
3013 return error;
3014 }
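
/*
 * The AIL scan above, sketched over a plain list: process unrecovered
 * EFIs in order and stop at the first non-EFI, since everything behind
 * it in the AIL is newer. process_efi() is a hypothetical stand-in for
 * xlog_recover_process_efi().
 */
#include <stdbool.h>
#include <stddef.h>

enum item_type { ITEM_EFI, ITEM_OTHER };

struct ail_item {
	enum item_type	type;
	bool		recovered;
	struct ail_item	*next;
};

extern int process_efi(struct ail_item *efi);	/* hypothetical helper */

static int process_efis_sketch(struct ail_item *head)
{
	struct ail_item	*it;
	int		error;

	for (it = head; it != NULL; it = it->next) {
		if (it->type != ITEM_EFI)
			break;			/* only newer items follow */
		if (it->recovered)
			continue;
		error = process_efi(it);
		if (error)
			return error;
	}
	return 0;
}
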
3015
3016 /*
3017 * This routine performs a transaction to null out a bad inode pointer
3018 * in an agi unlinked inode hash bucket.
3019 */
3020 STATIC void
3021 xlog_recover_clear_agi_bucket(
3022 xfs_mount_t *mp,
3023 xfs_agnumber_t agno,
3024 int bucket)
3025 {
3026 xfs_trans_t *tp;
3027 xfs_agi_t *agi;
3028 xfs_buf_t *agibp;
3029 int offset;
3030 int error;
3031
3032 tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET);
3033 error = xfs_trans_reserve(tp, 0, XFS_CLEAR_AGI_BUCKET_LOG_RES(mp),
3034 0, 0, 0);
3035 if (error)
3036 goto out_abort;
3037
3038 error = xfs_read_agi(mp, tp, agno, &agibp);
3039 if (error)
3040 goto out_abort;
3041
3042 agi = XFS_BUF_TO_AGI(agibp);
3043 agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
3044 offset = offsetof(xfs_agi_t, agi_unlinked) +
3045 (sizeof(xfs_agino_t) * bucket);
3046 xfs_trans_log_buf(tp, agibp, offset,
3047 (offset + sizeof(xfs_agino_t) - 1));
3048
3049 error = xfs_trans_commit(tp, 0);
3050 if (error)
3051 goto out_error;
3052 return;
3053
3054 out_abort:
3055 xfs_trans_cancel(tp, XFS_TRANS_ABORT);
3056 out_error:
3057 xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
3058 return;
3059 }
3060
3061 STATIC xfs_agino_t
3062 xlog_recover_process_one_iunlink(
3063 struct xfs_mount *mp,
3064 xfs_agnumber_t agno,
3065 xfs_agino_t agino,
3066 int bucket)
3067 {
3068 struct xfs_buf *ibp;
3069 struct xfs_dinode *dip;
3070 struct xfs_inode *ip;
3071 xfs_ino_t ino;
3072 int error;
3073
3074 ino = XFS_AGINO_TO_INO(mp, agno, agino);
3075 error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
3076 if (error)
3077 goto fail;
3078
3079 /*
3080 * Get the on disk inode to find the next inode in the bucket.
3081 */
3082 error = xfs_itobp(mp, NULL, ip, &dip, &ibp, XBF_LOCK);
3083 if (error)
3084 goto fail_iput;
3085
3086 ASSERT(ip->i_d.di_nlink == 0);
3087 ASSERT(ip->i_d.di_mode != 0);
3088
3089 /* setup for the next pass */
3090 agino = be32_to_cpu(dip->di_next_unlinked);
3091 xfs_buf_relse(ibp);
3092
3093 /*
3094 * Prevent any DMAPI event from being sent when the reference on
3095 * the inode is dropped.
3096 */
3097 ip->i_d.di_dmevmask = 0;
3098
3099 IRELE(ip);
3100 return agino;
3101
3102 fail_iput:
3103 IRELE(ip);
3104 fail:
3105 /*
3106 * We can't read in the inode this bucket points to, or this inode
3107 * is messed up. Just ditch this bucket of inodes. We will lose
3108 * some inodes and space, but at least we won't hang.
3109 *
3110 * Call xlog_recover_clear_agi_bucket() to perform a transaction to
3111 * clear the inode pointer in the bucket.
3112 */
3113 xlog_recover_clear_agi_bucket(mp, agno, bucket);
3114 return NULLAGINO;
3115 }
3116
3117 /*
3118 * xlog_iunlink_recover
3119 *
3120 * This is called during recovery to process any inodes which
3121  * were unlinked but not freed when the system crashed. These
3122 * inodes will be on the lists in the AGI blocks. What we do
3123 * here is scan all the AGIs and fully truncate and free any
3124 * inodes found on the lists. Each inode is removed from the
3125 * lists when it has been fully truncated and is freed. The
3126 * freeing of the inode and its removal from the list must be
3127 * atomic.
3128 */
3129 STATIC void
3130 xlog_recover_process_iunlinks(
3131 xlog_t *log)
3132 {
3133 xfs_mount_t *mp;
3134 xfs_agnumber_t agno;
3135 xfs_agi_t *agi;
3136 xfs_buf_t *agibp;
3137 xfs_agino_t agino;
3138 int bucket;
3139 int error;
3140 uint mp_dmevmask;
3141
3142 mp = log->l_mp;
3143
3144 /*
3145 * Prevent any DMAPI event from being sent while in this function.
3146 */
3147 mp_dmevmask = mp->m_dmevmask;
3148 mp->m_dmevmask = 0;
3149
3150 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
3151 /*
3152 * Find the agi for this ag.
3153 */
3154 error = xfs_read_agi(mp, NULL, agno, &agibp);
3155 if (error) {
3156 /*
3157 			 * AGI is corrupted. Don't process it.
3158 *
3159 * We should probably mark the filesystem as corrupt
3160 * after we've recovered all the ag's we can....
3161 */
3162 continue;
3163 }
3164 agi = XFS_BUF_TO_AGI(agibp);
3165
3166 for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
3167 agino = be32_to_cpu(agi->agi_unlinked[bucket]);
3168 while (agino != NULLAGINO) {
3169 /*
3170 * Release the agi buffer so that it can
3171 * be acquired in the normal course of the
3172 * transaction to truncate and free the inode.
3173 */
3174 xfs_buf_relse(agibp);
3175
3176 agino = xlog_recover_process_one_iunlink(mp,
3177 agno, agino, bucket);
3178
3179 /*
3180 				 * Reacquire the AGI buffer and continue around
3181 * the loop. This should never fail as we know
3182 * the buffer was good earlier on.
3183 */
3184 error = xfs_read_agi(mp, NULL, agno, &agibp);
3185 ASSERT(error == 0);
3186 agi = XFS_BUF_TO_AGI(agibp);
3187 }
3188 }
3189
3190 /*
3191 * Release the buffer for the current agi so we can
3192 * go on to the next one.
3193 */
3194 xfs_buf_relse(agibp);
3195 }
3196
3197 mp->m_dmevmask = mp_dmevmask;
3198 }
3199
3200
3201 #ifdef DEBUG
3202 STATIC void
3203 xlog_pack_data_checksum(
3204 xlog_t *log,
3205 xlog_in_core_t *iclog,
3206 int size)
3207 {
3208 int i;
3209 __be32 *up;
3210 uint chksum = 0;
3211
3212 up = (__be32 *)iclog->ic_datap;
3213 /* divide length by 4 to get # words */
3214 for (i = 0; i < (size >> 2); i++) {
3215 chksum ^= be32_to_cpu(*up);
3216 up++;
3217 }
3218 iclog->ic_header.h_chksum = cpu_to_be32(chksum);
3219 }
3220 #else
3221 #define xlog_pack_data_checksum(log, iclog, size)
3222 #endif
3223
3224 /*
3225 * Stamp cycle number in every block
3226 */
3227 void
3228 xlog_pack_data(
3229 xlog_t *log,
3230 xlog_in_core_t *iclog,
3231 int roundoff)
3232 {
3233 int i, j, k;
3234 int size = iclog->ic_offset + roundoff;
3235 __be32 cycle_lsn;
3236 xfs_caddr_t dp;
3237
3238 xlog_pack_data_checksum(log, iclog, size);
3239
3240 cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn);
3241
3242 dp = iclog->ic_datap;
3243 for (i = 0; i < BTOBB(size) &&
3244 i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
3245 iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp;
3246 *(__be32 *)dp = cycle_lsn;
3247 dp += BBSIZE;
3248 }
3249
3250 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3251 xlog_in_core_2_t *xhdr = iclog->ic_data;
3252
3253 for ( ; i < BTOBB(size); i++) {
3254 j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3255 k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3256 xhdr[j].hic_xheader.xh_cycle_data[k] = *(__be32 *)dp;
3257 *(__be32 *)dp = cycle_lsn;
3258 dp += BBSIZE;
3259 }
3260
3261 for (i = 1; i < log->l_iclog_heads; i++) {
3262 xhdr[i].hic_xheader.xh_cycle = cycle_lsn;
3263 }
3264 }
3265 }
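
/*
 * A self-contained sketch of the cycle stamping above, assuming 512-byte
 * basic blocks: the first word of every block is saved into the header's
 * cycle-data array and overwritten with the cycle number, so recovery can
 * both detect stale blocks and restore the original data.
 */
#include <stdint.h>
#include <string.h>

#define BBSIZE_SKETCH	512

static void pack_cycles(char *data, int nblocks, uint32_t cycle,
			uint32_t *saved)
{
	int i;

	for (i = 0; i < nblocks; i++) {
		memcpy(&saved[i], data + i * BBSIZE_SKETCH, sizeof(uint32_t));
		memcpy(data + i * BBSIZE_SKETCH, &cycle, sizeof(uint32_t));
	}
}

static void unpack_cycles(char *data, int nblocks, const uint32_t *saved)
{
	int i;

	for (i = 0; i < nblocks; i++)
		memcpy(data + i * BBSIZE_SKETCH, &saved[i], sizeof(uint32_t));
}
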
3266
3267 STATIC void
3268 xlog_unpack_data(
3269 xlog_rec_header_t *rhead,
3270 xfs_caddr_t dp,
3271 xlog_t *log)
3272 {
3273 int i, j, k;
3274
3275 for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
3276 i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
3277 *(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
3278 dp += BBSIZE;
3279 }
3280
3281 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3282 xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
3283 for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
3284 j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3285 k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3286 *(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
3287 dp += BBSIZE;
3288 }
3289 }
3290 }
3291
3292 STATIC int
3293 xlog_valid_rec_header(
3294 xlog_t *log,
3295 xlog_rec_header_t *rhead,
3296 xfs_daddr_t blkno)
3297 {
3298 int hlen;
3299
3300 if (unlikely(rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) {
3301 XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
3302 XFS_ERRLEVEL_LOW, log->l_mp);
3303 return XFS_ERROR(EFSCORRUPTED);
3304 }
3305 if (unlikely(
3306 (!rhead->h_version ||
3307 (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
3308 xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
3309 __func__, be32_to_cpu(rhead->h_version));
3310 return XFS_ERROR(EIO);
3311 }
3312
3313 /* LR body must have data or it wouldn't have been written */
3314 hlen = be32_to_cpu(rhead->h_len);
3315 	if (unlikely(hlen <= 0 || hlen > INT_MAX)) {
3316 XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
3317 XFS_ERRLEVEL_LOW, log->l_mp);
3318 return XFS_ERROR(EFSCORRUPTED);
3319 }
3320 	if (unlikely(blkno > log->l_logBBsize || blkno > INT_MAX)) {
3321 XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
3322 XFS_ERRLEVEL_LOW, log->l_mp);
3323 return XFS_ERROR(EFSCORRUPTED);
3324 }
3325 return 0;
3326 }
3327
3328 /*
3329 * Read the log from tail to head and process the log records found.
3330 * Handle the two cases where the tail and head are in the same cycle
3331 * and where the active portion of the log wraps around the end of
3332 * the physical log separately. The pass parameter is passed through
3333 * to the routines called to process the data and is not looked at
3334 * here.
3335 */
3336 STATIC int
3337 xlog_do_recovery_pass(
3338 xlog_t *log,
3339 xfs_daddr_t head_blk,
3340 xfs_daddr_t tail_blk,
3341 int pass)
3342 {
3343 xlog_rec_header_t *rhead;
3344 xfs_daddr_t blk_no;
3345 xfs_caddr_t offset;
3346 xfs_buf_t *hbp, *dbp;
3347 int error = 0, h_size;
3348 int bblks, split_bblks;
3349 int hblks, split_hblks, wrapped_hblks;
3350 struct hlist_head rhash[XLOG_RHASH_SIZE];
3351
3352 ASSERT(head_blk != tail_blk);
3353
3354 /*
3355 * Read the header of the tail block and get the iclog buffer size from
3356 * h_size. Use this to tell how many sectors make up the log header.
3357 */
3358 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3359 /*
3360 * When using variable length iclogs, read first sector of
3361 * iclog header and extract the header size from it. Get a
3362 * new hbp that is the correct size.
3363 */
3364 hbp = xlog_get_bp(log, 1);
3365 if (!hbp)
3366 return ENOMEM;
3367
3368 error = xlog_bread(log, tail_blk, 1, hbp, &offset);
3369 if (error)
3370 goto bread_err1;
3371
3372 rhead = (xlog_rec_header_t *)offset;
3373 error = xlog_valid_rec_header(log, rhead, tail_blk);
3374 if (error)
3375 goto bread_err1;
3376 h_size = be32_to_cpu(rhead->h_size);
3377 if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
3378 (h_size > XLOG_HEADER_CYCLE_SIZE)) {
3379 hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
3380 if (h_size % XLOG_HEADER_CYCLE_SIZE)
3381 hblks++;
3382 xlog_put_bp(hbp);
3383 hbp = xlog_get_bp(log, hblks);
3384 } else {
3385 hblks = 1;
3386 }
3387 } else {
3388 ASSERT(log->l_sectBBsize == 1);
3389 hblks = 1;
3390 hbp = xlog_get_bp(log, 1);
3391 h_size = XLOG_BIG_RECORD_BSIZE;
3392 }
3393
3394 if (!hbp)
3395 return ENOMEM;
3396 dbp = xlog_get_bp(log, BTOBB(h_size));
3397 if (!dbp) {
3398 xlog_put_bp(hbp);
3399 return ENOMEM;
3400 }
3401
3402 memset(rhash, 0, sizeof(rhash));
3403 if (tail_blk <= head_blk) {
3404 for (blk_no = tail_blk; blk_no < head_blk; ) {
3405 error = xlog_bread(log, blk_no, hblks, hbp, &offset);
3406 if (error)
3407 goto bread_err2;
3408
3409 rhead = (xlog_rec_header_t *)offset;
3410 error = xlog_valid_rec_header(log, rhead, blk_no);
3411 if (error)
3412 goto bread_err2;
3413
3414 /* blocks in data section */
3415 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3416 error = xlog_bread(log, blk_no + hblks, bblks, dbp,
3417 &offset);
3418 if (error)
3419 goto bread_err2;
3420
3421 xlog_unpack_data(rhead, offset, log);
3422 if ((error = xlog_recover_process_data(log,
3423 rhash, rhead, offset, pass)))
3424 goto bread_err2;
3425 blk_no += bblks + hblks;
3426 }
3427 } else {
3428 /*
3429 * Perform recovery around the end of the physical log.
3430 * When the head is not on the same cycle number as the tail,
3431 * we can't do a sequential recovery as above.
3432 */
3433 blk_no = tail_blk;
3434 while (blk_no < log->l_logBBsize) {
3435 /*
3436 * Check for header wrapping around physical end-of-log
3437 */
3438 offset = XFS_BUF_PTR(hbp);
3439 split_hblks = 0;
3440 wrapped_hblks = 0;
3441 if (blk_no + hblks <= log->l_logBBsize) {
3442 /* Read header in one read */
3443 error = xlog_bread(log, blk_no, hblks, hbp,
3444 &offset);
3445 if (error)
3446 goto bread_err2;
3447 } else {
3448 /* This LR is split across physical log end */
3449 if (blk_no != log->l_logBBsize) {
3450 /* some data before physical log end */
3451 ASSERT(blk_no <= INT_MAX);
3452 split_hblks = log->l_logBBsize - (int)blk_no;
3453 ASSERT(split_hblks > 0);
3454 error = xlog_bread(log, blk_no,
3455 split_hblks, hbp,
3456 &offset);
3457 if (error)
3458 goto bread_err2;
3459 }
3460
3461 /*
3462 * Note: this black magic still works with
3463 * large sector sizes (non-512) only because:
3464 * - we increased the buffer size originally
3465 * by 1 sector giving us enough extra space
3466 * for the second read;
3467 * - the log start is guaranteed to be sector
3468 * aligned;
3469 * - we read the log end (LR header start)
3470 * _first_, then the log start (LR header end)
3471 * - order is important.
3472 */
3473 wrapped_hblks = hblks - split_hblks;
3474 error = xlog_bread_offset(log, 0,
3475 wrapped_hblks, hbp,
3476 offset + BBTOB(split_hblks));
3477 if (error)
3478 goto bread_err2;
3479 }
3480 rhead = (xlog_rec_header_t *)offset;
3481 error = xlog_valid_rec_header(log, rhead,
3482 split_hblks ? blk_no : 0);
3483 if (error)
3484 goto bread_err2;
3485
3486 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3487 blk_no += hblks;
3488
3489 /* Read in data for log record */
3490 if (blk_no + bblks <= log->l_logBBsize) {
3491 error = xlog_bread(log, blk_no, bblks, dbp,
3492 &offset);
3493 if (error)
3494 goto bread_err2;
3495 } else {
3496 /* This log record is split across the
3497 * physical end of log */
3498 offset = XFS_BUF_PTR(dbp);
3499 split_bblks = 0;
3500 if (blk_no != log->l_logBBsize) {
3501 /* some data is before the physical
3502 * end of log */
3503 ASSERT(!wrapped_hblks);
3504 ASSERT(blk_no <= INT_MAX);
3505 split_bblks =
3506 log->l_logBBsize - (int)blk_no;
3507 ASSERT(split_bblks > 0);
3508 error = xlog_bread(log, blk_no,
3509 split_bblks, dbp,
3510 &offset);
3511 if (error)
3512 goto bread_err2;
3513 }
3514
3515 /*
3516 * Note: this black magic still works with
3517 * large sector sizes (non-512) only because:
3518 * - we increased the buffer size originally
3519 * by 1 sector giving us enough extra space
3520 * for the second read;
3521 * - the log start is guaranteed to be sector
3522 * aligned;
3523 * - we read the log end (LR header start)
3524 * _first_, then the log start (LR header end)
3525 * - order is important.
3526 */
3527 error = xlog_bread_offset(log, 0,
3528 						bblks - split_bblks, dbp,
3529 offset + BBTOB(split_bblks));
3530 if (error)
3531 goto bread_err2;
3532 }
3533 xlog_unpack_data(rhead, offset, log);
3534 if ((error = xlog_recover_process_data(log, rhash,
3535 rhead, offset, pass)))
3536 goto bread_err2;
3537 blk_no += bblks;
3538 }
3539
3540 ASSERT(blk_no >= log->l_logBBsize);
3541 blk_no -= log->l_logBBsize;
3542
3543 /* read first part of physical log */
3544 while (blk_no < head_blk) {
3545 error = xlog_bread(log, blk_no, hblks, hbp, &offset);
3546 if (error)
3547 goto bread_err2;
3548
3549 rhead = (xlog_rec_header_t *)offset;
3550 error = xlog_valid_rec_header(log, rhead, blk_no);
3551 if (error)
3552 goto bread_err2;
3553
3554 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3555 error = xlog_bread(log, blk_no+hblks, bblks, dbp,
3556 &offset);
3557 if (error)
3558 goto bread_err2;
3559
3560 xlog_unpack_data(rhead, offset, log);
3561 if ((error = xlog_recover_process_data(log, rhash,
3562 rhead, offset, pass)))
3563 goto bread_err2;
3564 blk_no += bblks + hblks;
3565 }
3566 }
3567
3568 bread_err2:
3569 xlog_put_bp(dbp);
3570 bread_err1:
3571 xlog_put_bp(hbp);
3572 return error;
3573 }
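
/*
 * The split-read pattern above, sketched for a circular log of 'logbb'
 * 512-byte basic blocks. log_read() is a hypothetical helper standing in
 * for xlog_bread()/xlog_bread_offset(); as the comments above note, the
 * tail (end of the physical log) is read first, then the wrapped
 * remainder from block 0.
 */
#include <stddef.h>

extern int log_read(size_t blk, size_t nbb, char *dst);	/* hypothetical */

static int read_wrapped(size_t logbb, size_t blk, size_t nbb, char *dst)
{
	size_t	first;
	int	error;

	if (blk + nbb <= logbb)
		return log_read(blk, nbb, dst);	/* no wrap: one read */

	first = logbb - blk;			/* blocks before log end */
	error = log_read(blk, first, dst);
	if (error)
		return error;
	return log_read(0, nbb - first, dst + first * 512);
}
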
3574
3575 /*
3576 * Do the recovery of the log. We actually do this in two phases.
3577 * The two passes are necessary in order to implement the function
3578 * of cancelling a record written into the log. The first pass
3579 * determines those things which have been cancelled, and the
3580 * second pass replays log items normally except for those which
3581 * have been cancelled. The handling of the replay and cancellations
3582 * takes place in the log item type specific routines.
3583 *
3584 * The table of items which have cancel records in the log is allocated
3585 * and freed at this level, since only here do we know when all of
3586 * the log recovery has been completed.
3587 */
3588 STATIC int
3589 xlog_do_log_recovery(
3590 xlog_t *log,
3591 xfs_daddr_t head_blk,
3592 xfs_daddr_t tail_blk)
3593 {
3594 int error, i;
3595
3596 ASSERT(head_blk != tail_blk);
3597
3598 /*
3599 * First do a pass to find all of the cancelled buf log items.
3600 * Store them in the buf_cancel_table for use in the second pass.
3601 */
3602 log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
3603 sizeof(struct list_head),
3604 KM_SLEEP);
3605 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
3606 INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
3607
3608 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3609 XLOG_RECOVER_PASS1);
3610 if (error != 0) {
3611 kmem_free(log->l_buf_cancel_table);
3612 log->l_buf_cancel_table = NULL;
3613 return error;
3614 }
3615 /*
3616 * Then do a second pass to actually recover the items in the log.
3617 * When it is complete free the table of buf cancel items.
3618 */
3619 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3620 XLOG_RECOVER_PASS2);
3621 #ifdef DEBUG
3622 if (!error) {
3623 int i;
3624
3625 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
3626 ASSERT(list_empty(&log->l_buf_cancel_table[i]));
3627 }
3628 #endif /* DEBUG */
3629
3630 kmem_free(log->l_buf_cancel_table);
3631 log->l_buf_cancel_table = NULL;
3632
3633 return error;
3634 }
3635
3636 /*
3637 * Do the actual recovery
3638 */
3639 STATIC int
3640 xlog_do_recover(
3641 xlog_t *log,
3642 xfs_daddr_t head_blk,
3643 xfs_daddr_t tail_blk)
3644 {
3645 int error;
3646 xfs_buf_t *bp;
3647 xfs_sb_t *sbp;
3648
3649 /*
3650 * First replay the images in the log.
3651 */
3652 error = xlog_do_log_recovery(log, head_blk, tail_blk);
3653 if (error) {
3654 return error;
3655 }
3656
3657 XFS_bflush(log->l_mp->m_ddev_targp);
3658
3659 /*
3660 * If IO errors happened during recovery, bail out.
3661 */
3662 if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
3663 return (EIO);
3664 }
3665
3666 /*
3667 * We now update the tail_lsn since much of the recovery has completed
3668 * and there may be space available to use. If there were no extent
3669 	 * frees or iunlinks, we can free up the entire log and set the tail_lsn to
3670 * be the last_sync_lsn. This was set in xlog_find_tail to be the
3671 * lsn of the last known good LR on disk. If there are extent frees
3672 * or iunlinks they will have some entries in the AIL; so we look at
3673 * the AIL to determine how to set the tail_lsn.
3674 */
3675 xlog_assign_tail_lsn(log->l_mp);
3676
3677 /*
3678 * Now that we've finished replaying all buffer and inode
3679 * updates, re-read in the superblock.
3680 */
3681 bp = xfs_getsb(log->l_mp, 0);
3682 XFS_BUF_UNDONE(bp);
3683 ASSERT(!(XFS_BUF_ISWRITE(bp)));
3684 ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
3685 XFS_BUF_READ(bp);
3686 XFS_BUF_UNASYNC(bp);
3687 xfsbdstrat(log->l_mp, bp);
3688 error = xfs_buf_iowait(bp);
3689 if (error) {
3690 xfs_ioerror_alert("xlog_do_recover",
3691 log->l_mp, bp, XFS_BUF_ADDR(bp));
3692 ASSERT(0);
3693 xfs_buf_relse(bp);
3694 return error;
3695 }
3696
3697 /* Convert superblock from on-disk format */
3698 sbp = &log->l_mp->m_sb;
3699 xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
3700 ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC);
3701 ASSERT(xfs_sb_good_version(sbp));
3702 xfs_buf_relse(bp);
3703
3704 /* We've re-read the superblock so re-initialize per-cpu counters */
3705 xfs_icsb_reinit_counters(log->l_mp);
3706
3707 xlog_recover_check_summary(log);
3708
3709 /* Normal transactions can now occur */
3710 log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
3711 return 0;
3712 }

/*
 * Perform recovery and re-initialize some log variables in xlog_find_tail.
 *
 * Return error or zero.
 */
int
xlog_recover(
	xlog_t		*log)
{
	xfs_daddr_t	head_blk, tail_blk;
	int		error;

	/* find the tail of the log */
	error = xlog_find_tail(log, &head_blk, &tail_blk);
	if (error)
		return error;

	if (tail_blk != head_blk) {
		/* There used to be a comment here:
		 *
		 * disallow recovery on read-only mounts.  note -- mount
		 * checks for ENOSPC and turns it into an intelligent
		 * error message.
		 * ...but this is no longer true.  Now, unless you specify
		 * NORECOVERY (in which case this function would never be
		 * called), we just go ahead and recover.  We do this all
		 * under the vfs layer, so we can get away with it unless
		 * the device itself is read-only, in which case we fail.
		 */
		error = xfs_dev_is_read_only(log->l_mp, "recovery");
		if (error)
			return error;

		xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
			   log->l_mp->m_logname ? log->l_mp->m_logname
						: "internal");

		error = xlog_do_recover(log, head_blk, tail_blk);
		log->l_flags |= XLOG_RECOVERY_NEEDED;
	}
	return error;
}
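
/*
 * The dirty-log test above reduces to "the head and tail of the log
 * disagree". A hedged sketch making that predicate explicit; the
 * helper is hypothetical and merely restates the check:
 */
#if 0	/* illustrative sketch -- not built */
static int
xlog_log_is_dirty_sketch(
	xlog_t		*log)
{
	xfs_daddr_t	head_blk, tail_blk;

	if (xlog_find_tail(log, &head_blk, &tail_blk))
		return 0;	/* cannot tell; caller sees the error path */
	return tail_blk != head_blk;
}
#endif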

/*
 * In the first part of recovery we replay inodes and buffers and build
 * up the list of extent free items which need to be processed. Here
 * we process the extent free items and clean up the on-disk unlinked
 * inode lists. This is separated from the first part of recovery so
 * that the root and real-time bitmap inodes can be read in from disk in
 * between the two stages. This is necessary so that we can free space
 * in the real-time portion of the file system.
 */
int
xlog_recover_finish(
	xlog_t		*log)
{
	/*
	 * Now we're ready to do the transactions needed for the
	 * rest of recovery. Start with completing all the extent
	 * free intent records and then process the unlinked inode
	 * lists. At this point, we essentially run in normal mode
	 * except that we're still performing recovery actions
	 * rather than accepting new requests.
	 */
	if (log->l_flags & XLOG_RECOVERY_NEEDED) {
		int	error;

		error = xlog_recover_process_efis(log);
		if (error) {
			xfs_alert(log->l_mp, "Failed to recover EFIs");
			return error;
		}
		/*
		 * Sync the log to get all the EFIs out of the AIL.
		 * This isn't absolutely necessary, but it helps in
		 * case the unlink transactions would have problems
		 * pushing the EFIs out of the way.
		 */
		xfs_log_force(log->l_mp, XFS_LOG_SYNC);

		xlog_recover_process_iunlinks(log);

		xlog_recover_check_summary(log);

		xfs_notice(log->l_mp, "Ending recovery (logdev: %s)",
			   log->l_mp->m_logname ? log->l_mp->m_logname
						: "internal");
		log->l_flags &= ~XLOG_RECOVERY_NEEDED;
	} else {
		xfs_info(log->l_mp, "Ending clean mount");
	}
	return 0;
}
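
/*
 * Taken together, xlog_recover() and xlog_recover_finish() form a
 * two-stage protocol: stage one replays buffers and inodes and leaves
 * extent free intents (EFIs) in the AIL, the mount path then reads in
 * the root and realtime bitmap inodes, and stage two completes the
 * EFIs and the unlinked inode lists. A hedged sketch of that
 * sequencing; the helper is hypothetical and the real call sites live
 * in the mount code:
 */
#if 0	/* illustrative sketch -- not built */
static int
xlog_two_stage_recovery_sketch(
	xlog_t		*log)
{
	int		error;

	/* Stage 1: replay the log, collecting extent free intents. */
	error = xlog_recover(log);
	if (error)
		return error;

	/* ... mount reads in the root and realtime bitmap inodes ... */

	/* Stage 2: process EFIs and clean up unlinked inode lists. */
	return xlog_recover_finish(log);
}
#endif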

#if defined(DEBUG)
/*
 * Read all of the agf and agi counters and check that they
 * are consistent with the superblock counters.
 */
void
xlog_recover_check_summary(
	xlog_t		*log)
{
	xfs_mount_t	*mp;
	xfs_agf_t	*agfp;
	xfs_buf_t	*agfbp;
	xfs_buf_t	*agibp;
	xfs_agnumber_t	agno;
	__uint64_t	freeblks;
	__uint64_t	itotal;
	__uint64_t	ifree;
	int		error;

	mp = log->l_mp;

	freeblks = 0LL;
	itotal = 0LL;
	ifree = 0LL;
	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
		if (error) {
			xfs_alert(mp, "%s agf read failed agno %d error %d",
				  __func__, agno, error);
		} else {
			agfp = XFS_BUF_TO_AGF(agfbp);
			freeblks += be32_to_cpu(agfp->agf_freeblks) +
				    be32_to_cpu(agfp->agf_flcount);
			xfs_buf_relse(agfbp);
		}

		error = xfs_read_agi(mp, NULL, agno, &agibp);
		if (error) {
			xfs_alert(mp, "%s agi read failed agno %d error %d",
				  __func__, agno, error);
		} else {
			struct xfs_agi	*agi = XFS_BUF_TO_AGI(agibp);

			itotal += be32_to_cpu(agi->agi_count);
			ifree += be32_to_cpu(agi->agi_freecount);
			xfs_buf_relse(agibp);
		}
	}
}
#endif	/* DEBUG */
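
/*
 * The totals accumulated by xlog_recover_check_summary() correspond to
 * the superblock fields sb_fdblocks, sb_icount and sb_ifree. A hedged
 * sketch of the comparison those totals support; the helper is
 * hypothetical, and with lazy superblock counters the in-core values
 * rather than the raw on-disk fields would be the ones to trust:
 */
#if 0	/* illustrative sketch -- not built */
static void
xlog_check_summary_compare_sketch(
	xfs_mount_t	*mp,
	__uint64_t	freeblks,
	__uint64_t	itotal,
	__uint64_t	ifree)
{
	if (freeblks != mp->m_sb.sb_fdblocks)
		xfs_notice(mp, "free block count mismatch: agf %llu sb %llu",
			   (unsigned long long)freeblks,
			   (unsigned long long)mp->m_sb.sb_fdblocks);
	if (itotal != mp->m_sb.sb_icount)
		xfs_notice(mp, "inode count mismatch: agi %llu sb %llu",
			   (unsigned long long)itotal,
			   (unsigned long long)mp->m_sb.sb_icount);
	if (ifree != mp->m_sb.sb_ifree)
		xfs_notice(mp, "free inode count mismatch: agi %llu sb %llu",
			   (unsigned long long)ifree,
			   (unsigned long long)mp->m_sb.sb_ifree);
}
#endif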