/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/list_sort.h>
#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "print-tree.h"
#include "backref.h"
#include "tree-log.h"
#include "hash.h"

/* magic values for the inode_only field in btrfs_log_inode:
 *
 * LOG_INODE_ALL means to log everything
 * LOG_INODE_EXISTS means to log just enough to recreate the inode
 * during log replay
 */
#define LOG_INODE_ALL 0
#define LOG_INODE_EXISTS 1

/*
 * directory trouble cases
 *
 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
 * log, we must force a full commit before doing an fsync of the directory
 * where the unlink was done.
 * ---> record transid of last unlink/rename per directory
 *
 * mkdir foo/some_dir
 * normal commit
 * rename foo/some_dir foo2/some_dir
 * mkdir foo/some_dir
 * fsync foo/some_dir/some_file
 *
 * The fsync above will unlink the original some_dir without recording
 * it in its new location (foo2).  After a crash, some_dir will be gone
 * unless the fsync of some_file forces a full commit
 *
 * 2) we must log any new names for any file or dir that is in the fsync
 * log. ---> check inode while renaming/linking.
 *
 * 2a) we must log any new names for any file or dir during rename
 * when the directory they are being removed from was logged.
 * ---> check inode and old parent dir during rename
 *
 * 2a is actually the more important variant.  Without the extra logging
 * a crash might unlink the old name without recreating the new one
 *
 * 3) after a crash, we must go through any directories with a link count
 * of zero and redo the rm -rf
 *
 * mkdir f1/foo
 * normal commit
 * rm -rf f1/foo
 * fsync(f1)
 *
 * The directory f1 was fully removed from the FS, but fsync was never
 * called on f1, only its parent dir.  After a crash the rm -rf must
 * be replayed.  This must be able to recurse down the entire
 * directory tree.  The inode link count fixup code takes care of the
 * ugly details.
 */

/*
 * stages for the tree walking.  The first
 * stage (0) is to only pin down the blocks we find,
 * the second stage (1) is to make sure that all the inodes
 * we find in the log are created in the subvolume.
 *
 * The third stage (2) replays the directory index entries,
 * and the last stage deals with directories, links, extents
 * and all the other fun semantics
 */
#define LOG_WALK_PIN_ONLY 0
#define LOG_WALK_REPLAY_INODES 1
#define LOG_WALK_REPLAY_DIR_INDEX 2
#define LOG_WALK_REPLAY_ALL 3

static int btrfs_log_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, struct inode *inode,
			   int inode_only);
static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid);
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all);

/*
 * tree logging is a special write ahead log used to make sure that
 * fsyncs and O_SYNCs can happen without doing full tree commits.
 *
 * Full tree commits are expensive because they require commonly
 * modified blocks to be recowed, creating many dirty pages in the
 * extent tree and a 4x-6x higher write load than ext3.
 *
 * Instead of doing a tree commit on every fsync, we use the
 * key ranges and transaction ids to find items for a given file or directory
 * that have changed in this transaction.  Those items are copied into
 * a special tree (one per subvolume root), that tree is written to disk
 * and then the fsync is considered complete.
 *
 * After a crash, items are copied out of the log-tree back into the
 * subvolume tree.  Any file data extents found are recorded in the extent
 * allocation tree, and the log-tree freed.
 *
 * The log tree is read multiple times: once to pin down all the extents it
 * is using in ram, once to create all the inodes logged in the tree, and
 * once per remaining replay stage to do all the other items.
 */

/*
 * start a sub transaction and setup the log tree
 * this increments the log tree writer count to make the people
 * syncing the tree wait for us to finish
 */
static int start_log_trans(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_log_ctx *ctx)
{
	int index;
	int ret;

	mutex_lock(&root->log_mutex);
	if (root->log_root) {
		if (ACCESS_ONCE(root->fs_info->last_trans_log_full_commit) ==
		    trans->transid) {
			ret = -EAGAIN;
			goto out;
		}

		if (!root->log_start_pid) {
			root->log_start_pid = current->pid;
			root->log_multiple_pids = false;
		} else if (root->log_start_pid != current->pid) {
			root->log_multiple_pids = true;
		}

		atomic_inc(&root->log_batch);
		atomic_inc(&root->log_writers);
		if (ctx) {
			index = root->log_transid % 2;
			list_add_tail(&ctx->list, &root->log_ctxs[index]);
			ctx->log_transid = root->log_transid;
		}
		mutex_unlock(&root->log_mutex);
		return 0;
	}

	ret = 0;
	mutex_lock(&root->fs_info->tree_log_mutex);
	if (!root->fs_info->log_root_tree)
		ret = btrfs_init_log_root_tree(trans, root->fs_info);
	mutex_unlock(&root->fs_info->tree_log_mutex);
	if (ret)
		goto out;

	if (!root->log_root) {
		ret = btrfs_add_log_tree(trans, root);
		if (ret)
			goto out;
	}
	root->log_multiple_pids = false;
	root->log_start_pid = current->pid;
	atomic_inc(&root->log_batch);
	atomic_inc(&root->log_writers);
	if (ctx) {
		index = root->log_transid % 2;
		list_add_tail(&ctx->list, &root->log_ctxs[index]);
		ctx->log_transid = root->log_transid;
	}
out:
	mutex_unlock(&root->log_mutex);
	return ret;
}

/*
 * returns 0 if there was a log transaction running and we were able
 * to join, or returns -ENOENT if there was no transaction
 * in progress
 */
static int join_running_log_trans(struct btrfs_root *root)
{
	int ret = -ENOENT;

	smp_mb();
	if (!root->log_root)
		return -ENOENT;

	mutex_lock(&root->log_mutex);
	if (root->log_root) {
		ret = 0;
		atomic_inc(&root->log_writers);
	}
	mutex_unlock(&root->log_mutex);
	return ret;
}

/*
 * This either makes the current running log transaction wait
 * until you call btrfs_end_log_trans() or it makes any future
 * log transactions wait until you call btrfs_end_log_trans()
 */
int btrfs_pin_log_trans(struct btrfs_root *root)
{
	int ret = -ENOENT;

	mutex_lock(&root->log_mutex);
	atomic_inc(&root->log_writers);
	mutex_unlock(&root->log_mutex);
	return ret;
}

/*
 * indicate we're done making changes to the log tree
 * and wake up anyone waiting to do a sync
 */
void btrfs_end_log_trans(struct btrfs_root *root)
{
	if (atomic_dec_and_test(&root->log_writers)) {
		smp_mb();
		if (waitqueue_active(&root->log_writer_wait))
			wake_up(&root->log_writer_wait);
	}
}


/*
 * the walk control struct is used to pass state down the chain when
 * processing the log tree.  The stage field tells us which part
 * of the log tree processing we are currently doing.  The others
 * are state fields used for that specific part
 */
struct walk_control {
	/* should we free the extent on disk when done?  This is used
	 * at transaction commit time while freeing a log tree
	 */
	int free;

	/* should we write out the extent buffer?  This is used
	 * while flushing the log tree to disk during a sync
	 */
	int write;

	/* should we wait for the extent buffer io to finish?  Also used
	 * while flushing the log tree to disk for a sync
	 */
	int wait;

	/* pin only walk, we record which extents on disk belong to the
	 * log trees
	 */
	int pin;

	/* what stage of the replay code we're currently in */
	int stage;

	/* the root we are currently replaying */
	struct btrfs_root *replay_dest;

	/* the trans handle for the current replay */
	struct btrfs_trans_handle *trans;

	/* the function that gets used to process blocks we find in the
	 * tree.  Note the extent_buffer might not be up to date when it is
	 * passed in, and it must be checked or read if you need the data
	 * inside it
	 */
	int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
			    struct walk_control *wc, u64 gen);
};

/*
 * process_func used to pin down extents, write them or wait on them
 */
static int process_one_buffer(struct btrfs_root *log,
			      struct extent_buffer *eb,
			      struct walk_control *wc, u64 gen)
{
	int ret = 0;

	/*
	 * If this fs is mixed then we need to be able to process the leaves to
	 * pin down any logged extents, so we have to read the block.
	 */
	if (btrfs_fs_incompat(log->fs_info, MIXED_GROUPS)) {
		ret = btrfs_read_buffer(eb, gen);
		if (ret)
			return ret;
	}

	if (wc->pin)
		ret = btrfs_pin_extent_for_log_replay(log->fs_info->extent_root,
						      eb->start, eb->len);

	if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
		if (wc->pin && btrfs_header_level(eb) == 0)
			ret = btrfs_exclude_logged_extents(log, eb);
		if (wc->write)
			btrfs_write_tree_block(eb);
		if (wc->wait)
			btrfs_wait_tree_block_writeback(eb);
	}
	return ret;
}

/*
 * Item overwrite used by replay and tree logging.  eb, slot and key all refer
 * to the src data we are copying out.
 *
 * root is the tree we are copying into, and path is a scratch
 * path for use in this function (it should be released on entry and
 * will be released on exit).
 *
 * If the key is already in the destination tree the existing item is
 * overwritten.  If the existing item isn't big enough, it is extended.
 * If it is too large, it is truncated.
 *
 * If the key isn't in the destination yet, a new item is inserted.
 */
static noinline int overwrite_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct extent_buffer *eb, int slot,
				   struct btrfs_key *key)
{
	int ret;
	u32 item_size;
	u64 saved_i_size = 0;
	int save_old_i_size = 0;
	unsigned long src_ptr;
	unsigned long dst_ptr;
	int overwrite_root = 0;
	bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
		overwrite_root = 1;

	item_size = btrfs_item_size_nr(eb, slot);
	src_ptr = btrfs_item_ptr_offset(eb, slot);

	/* look for the key in the destination tree */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		return ret;

	if (ret == 0) {
		char *src_copy;
		char *dst_copy;
		u32 dst_size = btrfs_item_size_nr(path->nodes[0],
						  path->slots[0]);
		if (dst_size != item_size)
			goto insert;

		if (item_size == 0) {
			btrfs_release_path(path);
			return 0;
		}
		dst_copy = kmalloc(item_size, GFP_NOFS);
		src_copy = kmalloc(item_size, GFP_NOFS);
		if (!dst_copy || !src_copy) {
			btrfs_release_path(path);
			kfree(dst_copy);
			kfree(src_copy);
			return -ENOMEM;
		}

		read_extent_buffer(eb, src_copy, src_ptr, item_size);

		dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
				   item_size);
		ret = memcmp(dst_copy, src_copy, item_size);

		kfree(dst_copy);
		kfree(src_copy);
		/*
		 * they have the same contents, just return, this saves
		 * us from cowing blocks in the destination tree and doing
		 * extra writes that may not have been done by a previous
		 * sync
		 */
		if (ret == 0) {
			btrfs_release_path(path);
			return 0;
		}

		/*
		 * We need to load the old nbytes into the inode so when we
		 * replay the extents we've logged we get the right nbytes.
		 */
		if (inode_item) {
			struct btrfs_inode_item *item;
			u64 nbytes;
			u32 mode;

			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_inode_item);
			nbytes = btrfs_inode_nbytes(path->nodes[0], item);
			item = btrfs_item_ptr(eb, slot,
					      struct btrfs_inode_item);
			btrfs_set_inode_nbytes(eb, item, nbytes);

			/*
			 * If this is a directory we need to reset the i_size to
			 * 0 so that we can set it up properly when replaying
			 * the rest of the items in this log.
			 */
			mode = btrfs_inode_mode(eb, item);
			if (S_ISDIR(mode))
				btrfs_set_inode_size(eb, item, 0);
		}
	} else if (inode_item) {
		struct btrfs_inode_item *item;
		u32 mode;

		/*
		 * New inode, set nbytes to 0 so that the nbytes comes out
		 * properly when we replay the extents.
		 */
		item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
		btrfs_set_inode_nbytes(eb, item, 0);

		/*
		 * If this is a directory we need to reset the i_size to 0 so
		 * that we can set it up properly when replaying the rest of
		 * the items in this log.
		 */
		mode = btrfs_inode_mode(eb, item);
		if (S_ISDIR(mode))
			btrfs_set_inode_size(eb, item, 0);
	}
insert:
	btrfs_release_path(path);
	/* try to insert the key into the destination tree */
	ret = btrfs_insert_empty_item(trans, root, path,
				      key, item_size);

	/* make sure any existing item is the correct size */
	if (ret == -EEXIST) {
		u32 found_size;
		found_size = btrfs_item_size_nr(path->nodes[0],
						path->slots[0]);
		if (found_size > item_size)
			btrfs_truncate_item(root, path, item_size, 1);
		else if (found_size < item_size)
			btrfs_extend_item(root, path,
					  item_size - found_size);
	} else if (ret) {
		return ret;
	}
	dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
					path->slots[0]);

	/* don't overwrite an existing inode if the generation number
	 * was logged as zero.  This is done when the tree logging code
	 * is just logging an inode to make sure it exists after recovery.
	 *
	 * Also, don't overwrite i_size on directories during replay.
	 * log replay inserts and removes directory items based on the
	 * state of the tree found in the subvolume, and i_size is modified
	 * as it goes
	 */
	if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
		struct btrfs_inode_item *src_item;
		struct btrfs_inode_item *dst_item;

		src_item = (struct btrfs_inode_item *)src_ptr;
		dst_item = (struct btrfs_inode_item *)dst_ptr;

		if (btrfs_inode_generation(eb, src_item) == 0)
			goto no_copy;

		if (overwrite_root &&
		    S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
		    S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
			save_old_i_size = 1;
			saved_i_size = btrfs_inode_size(path->nodes[0],
							dst_item);
		}
	}

	copy_extent_buffer(path->nodes[0], eb, dst_ptr,
			   src_ptr, item_size);

	if (save_old_i_size) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
	}

	/* make sure the generation is filled in */
	if (key->type == BTRFS_INODE_ITEM_KEY) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
			btrfs_set_inode_generation(path->nodes[0], dst_item,
						   trans->transid);
		}
	}
no_copy:
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);
	return 0;
}

/*
 * simple helper to read an inode off the disk from a given root
 * This can only be called for subvolume roots and not for the log
 */
static noinline struct inode *read_one_inode(struct btrfs_root *root,
					     u64 objectid)
{
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = objectid;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
	if (IS_ERR(inode)) {
		inode = NULL;
	} else if (is_bad_inode(inode)) {
		iput(inode);
		inode = NULL;
	}
	return inode;
}

/* replays a single extent in 'eb' at 'slot' with 'key' into the
 * subvolume 'root'.  path is released on entry and should be released
 * on exit.
 *
 * extents in the log tree have not been allocated out of the extent
 * tree yet.  So, this completes the allocation, taking a reference
 * as required if the extent already exists or creating a new extent
 * if it isn't in the extent allocation tree yet.
 *
 * The extent is inserted into the file, dropping any existing extents
 * from the file that overlap the new one.
 */
static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct extent_buffer *eb, int slot,
				      struct btrfs_key *key)
{
	int found_type;
	u64 extent_end;
	u64 start = key->offset;
	u64 nbytes = 0;
	struct btrfs_file_extent_item *item;
	struct inode *inode = NULL;
	unsigned long size;
	int ret = 0;

	item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(eb, item);

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		nbytes = btrfs_file_extent_num_bytes(eb, item);
		extent_end = start + nbytes;

		/*
		 * We don't add to the inodes nbytes if we are prealloc or a
		 * hole.
		 */
		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
			nbytes = 0;
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		size = btrfs_file_extent_inline_len(eb, slot, item);
		nbytes = btrfs_file_extent_ram_bytes(eb, item);
		extent_end = ALIGN(start + size, root->sectorsize);
	} else {
		ret = 0;
		goto out;
	}

	inode = read_one_inode(root, key->objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	/*
	 * first check to see if we already have this extent in the
	 * file.  This must be done before the btrfs_drop_extents run
	 * so we don't try to drop this extent.
	 */
	ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
				       start, 0);

	if (ret == 0 &&
	    (found_type == BTRFS_FILE_EXTENT_REG ||
	     found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
		struct btrfs_file_extent_item cmp1;
		struct btrfs_file_extent_item cmp2;
		struct btrfs_file_extent_item *existing;
		struct extent_buffer *leaf;

		leaf = path->nodes[0];
		existing = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_file_extent_item);

		read_extent_buffer(eb, &cmp1, (unsigned long)item,
				   sizeof(cmp1));
		read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
				   sizeof(cmp2));

		/*
		 * we already have a pointer to this exact extent,
		 * we don't have to do anything
		 */
		if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
			btrfs_release_path(path);
			goto out;
		}
	}
	btrfs_release_path(path);

	/* drop any overlapping extents */
	ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
	if (ret)
		goto out;

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		u64 offset;
		unsigned long dest_offset;
		struct btrfs_key ins;

		ret = btrfs_insert_empty_item(trans, root, path, key,
					      sizeof(*item));
		if (ret)
			goto out;
		dest_offset = btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
		copy_extent_buffer(path->nodes[0], eb, dest_offset,
				   (unsigned long)item, sizeof(*item));

		ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
		ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
		ins.type = BTRFS_EXTENT_ITEM_KEY;
		offset = key->offset - btrfs_file_extent_offset(eb, item);

		if (ins.objectid > 0) {
			u64 csum_start;
			u64 csum_end;
			LIST_HEAD(ordered_sums);
			/*
			 * is this extent already allocated in the extent
			 * allocation tree?  If so, just add a reference
			 */
			ret = btrfs_lookup_extent(root, ins.objectid,
						  ins.offset);
			if (ret == 0) {
				ret = btrfs_inc_extent_ref(trans, root,
						ins.objectid, ins.offset,
						0, root->root_key.objectid,
						key->objectid, offset, 0);
				if (ret)
					goto out;
			} else {
				/*
				 * insert the extent pointer in the extent
				 * allocation tree
				 */
				ret = btrfs_alloc_logged_file_extent(trans,
						root, root->root_key.objectid,
						key->objectid, offset, &ins);
				if (ret)
					goto out;
			}
			btrfs_release_path(path);

			if (btrfs_file_extent_compression(eb, item)) {
				csum_start = ins.objectid;
				csum_end = csum_start + ins.offset;
			} else {
				csum_start = ins.objectid +
					btrfs_file_extent_offset(eb, item);
				csum_end = csum_start +
					btrfs_file_extent_num_bytes(eb, item);
			}

			ret = btrfs_lookup_csums_range(root->log_root,
						csum_start, csum_end - 1,
						&ordered_sums, 0);
			if (ret)
				goto out;
			while (!list_empty(&ordered_sums)) {
				struct btrfs_ordered_sum *sums;
				sums = list_entry(ordered_sums.next,
						struct btrfs_ordered_sum,
						list);
				if (!ret)
					ret = btrfs_csum_file_blocks(trans,
						root->fs_info->csum_root,
						sums);
				list_del(&sums->list);
				kfree(sums);
			}
			if (ret)
				goto out;
		} else {
			btrfs_release_path(path);
		}
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		/* inline extents are easy, we just overwrite them */
		ret = overwrite_item(trans, root, path, eb, slot, key);
		if (ret)
			goto out;
	}

	inode_add_bytes(inode, nbytes);
	ret = btrfs_update_inode(trans, root, inode);
out:
	if (inode)
		iput(inode);
	return ret;
}

/*
 * when cleaning up conflicts between the directory names in the
 * subvolume, directory names in the log and directory names in the
 * inode back references, we may have to unlink inodes from directories.
 *
 * This is a helper function to do the unlink of a specific directory
 * item
 */
static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct inode *dir,
				      struct btrfs_dir_item *di)
{
	struct inode *inode;
	char *name;
	int name_len;
	struct extent_buffer *leaf;
	struct btrfs_key location;
	int ret;

	leaf = path->nodes[0];

	btrfs_dir_item_key_to_cpu(leaf, di, &location);
	name_len = btrfs_dir_name_len(leaf, di);
	name = kmalloc(name_len, GFP_NOFS);
	if (!name)
		return -ENOMEM;

	read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
	btrfs_release_path(path);

	inode = read_one_inode(root, location.objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	ret = link_to_fixup_dir(trans, root, path, location.objectid);
	if (ret)
		goto out;

	ret = btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
	if (ret)
		goto out;
	else
		ret = btrfs_run_delayed_items(trans, root);
out:
	kfree(name);
	iput(inode);
	return ret;
}

/*
 * helper function to see if a given name and sequence number found
 * in an inode back reference are already in a directory and correctly
 * point to this inode
 */
static noinline int inode_in_dir(struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 dirid, u64 objectid, u64 index,
				 const char *name, int name_len)
{
	struct btrfs_dir_item *di;
	struct btrfs_key location;
	int match = 0;

	di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
					 index, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else
		goto out;
	btrfs_release_path(path);

	di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else
		goto out;
	match = 1;
out:
	btrfs_release_path(path);
	return match;
}

/*
 * helper function to check a log tree for a named back reference in
 * an inode.  This is used to decide if a back reference that is
 * found in the subvolume conflicts with what we find in the log.
 *
 * inode backreferences may have multiple refs in a single item,
 * during replay we process one reference at a time, and we don't
 * want to delete valid links to a file from the subvolume if that
 * link is also in the log.
 */
static noinline int backref_in_log(struct btrfs_root *log,
				   struct btrfs_key *key,
				   u64 ref_objectid,
				   char *name, int namelen)
{
	struct btrfs_path *path;
	struct btrfs_inode_ref *ref;
	unsigned long ptr;
	unsigned long ptr_end;
	unsigned long name_ptr;
	int found_name_len;
	int item_size;
	int ret;
	int match = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
	if (ret != 0)
		goto out;

	ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);

	if (key->type == BTRFS_INODE_EXTREF_KEY) {
		if (btrfs_find_name_in_ext_backref(path, ref_objectid,
						   name, namelen, NULL))
			match = 1;

		goto out;
	}

	item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		ref = (struct btrfs_inode_ref *)ptr;
		found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref);
		if (found_name_len == namelen) {
			name_ptr = (unsigned long)(ref + 1);
			ret = memcmp_extent_buffer(path->nodes[0], name,
						   name_ptr, namelen);
			if (ret == 0) {
				match = 1;
				goto out;
			}
		}
		ptr = (unsigned long)(ref + 1) + found_name_len;
	}
out:
	btrfs_free_path(path);
	return match;
}

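/*
 * helper for add_inode_ref: when the name we are replaying is not yet in
 * the directory, look for conflicting names that point at our inode from
 * the same parent and unlink any that are absent from the log.  Both the
 * old style inode refs and the extended refs are searched, and then any
 * directory item reusing our sequence number or our name is dropped.
 *
 * returns 1 if the back ref is for the root directory (nothing to do),
 * 0 on success and < 0 on error.  *search_done is set once the refs have
 * been checked so the caller can skip this for the remaining names in
 * the same item.
 */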
static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_root *log_root,
				  struct inode *dir, struct inode *inode,
				  struct extent_buffer *eb,
				  u64 inode_objectid, u64 parent_objectid,
				  u64 ref_index, char *name, int namelen,
				  int *search_done)
{
	int ret;
	char *victim_name;
	int victim_name_len;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key search_key;
	struct btrfs_inode_extref *extref;

again:
	/* Search old style refs */
	search_key.objectid = inode_objectid;
	search_key.type = BTRFS_INODE_REF_KEY;
	search_key.offset = parent_objectid;
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret == 0) {
		struct btrfs_inode_ref *victim_ref;
		unsigned long ptr;
		unsigned long ptr_end;

		leaf = path->nodes[0];

		/* are we trying to overwrite a back ref for the root directory
		 * if so, just jump out, we're done
		 */
		if (search_key.objectid == search_key.offset)
			return 1;

		/* check all the names in this back reference to see
		 * if they are in the log.  if so, we allow them to stay
		 * otherwise they must be unlinked as a conflict
		 */
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
		while (ptr < ptr_end) {
			victim_ref = (struct btrfs_inode_ref *)ptr;
			victim_name_len = btrfs_inode_ref_name_len(leaf,
								   victim_ref);
			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			if (!victim_name)
				return -ENOMEM;

			read_extent_buffer(leaf, victim_name,
					   (unsigned long)(victim_ref + 1),
					   victim_name_len);

			if (!backref_in_log(log_root, &search_key,
					    parent_objectid,
					    victim_name,
					    victim_name_len)) {
				inc_nlink(inode);
				btrfs_release_path(path);

				ret = btrfs_unlink_inode(trans, root, dir,
							 inode, victim_name,
							 victim_name_len);
				kfree(victim_name);
				if (ret)
					return ret;
				ret = btrfs_run_delayed_items(trans, root);
				if (ret)
					return ret;
				*search_done = 1;
				goto again;
			}
			kfree(victim_name);

			ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
		}

		/*
		 * NOTE: we have searched root tree and checked the
		 * corresponding ref, it does not need to check again.
		 */
		*search_done = 1;
	}
	btrfs_release_path(path);

	/* Same search but for extended refs */
	extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
					   inode_objectid, parent_objectid, 0,
					   0);
	if (!IS_ERR_OR_NULL(extref)) {
		u32 item_size;
		u32 cur_offset = 0;
		unsigned long base;
		struct inode *victim_parent;

		leaf = path->nodes[0];

		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		base = btrfs_item_ptr_offset(leaf, path->slots[0]);

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *)(base + cur_offset);

			victim_name_len = btrfs_inode_extref_name_len(leaf, extref);

			if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
				goto next;

			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			if (!victim_name)
				return -ENOMEM;
			read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
					   victim_name_len);

			search_key.objectid = inode_objectid;
			search_key.type = BTRFS_INODE_EXTREF_KEY;
			search_key.offset = btrfs_extref_hash(parent_objectid,
							      victim_name,
							      victim_name_len);
			ret = 0;
			if (!backref_in_log(log_root, &search_key,
					    parent_objectid, victim_name,
					    victim_name_len)) {
				ret = -ENOENT;
				victim_parent = read_one_inode(root,
							       parent_objectid);
				if (victim_parent) {
					inc_nlink(inode);
					btrfs_release_path(path);

					ret = btrfs_unlink_inode(trans, root,
								 victim_parent,
								 inode,
								 victim_name,
								 victim_name_len);
					if (!ret)
						ret = btrfs_run_delayed_items(
								trans, root);
				}
				iput(victim_parent);
				kfree(victim_name);
				if (ret)
					return ret;
				*search_done = 1;
				goto again;
			}
			kfree(victim_name);
			if (ret)
				return ret;
next:
			cur_offset += victim_name_len + sizeof(*extref);
		}
		*search_done = 1;
	}
	btrfs_release_path(path);

	/* look for a conflicting sequence number */
	di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
					 ref_index, name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		if (ret)
			return ret;
	}
	btrfs_release_path(path);

	/* look for a conflicting name */
	di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
				   name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		if (ret)
			return ret;
	}
	btrfs_release_path(path);

	return 0;
}

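/*
 * pull the name, index and parent objectid out of an extended inode ref.
 * The name is kmalloc'd here and must be freed by the caller.
 */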
static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			     u32 *namelen, char **name, u64 *index,
			     u64 *parent_objectid)
{
	struct btrfs_inode_extref *extref;

	extref = (struct btrfs_inode_extref *)ref_ptr;

	*namelen = btrfs_inode_extref_name_len(eb, extref);
	*name = kmalloc(*namelen, GFP_NOFS);
	if (*name == NULL)
		return -ENOMEM;

	read_extent_buffer(eb, *name, (unsigned long)&extref->name,
			   *namelen);

	*index = btrfs_inode_extref_index(eb, extref);
	if (parent_objectid)
		*parent_objectid = btrfs_inode_extref_parent(eb, extref);

	return 0;
}

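/*
 * same as extref_get_fields but for the old style inode refs, which have
 * no parent objectid of their own (it comes from the key offset instead).
 */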
static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			  u32 *namelen, char **name, u64 *index)
{
	struct btrfs_inode_ref *ref;

	ref = (struct btrfs_inode_ref *)ref_ptr;

	*namelen = btrfs_inode_ref_name_len(eb, ref);
	*name = kmalloc(*namelen, GFP_NOFS);
	if (*name == NULL)
		return -ENOMEM;

	read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);

	*index = btrfs_inode_ref_index(eb, ref);

	return 0;
}

/*
 * replay one inode back reference item found in the log tree.
 * eb, slot and key refer to the buffer and key found in the log tree.
 * root is the destination we are replaying into, and path is for temp
 * use by this function.  (it should be released on return).
 */
static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_root *log,
				  struct btrfs_path *path,
				  struct extent_buffer *eb, int slot,
				  struct btrfs_key *key)
{
	struct inode *dir = NULL;
	struct inode *inode = NULL;
	unsigned long ref_ptr;
	unsigned long ref_end;
	char *name = NULL;
	int namelen;
	int ret;
	int search_done = 0;
	int log_ref_ver = 0;
	u64 parent_objectid;
	u64 inode_objectid;
	u64 ref_index = 0;
	int ref_struct_size;

	ref_ptr = btrfs_item_ptr_offset(eb, slot);
	ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);

	if (key->type == BTRFS_INODE_EXTREF_KEY) {
		struct btrfs_inode_extref *r;

		ref_struct_size = sizeof(struct btrfs_inode_extref);
		log_ref_ver = 1;
		r = (struct btrfs_inode_extref *)ref_ptr;
		parent_objectid = btrfs_inode_extref_parent(eb, r);
	} else {
		ref_struct_size = sizeof(struct btrfs_inode_ref);
		parent_objectid = key->offset;
	}
	inode_objectid = key->objectid;

	/*
	 * it is possible that we didn't log all the parent directories
	 * for a given inode.  If we don't find the dir, just don't
	 * copy the back ref in.  The link count fixup code will take
	 * care of the rest
	 */
	dir = read_one_inode(root, parent_objectid);
	if (!dir) {
		ret = -ENOENT;
		goto out;
	}

	inode = read_one_inode(root, inode_objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	while (ref_ptr < ref_end) {
		if (log_ref_ver) {
			ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
						&ref_index, &parent_objectid);
			/*
			 * parent object can change from one array
			 * item to another.
			 */
			if (!dir)
				dir = read_one_inode(root, parent_objectid);
			if (!dir) {
				ret = -ENOENT;
				goto out;
			}
		} else {
			ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
					     &ref_index);
		}
		if (ret)
			goto out;

		/* if we already have a perfect match, we're done */
		if (!inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode),
				  ref_index, name, namelen)) {
			/*
			 * look for a conflicting back reference in the
			 * metadata. if we find one we have to unlink that name
			 * of the file before we add our new link.  Later on, we
			 * overwrite any existing back reference, and we don't
			 * want to create dangling pointers in the directory.
			 */

			if (!search_done) {
				ret = __add_inode_ref(trans, root, path, log,
						      dir, inode, eb,
						      inode_objectid,
						      parent_objectid,
						      ref_index, name, namelen,
						      &search_done);
				if (ret) {
					if (ret == 1)
						ret = 0;
					goto out;
				}
			}

			/* insert our name */
			ret = btrfs_add_link(trans, dir, inode, name, namelen,
					     0, ref_index);
			if (ret)
				goto out;

			btrfs_update_inode(trans, root, inode);
		}

		ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
		kfree(name);
		name = NULL;
		if (log_ref_ver) {
			iput(dir);
			dir = NULL;
		}
	}

	/* finally write the back reference in the inode */
	ret = overwrite_item(trans, root, path, eb, slot, key);
out:
	btrfs_release_path(path);
	kfree(name);
	iput(dir);
	iput(inode);
	return ret;
}

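/*
 * insert an orphan item for the given inode number unless one is already
 * there, so that orphan cleanup can finish off the inode after replay.
 */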
static int insert_orphan_item(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root, u64 offset)
{
	int ret;
	ret = btrfs_find_item(root, NULL, BTRFS_ORPHAN_OBJECTID,
			      offset, BTRFS_ORPHAN_ITEM_KEY, NULL);
	if (ret > 0)
		ret = btrfs_insert_orphan_item(trans, root, offset);
	return ret;
}

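/*
 * count how many names in the subvolume point at the inode via extended
 * inode refs.  Returns the count or a negative errno; the old style refs
 * are handled separately by count_inode_refs below.
 */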
static int count_inode_extrefs(struct btrfs_root *root,
			       struct inode *inode, struct btrfs_path *path)
{
	int ret = 0;
	int name_len;
	unsigned int nlink = 0;
	u32 item_size;
	u32 cur_offset;
	u64 inode_objectid = btrfs_ino(inode);
	u64 offset = 0;
	unsigned long ptr;
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;

	while (1) {
		ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
					    &extref, &offset);
		if (ret)
			break;

		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		/* walk the names in this item from its start */
		cur_offset = 0;

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
			name_len = btrfs_inode_extref_name_len(leaf, extref);

			nlink++;

			cur_offset += name_len + sizeof(*extref);
		}

		offset++;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);

	if (ret < 0)
		return ret;
	return nlink;
}

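/*
 * count how many names point at the inode through old style inode refs,
 * walking the ref items from the highest key offset down to zero.
 */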
static int count_inode_refs(struct btrfs_root *root,
			    struct inode *inode, struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	unsigned int nlink = 0;
	unsigned long ptr;
	unsigned long ptr_end;
	int name_len;
	u64 ino = btrfs_ino(inode);

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
process_slot:
		btrfs_item_key_to_cpu(path->nodes[0], &key,
				      path->slots[0]);
		if (key.objectid != ino ||
		    key.type != BTRFS_INODE_REF_KEY)
			break;
		ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
						   path->slots[0]);
		while (ptr < ptr_end) {
			struct btrfs_inode_ref *ref;

			ref = (struct btrfs_inode_ref *)ptr;
			name_len = btrfs_inode_ref_name_len(path->nodes[0],
							    ref);
			ptr = (unsigned long)(ref + 1) + name_len;
			nlink++;
		}

		if (key.offset == 0)
			break;
		if (path->slots[0] > 0) {
			path->slots[0]--;
			goto process_slot;
		}
		key.offset--;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);

	return nlink;
}

/*
 * There are a few corners where the link count of the file can't
 * be properly maintained during replay.  So, instead of adding
 * lots of complexity to the log code, we just scan the backrefs
 * for any file that has been through replay.
 *
 * The scan will update the link count on the inode to reflect the
 * number of back refs found.  If it goes down to zero, the iput
 * will free the inode.
 */
static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct inode *inode)
{
	struct btrfs_path *path;
	int ret;
	u64 nlink = 0;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = count_inode_refs(root, inode, path);
	if (ret < 0)
		goto out;

	nlink = ret;

	ret = count_inode_extrefs(root, inode, path);
	if (ret == -ENOENT)
		ret = 0;

	if (ret < 0)
		goto out;

	nlink += ret;

	ret = 0;

	if (nlink != inode->i_nlink) {
		set_nlink(inode, nlink);
		btrfs_update_inode(trans, root, inode);
	}
	BTRFS_I(inode)->index_cnt = (u64)-1;

	if (inode->i_nlink == 0) {
		if (S_ISDIR(inode->i_mode)) {
			ret = replay_dir_deletes(trans, root, NULL, path,
						 ino, 1);
			if (ret)
				goto out;
		}
		ret = insert_orphan_item(trans, root, ino);
	}

out:
	btrfs_free_path(path);
	return ret;
}

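/*
 * walk all the entries recorded in the fixup dir
 * (BTRFS_TREE_LOG_FIXUP_OBJECTID) and run fixup_inode_link_count on
 * each inode, deleting the fixup items as we go.
 */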
static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = (u64)-1;
	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			break;

		if (ret == 1) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
		    key.type != BTRFS_ORPHAN_ITEM_KEY)
			break;

		ret = btrfs_del_item(trans, root, path);
		if (ret)
			goto out;

		btrfs_release_path(path);
		inode = read_one_inode(root, key.offset);
		if (!inode)
			return -EIO;

		ret = fixup_inode_link_count(trans, root, inode);
		iput(inode);
		if (ret)
			goto out;

		/*
		 * fixup on a directory may create new entries,
		 * make sure we always look for the highest possible
		 * offset
		 */
		key.offset = (u64)-1;
	}
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}
1482
1483
1484 /*
1485 * record a given inode in the fixup dir so we can check its link
1486 * count when replay is done. The link count is incremented here
1487 * so the inode won't go away until we check it
1488 */
1489 static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
1490 struct btrfs_root *root,
1491 struct btrfs_path *path,
1492 u64 objectid)
1493 {
1494 struct btrfs_key key;
1495 int ret = 0;
1496 struct inode *inode;
1497
1498 inode = read_one_inode(root, objectid);
1499 if (!inode)
1500 return -EIO;
1501
1502 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
1503 btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
1504 key.offset = objectid;
1505
1506 ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1507
1508 btrfs_release_path(path);
1509 if (ret == 0) {
1510 if (!inode->i_nlink)
1511 set_nlink(inode, 1);
1512 else
1513 inc_nlink(inode);
1514 ret = btrfs_update_inode(trans, root, inode);
1515 } else if (ret == -EEXIST) {
1516 ret = 0;
1517 } else {
1518 BUG(); /* Logic Error */
1519 }
1520 iput(inode);
1521
1522 return ret;
1523 }
1524
1525 /*
1526 * when replaying the log for a directory, we only insert names
1527 * for inodes that actually exist. This means an fsync on a directory
1528 * does not implicitly fsync all the new files in it
1529 */
1530 static noinline int insert_one_name(struct btrfs_trans_handle *trans,
1531 struct btrfs_root *root,
1532 struct btrfs_path *path,
1533 u64 dirid, u64 index,
1534 char *name, int name_len, u8 type,
1535 struct btrfs_key *location)
1536 {
1537 struct inode *inode;
1538 struct inode *dir;
1539 int ret;
1540
1541 inode = read_one_inode(root, location->objectid);
1542 if (!inode)
1543 return -ENOENT;
1544
1545 dir = read_one_inode(root, dirid);
1546 if (!dir) {
1547 iput(inode);
1548 return -EIO;
1549 }
1550
1551 ret = btrfs_add_link(trans, dir, inode, name, name_len, 1, index);
1552
1553 /* FIXME, put inode into FIXUP list */
1554
1555 iput(inode);
1556 iput(dir);
1557 return ret;
1558 }
1559
1560 /*
1561 * take a single entry in a log directory item and replay it into
1562 * the subvolume.
1563 *
1564 * if a conflicting item exists in the subdirectory already,
1565 * the inode it points to is unlinked and put into the link count
1566 * fix up tree.
1567 *
1568 * If a name from the log points to a file or directory that does
1569 * not exist in the FS, it is skipped. fsyncs on directories
1570 * do not force down inodes inside that directory, just changes to the
1571 * names or unlinks in a directory.
1572 */
1573 static noinline int replay_one_name(struct btrfs_trans_handle *trans,
1574 struct btrfs_root *root,
1575 struct btrfs_path *path,
1576 struct extent_buffer *eb,
1577 struct btrfs_dir_item *di,
1578 struct btrfs_key *key)
1579 {
1580 char *name;
1581 int name_len;
1582 struct btrfs_dir_item *dst_di;
1583 struct btrfs_key found_key;
1584 struct btrfs_key log_key;
1585 struct inode *dir;
1586 u8 log_type;
1587 int exists;
1588 int ret = 0;
1589 bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
1590
1591 dir = read_one_inode(root, key->objectid);
1592 if (!dir)
1593 return -EIO;
1594
1595 name_len = btrfs_dir_name_len(eb, di);
1596 name = kmalloc(name_len, GFP_NOFS);
1597 if (!name) {
1598 ret = -ENOMEM;
1599 goto out;
1600 }
1601
1602 log_type = btrfs_dir_type(eb, di);
1603 read_extent_buffer(eb, name, (unsigned long)(di + 1),
1604 name_len);
1605
1606 btrfs_dir_item_key_to_cpu(eb, di, &log_key);
1607 exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
1608 if (exists == 0)
1609 exists = 1;
1610 else
1611 exists = 0;
1612 btrfs_release_path(path);
1613
1614 if (key->type == BTRFS_DIR_ITEM_KEY) {
1615 dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
1616 name, name_len, 1);
1617 } else if (key->type == BTRFS_DIR_INDEX_KEY) {
1618 dst_di = btrfs_lookup_dir_index_item(trans, root, path,
1619 key->objectid,
1620 key->offset, name,
1621 name_len, 1);
1622 } else {
1623 /* Corruption */
1624 ret = -EINVAL;
1625 goto out;
1626 }
1627 if (IS_ERR_OR_NULL(dst_di)) {
1628 /* we need a sequence number to insert, so we only
1629 * do inserts for the BTRFS_DIR_INDEX_KEY types
1630 */
1631 if (key->type != BTRFS_DIR_INDEX_KEY)
1632 goto out;
1633 goto insert;
1634 }
1635
1636 btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
1637 /* the existing item matches the logged item */
1638 if (found_key.objectid == log_key.objectid &&
1639 found_key.type == log_key.type &&
1640 found_key.offset == log_key.offset &&
1641 btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
1642 goto out;
1643 }
1644
1645 /*
1646 * don't drop the conflicting directory entry if the inode
1647 * for the new entry doesn't exist
1648 */
1649 if (!exists)
1650 goto out;
1651
1652 ret = drop_one_dir_item(trans, root, path, dir, dst_di);
1653 if (ret)
1654 goto out;
1655
1656 if (key->type == BTRFS_DIR_INDEX_KEY)
1657 goto insert;
1658 out:
1659 btrfs_release_path(path);
1660 if (!ret && update_size) {
1661 btrfs_i_size_write(dir, dir->i_size + name_len * 2);
1662 ret = btrfs_update_inode(trans, root, dir);
1663 }
1664 kfree(name);
1665 iput(dir);
1666 return ret;
1667
1668 insert:
1669 btrfs_release_path(path);
1670 ret = insert_one_name(trans, root, path, key->objectid, key->offset,
1671 name, name_len, log_type, &log_key);
1672 if (ret && ret != -ENOENT)
1673 goto out;
1674 update_size = false;
1675 ret = 0;
1676 goto out;
1677 }
1678
1679 /*
1680 * find all the names in a directory item and reconcile them into
1681 * the subvolume. Only BTRFS_DIR_ITEM_KEY types will have more than
1682 * one name in a directory item, but the same code gets used for
1683 * both directory index types
1684 */
1685 static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
1686 struct btrfs_root *root,
1687 struct btrfs_path *path,
1688 struct extent_buffer *eb, int slot,
1689 struct btrfs_key *key)
1690 {
1691 int ret;
1692 u32 item_size = btrfs_item_size_nr(eb, slot);
1693 struct btrfs_dir_item *di;
1694 int name_len;
1695 unsigned long ptr;
1696 unsigned long ptr_end;
1697
1698 ptr = btrfs_item_ptr_offset(eb, slot);
1699 ptr_end = ptr + item_size;
1700 while (ptr < ptr_end) {
1701 di = (struct btrfs_dir_item *)ptr;
1702 if (verify_dir_item(root, eb, di))
1703 return -EIO;
1704 name_len = btrfs_dir_name_len(eb, di);
1705 ret = replay_one_name(trans, root, path, eb, di, key);
1706 if (ret)
1707 return ret;
1708 ptr = (unsigned long)(di + 1);
1709 ptr += name_len;
1710 }
1711 return 0;
1712 }
1713
1714 /*
1715 * directory replay has two parts. There are the standard directory
1716 * items in the log copied from the subvolume, and range items
1717 * created in the log while the subvolume was logged.
1718 *
1719 * The range items tell us which parts of the key space the log
1720 * is authoritative for. During replay, if a key in the subvolume
1721 * directory is in a logged range item, but not actually in the log
1722 * that means it was deleted from the directory before the fsync
1723 * and should be removed.
1724 */
1725 static noinline int find_dir_range(struct btrfs_root *root,
1726 struct btrfs_path *path,
1727 u64 dirid, int key_type,
1728 u64 *start_ret, u64 *end_ret)
1729 {
1730 struct btrfs_key key;
1731 u64 found_end;
1732 struct btrfs_dir_log_item *item;
1733 int ret;
1734 int nritems;
1735
1736 if (*start_ret == (u64)-1)
1737 return 1;
1738
1739 key.objectid = dirid;
1740 key.type = key_type;
1741 key.offset = *start_ret;
1742
1743 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1744 if (ret < 0)
1745 goto out;
1746 if (ret > 0) {
1747 if (path->slots[0] == 0)
1748 goto out;
1749 path->slots[0]--;
1750 }
1751 if (ret != 0)
1752 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1753
1754 if (key.type != key_type || key.objectid != dirid) {
1755 ret = 1;
1756 goto next;
1757 }
1758 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
1759 struct btrfs_dir_log_item);
1760 found_end = btrfs_dir_log_end(path->nodes[0], item);
1761
1762 if (*start_ret >= key.offset && *start_ret <= found_end) {
1763 ret = 0;
1764 *start_ret = key.offset;
1765 *end_ret = found_end;
1766 goto out;
1767 }
1768 ret = 1;
1769 next:
1770 /* check the next slot in the tree to see if it is a valid item */
1771 nritems = btrfs_header_nritems(path->nodes[0]);
1772 if (path->slots[0] >= nritems) {
1773 ret = btrfs_next_leaf(root, path);
1774 if (ret)
1775 goto out;
1776 } else {
1777 path->slots[0]++;
1778 }
1779
1780 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1781
1782 if (key.type != key_type || key.objectid != dirid) {
1783 ret = 1;
1784 goto out;
1785 }
1786 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
1787 struct btrfs_dir_log_item);
1788 found_end = btrfs_dir_log_end(path->nodes[0], item);
1789 *start_ret = key.offset;
1790 *end_ret = found_end;
1791 ret = 0;
1792 out:
1793 btrfs_release_path(path);
1794 return ret;
1795 }
1796
1797 /*
1798 * this looks for a given directory item in the log. If the directory
1799 * item is not in the log, the item is removed and the inode it points
1800 * to is unlinked
1801 */
1802 static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
1803 struct btrfs_root *root,
1804 struct btrfs_root *log,
1805 struct btrfs_path *path,
1806 struct btrfs_path *log_path,
1807 struct inode *dir,
1808 struct btrfs_key *dir_key)
1809 {
1810 int ret;
1811 struct extent_buffer *eb;
1812 int slot;
1813 u32 item_size;
1814 struct btrfs_dir_item *di;
1815 struct btrfs_dir_item *log_di;
1816 int name_len;
1817 unsigned long ptr;
1818 unsigned long ptr_end;
1819 char *name;
1820 struct inode *inode;
1821 struct btrfs_key location;
1822
1823 again:
1824 eb = path->nodes[0];
1825 slot = path->slots[0];
1826 item_size = btrfs_item_size_nr(eb, slot);
1827 ptr = btrfs_item_ptr_offset(eb, slot);
1828 ptr_end = ptr + item_size;
1829 while (ptr < ptr_end) {
1830 di = (struct btrfs_dir_item *)ptr;
1831 if (verify_dir_item(root, eb, di)) {
1832 ret = -EIO;
1833 goto out;
1834 }
1835
1836 name_len = btrfs_dir_name_len(eb, di);
1837 name = kmalloc(name_len, GFP_NOFS);
1838 if (!name) {
1839 ret = -ENOMEM;
1840 goto out;
1841 }
1842 read_extent_buffer(eb, name, (unsigned long)(di + 1),
1843 name_len);
1844 log_di = NULL;
1845 if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) {
1846 log_di = btrfs_lookup_dir_item(trans, log, log_path,
1847 dir_key->objectid,
1848 name, name_len, 0);
1849 } else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) {
1850 log_di = btrfs_lookup_dir_index_item(trans, log,
1851 log_path,
1852 dir_key->objectid,
1853 dir_key->offset,
1854 name, name_len, 0);
1855 }
1856 if (!log_di || (IS_ERR(log_di) && PTR_ERR(log_di) == -ENOENT)) {
1857 btrfs_dir_item_key_to_cpu(eb, di, &location);
1858 btrfs_release_path(path);
1859 btrfs_release_path(log_path);
1860 inode = read_one_inode(root, location.objectid);
1861 if (!inode) {
1862 kfree(name);
1863 return -EIO;
1864 }
1865
1866 ret = link_to_fixup_dir(trans, root,
1867 path, location.objectid);
1868 if (ret) {
1869 kfree(name);
1870 iput(inode);
1871 goto out;
1872 }
1873
1874 inc_nlink(inode);
1875 ret = btrfs_unlink_inode(trans, root, dir, inode,
1876 name, name_len);
1877 if (!ret)
1878 ret = btrfs_run_delayed_items(trans, root);
1879 kfree(name);
1880 iput(inode);
1881 if (ret)
1882 goto out;
1883
1884 /* there might still be more names under this key
1885 * check and repeat if required
1886 */
1887 ret = btrfs_search_slot(NULL, root, dir_key, path,
1888 0, 0);
1889 if (ret == 0)
1890 goto again;
1891 ret = 0;
1892 goto out;
1893 } else if (IS_ERR(log_di)) {
1894 kfree(name);
1895 return PTR_ERR(log_di);
1896 }
1897 btrfs_release_path(log_path);
1898 kfree(name);
1899
1900 ptr = (unsigned long)(di + 1);
1901 ptr += name_len;
1902 }
1903 ret = 0;
1904 out:
1905 btrfs_release_path(path);
1906 btrfs_release_path(log_path);
1907 return ret;
1908 }
1909
1910 /*
1911 * deletion replay happens before we copy any new directory items
1912 * out of the log or out of backreferences from inodes. It
1913 * scans the log to find ranges of keys that log is authoritative for,
1914 * and then scans the directory to find items in those ranges that are
1915 * not present in the log.
1916 *
1917 * Anything we don't find in the log is unlinked and removed from the
1918 * directory.
1919 */
1920 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
1921 struct btrfs_root *root,
1922 struct btrfs_root *log,
1923 struct btrfs_path *path,
1924 u64 dirid, int del_all)
1925 {
1926 u64 range_start;
1927 u64 range_end;
1928 int key_type = BTRFS_DIR_LOG_ITEM_KEY;
1929 int ret = 0;
1930 struct btrfs_key dir_key;
1931 struct btrfs_key found_key;
1932 struct btrfs_path *log_path;
1933 struct inode *dir;
1934
1935 dir_key.objectid = dirid;
1936 dir_key.type = BTRFS_DIR_ITEM_KEY;
1937 log_path = btrfs_alloc_path();
1938 if (!log_path)
1939 return -ENOMEM;
1940
1941 dir = read_one_inode(root, dirid);
1942 /* it isn't an error if the inode isn't there, that can happen
1943 * because we replay the deletes before we copy in the inode item
1944 * from the log
1945 */
1946 if (!dir) {
1947 btrfs_free_path(log_path);
1948 return 0;
1949 }
1950 again:
1951 range_start = 0;
1952 range_end = 0;
1953 while (1) {
1954 if (del_all)
1955 range_end = (u64)-1;
1956 else {
1957 ret = find_dir_range(log, path, dirid, key_type,
1958 &range_start, &range_end);
1959 if (ret != 0)
1960 break;
1961 }
1962
1963 dir_key.offset = range_start;
1964 while (1) {
1965 int nritems;
1966 ret = btrfs_search_slot(NULL, root, &dir_key, path,
1967 0, 0);
1968 if (ret < 0)
1969 goto out;
1970
1971 nritems = btrfs_header_nritems(path->nodes[0]);
1972 if (path->slots[0] >= nritems) {
1973 ret = btrfs_next_leaf(root, path);
1974 if (ret)
1975 break;
1976 }
1977 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1978 path->slots[0]);
1979 if (found_key.objectid != dirid ||
1980 found_key.type != dir_key.type)
1981 goto next_type;
1982
1983 if (found_key.offset > range_end)
1984 break;
1985
1986 ret = check_item_in_log(trans, root, log, path,
1987 log_path, dir,
1988 &found_key);
1989 if (ret)
1990 goto out;
1991 if (found_key.offset == (u64)-1)
1992 break;
1993 dir_key.offset = found_key.offset + 1;
1994 }
1995 btrfs_release_path(path);
1996 if (range_end == (u64)-1)
1997 break;
1998 range_start = range_end + 1;
1999 }
2000
2001 next_type:
2002 ret = 0;
2003 if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
2004 key_type = BTRFS_DIR_LOG_INDEX_KEY;
2005 dir_key.type = BTRFS_DIR_INDEX_KEY;
2006 btrfs_release_path(path);
2007 goto again;
2008 }
2009 out:
2010 btrfs_release_path(path);
2011 btrfs_free_path(log_path);
2012 iput(dir);
2013 return ret;
2014 }
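
/*
 * A worked example of the range logic above (all offsets invented):
 * say the log holds dir-log range items for dirid 257 covering
 * [0, 100] and [200, (u64)-1], plus logged dir entries at offsets
 * 10 and 250. The subvolume directory has entries at 10, 50, 150
 * and 250:
 *
 *	entry 10  -> in [0, 100], found in the log	-> kept
 *	entry 50  -> in [0, 100], missing from the log	-> unlinked
 *	entry 150 -> covered by no range		-> left alone
 *	entry 250 -> in [200, -1], found in the log	-> kept
 *
 * The same sweep then repeats for the index key space once key_type
 * flips to BTRFS_DIR_LOG_INDEX_KEY at next_type.
 */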
2015
2016 /*
2017 * the process_func used to replay items from the log tree. This
2018 * gets called in two different stages. The first stage just looks
2019 * for inodes and makes sure they are all copied into the subvolume.
2020 *
2021 * The second stage copies all the other item types from the log into
2022 * the subvolume. The two stage approach is slower, but gets rid of
2023 * lots of complexity around inodes referencing other inodes that exist
2024 * only in the log (references come from either directory items or inode
2025 * back refs).
2026 */
2027 static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
2028 struct walk_control *wc, u64 gen)
2029 {
2030 int nritems;
2031 struct btrfs_path *path;
2032 struct btrfs_root *root = wc->replay_dest;
2033 struct btrfs_key key;
2034 int level;
2035 int i;
2036 int ret;
2037
2038 ret = btrfs_read_buffer(eb, gen);
2039 if (ret)
2040 return ret;
2041
2042 level = btrfs_header_level(eb);
2043
2044 if (level != 0)
2045 return 0;
2046
2047 path = btrfs_alloc_path();
2048 if (!path)
2049 return -ENOMEM;
2050
2051 nritems = btrfs_header_nritems(eb);
2052 for (i = 0; i < nritems; i++) {
2053 btrfs_item_key_to_cpu(eb, &key, i);
2054
2055 /* inode keys are done during the first stage */
2056 if (key.type == BTRFS_INODE_ITEM_KEY &&
2057 wc->stage == LOG_WALK_REPLAY_INODES) {
2058 struct btrfs_inode_item *inode_item;
2059 u32 mode;
2060
2061 inode_item = btrfs_item_ptr(eb, i,
2062 struct btrfs_inode_item);
2063 mode = btrfs_inode_mode(eb, inode_item);
2064 if (S_ISDIR(mode)) {
2065 ret = replay_dir_deletes(wc->trans,
2066 root, log, path, key.objectid, 0);
2067 if (ret)
2068 break;
2069 }
2070 ret = overwrite_item(wc->trans, root, path,
2071 eb, i, &key);
2072 if (ret)
2073 break;
2074
2075 /* for regular files, make sure corresponding
2076 * orphan item exists. extents past the new EOF
2077 * will be truncated later by orphan cleanup.
2078 */
2079 if (S_ISREG(mode)) {
2080 ret = insert_orphan_item(wc->trans, root,
2081 key.objectid);
2082 if (ret)
2083 break;
2084 }
2085
2086 ret = link_to_fixup_dir(wc->trans, root,
2087 path, key.objectid);
2088 if (ret)
2089 break;
2090 }
2091
2092 if (key.type == BTRFS_DIR_INDEX_KEY &&
2093 wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
2094 ret = replay_one_dir_item(wc->trans, root, path,
2095 eb, i, &key);
2096 if (ret)
2097 break;
2098 }
2099
2100 if (wc->stage < LOG_WALK_REPLAY_ALL)
2101 continue;
2102
2103 /* these keys are simply copied */
2104 if (key.type == BTRFS_XATTR_ITEM_KEY) {
2105 ret = overwrite_item(wc->trans, root, path,
2106 eb, i, &key);
2107 if (ret)
2108 break;
2109 } else if (key.type == BTRFS_INODE_REF_KEY ||
2110 key.type == BTRFS_INODE_EXTREF_KEY) {
2111 ret = add_inode_ref(wc->trans, root, log, path,
2112 eb, i, &key);
2113 if (ret && ret != -ENOENT)
2114 break;
2115 ret = 0;
2116 } else if (key.type == BTRFS_EXTENT_DATA_KEY) {
2117 ret = replay_one_extent(wc->trans, root, path,
2118 eb, i, &key);
2119 if (ret)
2120 break;
2121 } else if (key.type == BTRFS_DIR_ITEM_KEY) {
2122 ret = replay_one_dir_item(wc->trans, root, path,
2123 eb, i, &key);
2124 if (ret)
2125 break;
2126 }
2127 }
2128 btrfs_free_path(path);
2129 return ret;
2130 }
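
/*
 * A small example of why the stages above are ordered the way they
 * are (inode numbers invented): suppose the log holds inode 258 and
 * a DIR_INDEX item in directory 257 naming it. Replaying the name
 * first would reference an inode that does not yet exist in the
 * subvolume. Because LOG_WALK_REPLAY_INODES runs over every leaf
 * before LOG_WALK_REPLAY_DIR_INDEX and LOG_WALK_REPLAY_ALL, inode
 * 258 is guaranteed to be in place before any directory item or
 * back reference tries to point at it.
 */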
2131
2132 static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
2133 struct btrfs_root *root,
2134 struct btrfs_path *path, int *level,
2135 struct walk_control *wc)
2136 {
2137 u64 root_owner;
2138 u64 bytenr;
2139 u64 ptr_gen;
2140 struct extent_buffer *next;
2141 struct extent_buffer *cur;
2142 struct extent_buffer *parent;
2143 u32 blocksize;
2144 int ret = 0;
2145
2146 WARN_ON(*level < 0);
2147 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2148
2149 while (*level > 0) {
2150 WARN_ON(*level < 0);
2151 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2152 cur = path->nodes[*level];
2153
2154 WARN_ON(btrfs_header_level(cur) != *level);
2155
2156 if (path->slots[*level] >=
2157 btrfs_header_nritems(cur))
2158 break;
2159
2160 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
2161 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
2162 blocksize = btrfs_level_size(root, *level - 1);
2163
2164 parent = path->nodes[*level];
2165 root_owner = btrfs_header_owner(parent);
2166
2167 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
2168 if (!next)
2169 return -ENOMEM;
2170
2171 if (*level == 1) {
2172 ret = wc->process_func(root, next, wc, ptr_gen);
2173 if (ret) {
2174 free_extent_buffer(next);
2175 return ret;
2176 }
2177
2178 path->slots[*level]++;
2179 if (wc->free) {
2180 ret = btrfs_read_buffer(next, ptr_gen);
2181 if (ret) {
2182 free_extent_buffer(next);
2183 return ret;
2184 }
2185
2186 if (trans) {
2187 btrfs_tree_lock(next);
2188 btrfs_set_lock_blocking(next);
2189 clean_tree_block(trans, root, next);
2190 btrfs_wait_tree_block_writeback(next);
2191 btrfs_tree_unlock(next);
2192 }
2193
2194 WARN_ON(root_owner !=
2195 BTRFS_TREE_LOG_OBJECTID);
2196 ret = btrfs_free_and_pin_reserved_extent(root,
2197 bytenr, blocksize);
2198 if (ret) {
2199 free_extent_buffer(next);
2200 return ret;
2201 }
2202 }
2203 free_extent_buffer(next);
2204 continue;
2205 }
2206 ret = btrfs_read_buffer(next, ptr_gen);
2207 if (ret) {
2208 free_extent_buffer(next);
2209 return ret;
2210 }
2211
2212 WARN_ON(*level <= 0);
2213 if (path->nodes[*level-1])
2214 free_extent_buffer(path->nodes[*level-1]);
2215 path->nodes[*level-1] = next;
2216 *level = btrfs_header_level(next);
2217 path->slots[*level] = 0;
2218 cond_resched();
2219 }
2220 WARN_ON(*level < 0);
2221 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2222
2223 path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);
2224
2225 cond_resched();
2226 return 0;
2227 }
2228
2229 static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
2230 struct btrfs_root *root,
2231 struct btrfs_path *path, int *level,
2232 struct walk_control *wc)
2233 {
2234 u64 root_owner;
2235 int i;
2236 int slot;
2237 int ret;
2238
2239 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
2240 slot = path->slots[i];
2241 if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
2242 path->slots[i]++;
2243 *level = i;
2244 WARN_ON(*level == 0);
2245 return 0;
2246 } else {
2247 struct extent_buffer *parent;
2248 if (path->nodes[*level] == root->node)
2249 parent = path->nodes[*level];
2250 else
2251 parent = path->nodes[*level + 1];
2252
2253 root_owner = btrfs_header_owner(parent);
2254 ret = wc->process_func(root, path->nodes[*level], wc,
2255 btrfs_header_generation(path->nodes[*level]));
2256 if (ret)
2257 return ret;
2258
2259 if (wc->free) {
2260 struct extent_buffer *next;
2261
2262 next = path->nodes[*level];
2263
2264 if (trans) {
2265 btrfs_tree_lock(next);
2266 btrfs_set_lock_blocking(next);
2267 clean_tree_block(trans, root, next);
2268 btrfs_wait_tree_block_writeback(next);
2269 btrfs_tree_unlock(next);
2270 }
2271
2272 WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
2273 ret = btrfs_free_and_pin_reserved_extent(root,
2274 path->nodes[*level]->start,
2275 path->nodes[*level]->len);
2276 if (ret)
2277 return ret;
2278 }
2279 free_extent_buffer(path->nodes[*level]);
2280 path->nodes[*level] = NULL;
2281 *level = i + 1;
2282 }
2283 }
2284 return 1;
2285 }
2286
2287 /*
2288 * drop the reference count on the tree rooted at 'log'. This traverses
2289 * the tree freeing any blocks that have a ref count of zero after being
2290 * decremented.
2291 */
2292 static int walk_log_tree(struct btrfs_trans_handle *trans,
2293 struct btrfs_root *log, struct walk_control *wc)
2294 {
2295 int ret = 0;
2296 int wret;
2297 int level;
2298 struct btrfs_path *path;
2299 int orig_level;
2300
2301 path = btrfs_alloc_path();
2302 if (!path)
2303 return -ENOMEM;
2304
2305 level = btrfs_header_level(log->node);
2306 orig_level = level;
2307 path->nodes[level] = log->node;
2308 extent_buffer_get(log->node);
2309 path->slots[level] = 0;
2310
2311 while (1) {
2312 wret = walk_down_log_tree(trans, log, path, &level, wc);
2313 if (wret > 0)
2314 break;
2315 if (wret < 0) {
2316 ret = wret;
2317 goto out;
2318 }
2319
2320 wret = walk_up_log_tree(trans, log, path, &level, wc);
2321 if (wret > 0)
2322 break;
2323 if (wret < 0) {
2324 ret = wret;
2325 goto out;
2326 }
2327 }
2328
2329 /* was the root node processed? if not, catch it here */
2330 if (path->nodes[orig_level]) {
2331 ret = wc->process_func(log, path->nodes[orig_level], wc,
2332 btrfs_header_generation(path->nodes[orig_level]));
2333 if (ret)
2334 goto out;
2335 if (wc->free) {
2336 struct extent_buffer *next;
2337
2338 next = path->nodes[orig_level];
2339
2340 if (trans) {
2341 btrfs_tree_lock(next);
2342 btrfs_set_lock_blocking(next);
2343 clean_tree_block(trans, log, next);
2344 btrfs_wait_tree_block_writeback(next);
2345 btrfs_tree_unlock(next);
2346 }
2347
2348 WARN_ON(log->root_key.objectid !=
2349 BTRFS_TREE_LOG_OBJECTID);
2350 ret = btrfs_free_and_pin_reserved_extent(log, next->start,
2351 next->len);
2352 if (ret)
2353 goto out;
2354 }
2355 }
2356
2357 out:
2358 btrfs_free_path(path);
2359 return ret;
2360 }
2361
2362 /*
2363 * helper function to update the item for a given subvolumes log root
2364 * in the tree of log roots
2365 */
2366 static int update_log_root(struct btrfs_trans_handle *trans,
2367 struct btrfs_root *log)
2368 {
2369 int ret;
2370
2371 if (log->log_transid == 1) {
2372 /* insert root item on the first sync */
2373 ret = btrfs_insert_root(trans, log->fs_info->log_root_tree,
2374 &log->root_key, &log->root_item);
2375 } else {
2376 ret = btrfs_update_root(trans, log->fs_info->log_root_tree,
2377 &log->root_key, &log->root_item);
2378 }
2379 return ret;
2380 }
2381
2382 static void wait_log_commit(struct btrfs_trans_handle *trans,
2383 struct btrfs_root *root, int transid)
2384 {
2385 DEFINE_WAIT(wait);
2386 int index = transid % 2;
2387
2388 /*
2389 * we only allow two pending log transactions at a time,
2390 * so we know that if ours is more than 2 older than the
2391 * current transaction, we're done
2392 */
2393 do {
2394 prepare_to_wait(&root->log_commit_wait[index],
2395 &wait, TASK_UNINTERRUPTIBLE);
2396 mutex_unlock(&root->log_mutex);
2397
2398 if (root->log_transid_committed < transid &&
2399 atomic_read(&root->log_commit[index]))
2400 schedule();
2401
2402 finish_wait(&root->log_commit_wait[index], &wait);
2403 mutex_lock(&root->log_mutex);
2404 } while (root->log_transid_committed < transid &&
2405 atomic_read(&root->log_commit[index]));
2406 }
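
/*
 * Sketch of the two-slot scheme used above (transids invented):
 * index = transid % 2, so consecutive log transactions alternate
 * between the two commit slots:
 *
 *	transid:  10  11  12  13
 *	slot:      0   1   0   1
 *
 * A waiter for transid 12 sleeps on log_commit_wait[0] until either
 * log_transid_committed reaches 12 or slot 0's commit flag clears.
 * Since at most two log transactions are in flight, a slot is never
 * reused before its previous occupant has committed.
 */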
2407
2408 static void wait_for_writer(struct btrfs_trans_handle *trans,
2409 struct btrfs_root *root)
2410 {
2411 DEFINE_WAIT(wait);
2412
2413 while (atomic_read(&root->log_writers)) {
2414 prepare_to_wait(&root->log_writer_wait,
2415 &wait, TASK_UNINTERRUPTIBLE);
2416 mutex_unlock(&root->log_mutex);
2417 if (atomic_read(&root->log_writers))
2418 schedule();
2419 mutex_lock(&root->log_mutex);
2420 finish_wait(&root->log_writer_wait, &wait);
2421 }
2422 }
2423
2424 static inline void btrfs_remove_log_ctx(struct btrfs_root *root,
2425 struct btrfs_log_ctx *ctx)
2426 {
2427 if (!ctx)
2428 return;
2429
2430 mutex_lock(&root->log_mutex);
2431 list_del_init(&ctx->list);
2432 mutex_unlock(&root->log_mutex);
2433 }
2434
2435 /*
2436 * Invoked in log mutex context, or the caller must otherwise ensure
2437 * that no other task can access the list.
2438 */
2439 static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
2440 int index, int error)
2441 {
2442 struct btrfs_log_ctx *ctx;
2443
2444 if (!error) {
2445 INIT_LIST_HEAD(&root->log_ctxs[index]);
2446 return;
2447 }
2448
2449 list_for_each_entry(ctx, &root->log_ctxs[index], list)
2450 ctx->log_ret = error;
2451
2452 INIT_LIST_HEAD(&root->log_ctxs[index]);
2453 }
2454
2455 /*
2456 * btrfs_sync_log sends a given tree log down to the disk and
2457 * updates the super blocks to record it. When this call is done,
2458 * you know that any inodes previously logged are safely on disk only
2459 * if it returns 0.
2460 *
2461 * Any other return value means you need to call btrfs_commit_transaction.
2462 * Some of the edge cases for fsyncing directories that have had unlinks
2463 * or renames done in the past mean that sometimes the only safe
2464 * fsync is to commit the whole FS. When btrfs_sync_log returns -EAGAIN,
2465 * that has happened.
2466 */
2467 int btrfs_sync_log(struct btrfs_trans_handle *trans,
2468 struct btrfs_root *root, struct btrfs_log_ctx *ctx)
2469 {
2470 int index1;
2471 int index2;
2472 int mark;
2473 int ret;
2474 struct btrfs_root *log = root->log_root;
2475 struct btrfs_root *log_root_tree = root->fs_info->log_root_tree;
2476 int log_transid = 0;
2477 struct btrfs_log_ctx root_log_ctx;
2478 struct blk_plug plug;
2479
2480 mutex_lock(&root->log_mutex);
2481 log_transid = ctx->log_transid;
2482 if (root->log_transid_committed >= log_transid) {
2483 mutex_unlock(&root->log_mutex);
2484 return ctx->log_ret;
2485 }
2486
2487 index1 = log_transid % 2;
2488 if (atomic_read(&root->log_commit[index1])) {
2489 wait_log_commit(trans, root, log_transid);
2490 mutex_unlock(&root->log_mutex);
2491 return ctx->log_ret;
2492 }
2493 ASSERT(log_transid == root->log_transid);
2494 atomic_set(&root->log_commit[index1], 1);
2495
2496 /* wait for previous tree log sync to complete */
2497 if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
2498 wait_log_commit(trans, root, log_transid - 1);
2499
2500 while (1) {
2501 int batch = atomic_read(&root->log_batch);
2502 /* unless we're on an ssd, wait a bit to let more writers batch into this commit */
2503 if (!btrfs_test_opt(root, SSD) && root->log_multiple_pids) {
2504 mutex_unlock(&root->log_mutex);
2505 schedule_timeout_uninterruptible(1);
2506 mutex_lock(&root->log_mutex);
2507 }
2508 wait_for_writer(trans, root);
2509 if (batch == atomic_read(&root->log_batch))
2510 break;
2511 }
2512
2513 /* bail out if we need to do a full commit */
2514 if (ACCESS_ONCE(root->fs_info->last_trans_log_full_commit) ==
2515 trans->transid) {
2516 ret = -EAGAIN;
2517 btrfs_free_logged_extents(log, log_transid);
2518 mutex_unlock(&root->log_mutex);
2519 goto out;
2520 }
2521
2522 if (log_transid % 2 == 0)
2523 mark = EXTENT_DIRTY;
2524 else
2525 mark = EXTENT_NEW;
2526
2527 /* we start IO on all the marked extents here, but we don't actually
2528 * wait for them until later.
2529 */
2530 blk_start_plug(&plug);
2531 ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark);
2532 if (ret) {
2533 blk_finish_plug(&plug);
2534 btrfs_abort_transaction(trans, root, ret);
2535 btrfs_free_logged_extents(log, log_transid);
2536 ACCESS_ONCE(root->fs_info->last_trans_log_full_commit) =
2537 trans->transid;
2538 mutex_unlock(&root->log_mutex);
2539 goto out;
2540 }
2541
2542 btrfs_set_root_node(&log->root_item, log->node);
2543
2544 root->log_transid++;
2545 log->log_transid = root->log_transid;
2546 root->log_start_pid = 0;
2547 /*
2548 * IO has been started, blocks of the log tree have WRITTEN flag set
2549 * in their headers. New modifications of the log will be written to
2550 * new positions, so it's safe to allow log writers to go in.
2551 */
2552 mutex_unlock(&root->log_mutex);
2553
2554 btrfs_init_log_ctx(&root_log_ctx);
2555
2556 mutex_lock(&log_root_tree->log_mutex);
2557 atomic_inc(&log_root_tree->log_batch);
2558 atomic_inc(&log_root_tree->log_writers);
2559
2560 index2 = log_root_tree->log_transid % 2;
2561 list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
2562 root_log_ctx.log_transid = log_root_tree->log_transid;
2563
2564 mutex_unlock(&log_root_tree->log_mutex);
2565
2566 ret = update_log_root(trans, log);
2567
2568 mutex_lock(&log_root_tree->log_mutex);
2569 if (atomic_dec_and_test(&log_root_tree->log_writers)) {
2570 smp_mb();
2571 if (waitqueue_active(&log_root_tree->log_writer_wait))
2572 wake_up(&log_root_tree->log_writer_wait);
2573 }
2574
2575 if (ret) {
2576 if (!list_empty(&root_log_ctx.list))
2577 list_del_init(&root_log_ctx.list);
2578
2579 blk_finish_plug(&plug);
2580 ACCESS_ONCE(root->fs_info->last_trans_log_full_commit) =
2581 trans->transid;
2582 if (ret != -ENOSPC) {
2583 btrfs_abort_transaction(trans, root, ret);
2584 mutex_unlock(&log_root_tree->log_mutex);
2585 goto out;
2586 }
2587 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2588 btrfs_free_logged_extents(log, log_transid);
2589 mutex_unlock(&log_root_tree->log_mutex);
2590 ret = -EAGAIN;
2591 goto out;
2592 }
2593
2594 if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
2595 mutex_unlock(&log_root_tree->log_mutex);
2596 ret = root_log_ctx.log_ret;
2597 goto out;
2598 }
2599
2600 index2 = root_log_ctx.log_transid % 2;
2601 if (atomic_read(&log_root_tree->log_commit[index2])) {
2602 blk_finish_plug(&plug);
2603 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2604 wait_log_commit(trans, log_root_tree,
2605 root_log_ctx.log_transid);
2606 btrfs_free_logged_extents(log, log_transid);
2607 mutex_unlock(&log_root_tree->log_mutex);
2608 ret = root_log_ctx.log_ret;
2609 goto out;
2610 }
2611 ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid);
2612 atomic_set(&log_root_tree->log_commit[index2], 1);
2613
2614 if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
2615 wait_log_commit(trans, log_root_tree,
2616 root_log_ctx.log_transid - 1);
2617 }
2618
2619 wait_for_writer(trans, log_root_tree);
2620
2621 /*
2622 * now that we've moved on to the tree of log tree roots,
2623 * check the full commit flag again
2624 */
2625 if (ACCESS_ONCE(root->fs_info->last_trans_log_full_commit) ==
2626 trans->transid) {
2627 blk_finish_plug(&plug);
2628 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2629 btrfs_free_logged_extents(log, log_transid);
2630 mutex_unlock(&log_root_tree->log_mutex);
2631 ret = -EAGAIN;
2632 goto out_wake_log_root;
2633 }
2634
2635 ret = btrfs_write_marked_extents(log_root_tree,
2636 &log_root_tree->dirty_log_pages,
2637 EXTENT_DIRTY | EXTENT_NEW);
2638 blk_finish_plug(&plug);
2639 if (ret) {
2640 ACCESS_ONCE(root->fs_info->last_trans_log_full_commit) =
2641 trans->transid;
2642 btrfs_abort_transaction(trans, root, ret);
2643 btrfs_free_logged_extents(log, log_transid);
2644 mutex_unlock(&log_root_tree->log_mutex);
2645 goto out_wake_log_root;
2646 }
2647 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2648 btrfs_wait_marked_extents(log_root_tree,
2649 &log_root_tree->dirty_log_pages,
2650 EXTENT_NEW | EXTENT_DIRTY);
2651 btrfs_wait_logged_extents(log, log_transid);
2652
2653 btrfs_set_super_log_root(root->fs_info->super_for_commit,
2654 log_root_tree->node->start);
2655 btrfs_set_super_log_root_level(root->fs_info->super_for_commit,
2656 btrfs_header_level(log_root_tree->node));
2657
2658 log_root_tree->log_transid++;
2659 mutex_unlock(&log_root_tree->log_mutex);
2660
2661 /*
2662 * nobody else is going to jump in and write the ctree
2663 * super here because the log_commit atomic below is protecting
2664 * us. We must be called with a transaction handle pinning
2665 * the running transaction open, so a full commit can't hop
2666 * in and cause problems either.
2667 */
2668 ret = write_ctree_super(trans, root->fs_info->tree_root, 1);
2669 if (ret) {
2670 ACCESS_ONCE(root->fs_info->last_trans_log_full_commit) =
2671 trans->transid;
2672 btrfs_abort_transaction(trans, root, ret);
2673 goto out_wake_log_root;
2674 }
2675
2676 mutex_lock(&root->log_mutex);
2677 if (root->last_log_commit < log_transid)
2678 root->last_log_commit = log_transid;
2679 mutex_unlock(&root->log_mutex);
2680
2681 out_wake_log_root:
2682 /*
2683 * We needn't get log_mutex here because we are sure all
2684 * the other tasks are blocked.
2685 */
2686 btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);
2687
2688 mutex_lock(&log_root_tree->log_mutex);
2689 log_root_tree->log_transid_committed++;
2690 atomic_set(&log_root_tree->log_commit[index2], 0);
2691 mutex_unlock(&log_root_tree->log_mutex);
2692
2693 if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
2694 wake_up(&log_root_tree->log_commit_wait[index2]);
2695 out:
2696 /* See above. */
2697 btrfs_remove_all_log_ctxs(root, index1, ret);
2698
2699 mutex_lock(&root->log_mutex);
2700 root->log_transid_committed++;
2701 atomic_set(&root->log_commit[index1], 0);
2702 mutex_unlock(&root->log_mutex);
2703
2704 if (waitqueue_active(&root->log_commit_wait[index1]))
2705 wake_up(&root->log_commit_wait[index1]);
2706 return ret;
2707 }
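
/*
 * A sketch of how a caller consumes the contract above, loosely
 * modeled on the fsync path (simplified, error handling abbreviated;
 * not a verbatim copy of fs/btrfs/file.c):
 *
 *	struct btrfs_log_ctx ctx;
 *
 *	btrfs_init_log_ctx(&ctx);
 *	ret = btrfs_log_dentry_safe(trans, root, dentry, &ctx);
 *	if (ret == 0) {
 *		ret = btrfs_sync_log(trans, root, &ctx);
 *		if (ret == 0)
 *			return btrfs_end_transaction(trans, root);
 *	}
 *	return btrfs_commit_transaction(trans, root);
 *
 * i.e. 0 means the log alone made the data safe; -EAGAIN (or any
 * other error) means only a full transaction commit can.
 */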
2708
2709 static void free_log_tree(struct btrfs_trans_handle *trans,
2710 struct btrfs_root *log)
2711 {
2712 int ret;
2713 u64 start;
2714 u64 end;
2715 struct walk_control wc = {
2716 .free = 1,
2717 .process_func = process_one_buffer
2718 };
2719
2720 ret = walk_log_tree(trans, log, &wc);
2721 /* I don't think this can happen but just in case */
2722 if (ret)
2723 btrfs_abort_transaction(trans, log, ret);
2724
2725 while (1) {
2726 ret = find_first_extent_bit(&log->dirty_log_pages,
2727 0, &start, &end, EXTENT_DIRTY | EXTENT_NEW,
2728 NULL);
2729 if (ret)
2730 break;
2731
2732 clear_extent_bits(&log->dirty_log_pages, start, end,
2733 EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS);
2734 }
2735
2736 /*
2737 * We may have short-circuited the log tree with the full commit logic
2738 * and left ordered extents on our list, so clear these out to keep us
2739 * from leaking inodes and memory.
2740 */
2741 btrfs_free_logged_extents(log, 0);
2742 btrfs_free_logged_extents(log, 1);
2743
2744 free_extent_buffer(log->node);
2745 kfree(log);
2746 }
2747
2748 /*
2749 * free all the extents used by the tree log. This should be called
2750 * at commit time of the full transaction
2751 */
2752 int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
2753 {
2754 if (root->log_root) {
2755 free_log_tree(trans, root->log_root);
2756 root->log_root = NULL;
2757 }
2758 return 0;
2759 }
2760
2761 int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
2762 struct btrfs_fs_info *fs_info)
2763 {
2764 if (fs_info->log_root_tree) {
2765 free_log_tree(trans, fs_info->log_root_tree);
2766 fs_info->log_root_tree = NULL;
2767 }
2768 return 0;
2769 }
2770
2771 /*
2772 * If both a file and directory are logged, and unlinks or renames are
2773 * mixed in, we have a few interesting corners:
2774 *
2775 * create file X in dir Y
2776 * link file X to X.link in dir Y
2777 * fsync file X
2778 * unlink file X but leave X.link
2779 * fsync dir Y
2780 *
2781 * After a crash we would expect only X.link to exist. But file X
2782 * didn't get fsync'd again so the log has back refs for X and X.link.
2783 *
2784 * We solve this by removing directory entries and inode backrefs from the
2785 * log when a file that was logged in the current transaction is
2786 * unlinked. Any later fsync will include the updated log entries, and
2787 * we'll be able to reconstruct the proper directory items from backrefs.
2788 *
2789 * This optimization allows us to avoid relogging the entire inode
2790 * or the entire directory.
2791 */
2792 int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
2793 struct btrfs_root *root,
2794 const char *name, int name_len,
2795 struct inode *dir, u64 index)
2796 {
2797 struct btrfs_root *log;
2798 struct btrfs_dir_item *di;
2799 struct btrfs_path *path;
2800 int ret;
2801 int err = 0;
2802 int bytes_del = 0;
2803 u64 dir_ino = btrfs_ino(dir);
2804
2805 if (BTRFS_I(dir)->logged_trans < trans->transid)
2806 return 0;
2807
2808 ret = join_running_log_trans(root);
2809 if (ret)
2810 return 0;
2811
2812 mutex_lock(&BTRFS_I(dir)->log_mutex);
2813
2814 log = root->log_root;
2815 path = btrfs_alloc_path();
2816 if (!path) {
2817 err = -ENOMEM;
2818 goto out_unlock;
2819 }
2820
2821 di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
2822 name, name_len, -1);
2823 if (IS_ERR(di)) {
2824 err = PTR_ERR(di);
2825 goto fail;
2826 }
2827 if (di) {
2828 ret = btrfs_delete_one_dir_name(trans, log, path, di);
2829 bytes_del += name_len;
2830 if (ret) {
2831 err = ret;
2832 goto fail;
2833 }
2834 }
2835 btrfs_release_path(path);
2836 di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
2837 index, name, name_len, -1);
2838 if (IS_ERR(di)) {
2839 err = PTR_ERR(di);
2840 goto fail;
2841 }
2842 if (di) {
2843 ret = btrfs_delete_one_dir_name(trans, log, path, di);
2844 bytes_del += name_len;
2845 if (ret) {
2846 err = ret;
2847 goto fail;
2848 }
2849 }
2850
2851 /* update the directory size in the log to reflect the names
2852 * we have removed
2853 */
2854 if (bytes_del) {
2855 struct btrfs_key key;
2856
2857 key.objectid = dir_ino;
2858 key.offset = 0;
2859 key.type = BTRFS_INODE_ITEM_KEY;
2860 btrfs_release_path(path);
2861
2862 ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
2863 if (ret < 0) {
2864 err = ret;
2865 goto fail;
2866 }
2867 if (ret == 0) {
2868 struct btrfs_inode_item *item;
2869 u64 i_size;
2870
2871 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2872 struct btrfs_inode_item);
2873 i_size = btrfs_inode_size(path->nodes[0], item);
2874 if (i_size > bytes_del)
2875 i_size -= bytes_del;
2876 else
2877 i_size = 0;
2878 btrfs_set_inode_size(path->nodes[0], item, i_size);
2879 btrfs_mark_buffer_dirty(path->nodes[0]);
2880 } else
2881 ret = 0;
2882 btrfs_release_path(path);
2883 }
2884 fail:
2885 btrfs_free_path(path);
2886 out_unlock:
2887 mutex_unlock(&BTRFS_I(dir)->log_mutex);
2888 if (ret == -ENOSPC) {
2889 root->fs_info->last_trans_log_full_commit = trans->transid;
2890 ret = 0;
2891 } else if (ret < 0)
2892 btrfs_abort_transaction(trans, root, ret);
2893
2894 btrfs_end_log_trans(root);
2895
2896 return err;
2897 }
2898
2899 /* see comments for btrfs_del_dir_entries_in_log */
2900 int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
2901 struct btrfs_root *root,
2902 const char *name, int name_len,
2903 struct inode *inode, u64 dirid)
2904 {
2905 struct btrfs_root *log;
2906 u64 index;
2907 int ret;
2908
2909 if (BTRFS_I(inode)->logged_trans < trans->transid)
2910 return 0;
2911
2912 ret = join_running_log_trans(root);
2913 if (ret)
2914 return 0;
2915 log = root->log_root;
2916 mutex_lock(&BTRFS_I(inode)->log_mutex);
2917
2918 ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
2919 dirid, &index);
2920 mutex_unlock(&BTRFS_I(inode)->log_mutex);
2921 if (ret == -ENOSPC) {
2922 root->fs_info->last_trans_log_full_commit = trans->transid;
2923 ret = 0;
2924 } else if (ret < 0 && ret != -ENOENT)
2925 btrfs_abort_transaction(trans, root, ret);
2926 btrfs_end_log_trans(root);
2927
2928 return ret;
2929 }
2930
2931 /*
2932 * creates a range item in the log for 'dirid'. first_offset and
2933 * last_offset tell us which parts of the key space the log should
2934 * be considered authoritative for.
2935 */
2936 static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
2937 struct btrfs_root *log,
2938 struct btrfs_path *path,
2939 int key_type, u64 dirid,
2940 u64 first_offset, u64 last_offset)
2941 {
2942 int ret;
2943 struct btrfs_key key;
2944 struct btrfs_dir_log_item *item;
2945
2946 key.objectid = dirid;
2947 key.offset = first_offset;
2948 if (key_type == BTRFS_DIR_ITEM_KEY)
2949 key.type = BTRFS_DIR_LOG_ITEM_KEY;
2950 else
2951 key.type = BTRFS_DIR_LOG_INDEX_KEY;
2952 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
2953 if (ret)
2954 return ret;
2955
2956 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2957 struct btrfs_dir_log_item);
2958 btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
2959 btrfs_mark_buffer_dirty(path->nodes[0]);
2960 btrfs_release_path(path);
2961 return 0;
2962 }
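
/*
 * What one of these range items looks like, with invented numbers:
 * for dirid 257, key_type BTRFS_DIR_INDEX_KEY, first_offset 3 and
 * last_offset 7 we end up with
 *
 *	key  = (257, BTRFS_DIR_LOG_INDEX_KEY, 3)
 *	item = struct btrfs_dir_log_item with end = 7
 *
 * telling replay that the log is authoritative for index offsets 3
 * through 7 of directory 257: anything in that window that is not in
 * the log was deleted before the fsync and must be removed again.
 */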
2963
2964 /*
2965 * log all the items included in the current transaction for a given
2966 * directory. This also creates the range items in the log tree required
2967 * to replay anything deleted before the fsync
2968 */
2969 static noinline int log_dir_items(struct btrfs_trans_handle *trans,
2970 struct btrfs_root *root, struct inode *inode,
2971 struct btrfs_path *path,
2972 struct btrfs_path *dst_path, int key_type,
2973 u64 min_offset, u64 *last_offset_ret)
2974 {
2975 struct btrfs_key min_key;
2976 struct btrfs_root *log = root->log_root;
2977 struct extent_buffer *src;
2978 int err = 0;
2979 int ret;
2980 int i;
2981 int nritems;
2982 u64 first_offset = min_offset;
2983 u64 last_offset = (u64)-1;
2984 u64 ino = btrfs_ino(inode);
2985
2988 min_key.objectid = ino;
2989 min_key.type = key_type;
2990 min_key.offset = min_offset;
2991
2992 path->keep_locks = 1;
2993
2994 ret = btrfs_search_forward(root, &min_key, path, trans->transid);
2995
2996 /*
2997 * we didn't find anything from this transaction, see if there
2998 * is anything at all
2999 */
3000 if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
3001 min_key.objectid = ino;
3002 min_key.type = key_type;
3003 min_key.offset = (u64)-1;
3004 btrfs_release_path(path);
3005 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3006 if (ret < 0) {
3007 btrfs_release_path(path);
3008 return ret;
3009 }
3010 ret = btrfs_previous_item(root, path, ino, key_type);
3011
3012 /* if ret == 0 there are items for this type,
3013 * create a range to tell us the last key of this type.
3014 * otherwise, there are no items in this directory after
3015 * *min_offset, and we create a range to indicate that.
3016 */
3017 if (ret == 0) {
3018 struct btrfs_key tmp;
3019 btrfs_item_key_to_cpu(path->nodes[0], &tmp,
3020 path->slots[0]);
3021 if (key_type == tmp.type)
3022 first_offset = max(min_offset, tmp.offset) + 1;
3023 }
3024 goto done;
3025 }
3026
3027 /* go backward to find any previous key */
3028 ret = btrfs_previous_item(root, path, ino, key_type);
3029 if (ret == 0) {
3030 struct btrfs_key tmp;
3031 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3032 if (key_type == tmp.type) {
3033 first_offset = tmp.offset;
3034 ret = overwrite_item(trans, log, dst_path,
3035 path->nodes[0], path->slots[0],
3036 &tmp);
3037 if (ret) {
3038 err = ret;
3039 goto done;
3040 }
3041 }
3042 }
3043 btrfs_release_path(path);
3044
3045 /* find the first key from this transaction again */
3046 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3047 if (WARN_ON(ret != 0))
3048 goto done;
3049
3050 /*
3051 * we have a block from this transaction, log every item in it
3052 * from our directory
3053 */
3054 while (1) {
3055 struct btrfs_key tmp;
3056 src = path->nodes[0];
3057 nritems = btrfs_header_nritems(src);
3058 for (i = path->slots[0]; i < nritems; i++) {
3059 btrfs_item_key_to_cpu(src, &min_key, i);
3060
3061 if (min_key.objectid != ino || min_key.type != key_type)
3062 goto done;
3063 ret = overwrite_item(trans, log, dst_path, src, i,
3064 &min_key);
3065 if (ret) {
3066 err = ret;
3067 goto done;
3068 }
3069 }
3070 path->slots[0] = nritems;
3071
3072 /*
3073 * look ahead to the next item and see if it is also
3074 * from this directory and from this transaction
3075 */
3076 ret = btrfs_next_leaf(root, path);
3077 if (ret == 1) {
3078 last_offset = (u64)-1;
3079 goto done;
3080 }
3081 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3082 if (tmp.objectid != ino || tmp.type != key_type) {
3083 last_offset = (u64)-1;
3084 goto done;
3085 }
3086 if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
3087 ret = overwrite_item(trans, log, dst_path,
3088 path->nodes[0], path->slots[0],
3089 &tmp);
3090 if (ret)
3091 err = ret;
3092 else
3093 last_offset = tmp.offset;
3094 goto done;
3095 }
3096 }
3097 done:
3098 btrfs_release_path(path);
3099 btrfs_release_path(dst_path);
3100
3101 if (err == 0) {
3102 *last_offset_ret = last_offset;
3103 /*
3104 * insert the log range keys to indicate where the log
3105 * is valid
3106 */
3107 ret = insert_dir_log_key(trans, log, path, key_type,
3108 ino, first_offset, last_offset);
3109 if (ret)
3110 err = ret;
3111 }
3112 return err;
3113 }
3114
3115 /*
3116 * logging directories is very similar to logging inodes. We find all the items
3117 * from the current transaction and write them to the log.
3118 *
3119 * The recovery code scans the directory in the subvolume, and if it finds a
3120 * key in the range logged that is not present in the log tree, then it means
3121 * that dir entry was unlinked during the transaction.
3122 *
3123 * In order for that scan to work, we must include one key smaller than
3124 * the smallest logged by this transaction and one key larger than the largest
3125 * key logged by this transaction.
3126 */
3127 static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
3128 struct btrfs_root *root, struct inode *inode,
3129 struct btrfs_path *path,
3130 struct btrfs_path *dst_path)
3131 {
3132 u64 min_key;
3133 u64 max_key;
3134 int ret;
3135 int key_type = BTRFS_DIR_ITEM_KEY;
3136
3137 again:
3138 min_key = 0;
3139 max_key = 0;
3140 while (1) {
3141 ret = log_dir_items(trans, root, inode, path,
3142 dst_path, key_type, min_key,
3143 &max_key);
3144 if (ret)
3145 return ret;
3146 if (max_key == (u64)-1)
3147 break;
3148 min_key = max_key + 1;
3149 }
3150
3151 if (key_type == BTRFS_DIR_ITEM_KEY) {
3152 key_type = BTRFS_DIR_INDEX_KEY;
3153 goto again;
3154 }
3155 return 0;
3156 }
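
/*
 * Example of the sweep above (offsets invented): the first
 * log_dir_items() call with min_key == 0 might log a batch of items
 * and hand back max_key == 499, the next call covers [500, ...) and
 * returns (u64)-1, ending the DIR_ITEM pass; the whole walk then
 * repeats for BTRFS_DIR_INDEX_KEY. Every pass leaves behind the
 * dir-log range items that make the recovery scan described above
 * possible.
 */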
3157
3158 /*
3159 * a helper function to drop items from the log before we relog an
3160 * inode. max_key_type indicates the highest item type to remove.
3161 * This cannot be run for file data extents because it does not
3162 * free the extents they point to.
3163 */
3164 static int drop_objectid_items(struct btrfs_trans_handle *trans,
3165 struct btrfs_root *log,
3166 struct btrfs_path *path,
3167 u64 objectid, int max_key_type)
3168 {
3169 int ret;
3170 struct btrfs_key key;
3171 struct btrfs_key found_key;
3172 int start_slot;
3173
3174 key.objectid = objectid;
3175 key.type = max_key_type;
3176 key.offset = (u64)-1;
3177
3178 while (1) {
3179 ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
3180 BUG_ON(ret == 0); /* Logic error */
3181 if (ret < 0)
3182 break;
3183
3184 if (path->slots[0] == 0)
3185 break;
3186
3187 path->slots[0]--;
3188 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
3189 path->slots[0]);
3190
3191 if (found_key.objectid != objectid)
3192 break;
3193
3194 found_key.offset = 0;
3195 found_key.type = 0;
3196 ret = btrfs_bin_search(path->nodes[0], &found_key, 0,
3197 &start_slot);
3198
3199 ret = btrfs_del_items(trans, log, path, start_slot,
3200 path->slots[0] - start_slot + 1);
3201 /*
3202 * If start slot isn't 0 then we don't need to re-search, we've
3203 * found the last guy with the objectid in this tree.
3204 */
3205 if (ret || start_slot != 0)
3206 break;
3207 btrfs_release_path(path);
3208 }
3209 btrfs_release_path(path);
3210 if (ret > 0)
3211 ret = 0;
3212 return ret;
3213 }
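
/*
 * Shape of the deletion walk above: searching for the key
 * (objectid, max_key_type, (u64)-1) always lands one slot past the
 * last matching item, so after path->slots[0]-- we sit on the
 * highest item for the objectid. btrfs_bin_search() with type and
 * offset zeroed finds the first slot for that objectid in the same
 * leaf, and a single btrfs_del_items() call drops the whole
 * [start_slot, slots[0]] run. Only when start_slot is 0 can earlier
 * leaves still hold matching items, which is why the loop only
 * re-searches in that case.
 */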
3214
3215 static void fill_inode_item(struct btrfs_trans_handle *trans,
3216 struct extent_buffer *leaf,
3217 struct btrfs_inode_item *item,
3218 struct inode *inode, int log_inode_only)
3219 {
3220 struct btrfs_map_token token;
3221
3222 btrfs_init_map_token(&token);
3223
3224 if (log_inode_only) {
3225 /* set the generation to zero so the recovery code
3226 * can tell the difference between logging
3227 * just to say 'this inode exists' and logging
3228 * to say 'update this inode with these values'
3229 */
3230 btrfs_set_token_inode_generation(leaf, item, 0, &token);
3231 btrfs_set_token_inode_size(leaf, item, 0, &token);
3232 } else {
3233 btrfs_set_token_inode_generation(leaf, item,
3234 BTRFS_I(inode)->generation,
3235 &token);
3236 btrfs_set_token_inode_size(leaf, item, inode->i_size, &token);
3237 }
3238
3239 btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
3240 btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
3241 btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
3242 btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
3243
3244 btrfs_set_token_timespec_sec(leaf, btrfs_inode_atime(item),
3245 inode->i_atime.tv_sec, &token);
3246 btrfs_set_token_timespec_nsec(leaf, btrfs_inode_atime(item),
3247 inode->i_atime.tv_nsec, &token);
3248
3249 btrfs_set_token_timespec_sec(leaf, btrfs_inode_mtime(item),
3250 inode->i_mtime.tv_sec, &token);
3251 btrfs_set_token_timespec_nsec(leaf, btrfs_inode_mtime(item),
3252 inode->i_mtime.tv_nsec, &token);
3253
3254 btrfs_set_token_timespec_sec(leaf, btrfs_inode_ctime(item),
3255 inode->i_ctime.tv_sec, &token);
3256 btrfs_set_token_timespec_nsec(leaf, btrfs_inode_ctime(item),
3257 inode->i_ctime.tv_nsec, &token);
3258
3259 btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
3260 &token);
3261
3262 btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
3263 btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
3264 btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
3265 btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
3266 btrfs_set_token_inode_block_group(leaf, item, 0, &token);
3267 }
3268
3269 static int log_inode_item(struct btrfs_trans_handle *trans,
3270 struct btrfs_root *log, struct btrfs_path *path,
3271 struct inode *inode)
3272 {
3273 struct btrfs_inode_item *inode_item;
3274 int ret;
3275
3276 ret = btrfs_insert_empty_item(trans, log, path,
3277 &BTRFS_I(inode)->location,
3278 sizeof(*inode_item));
3279 if (ret && ret != -EEXIST)
3280 return ret;
3281 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3282 struct btrfs_inode_item);
3283 fill_inode_item(trans, path->nodes[0], inode_item, inode, 0);
3284 btrfs_release_path(path);
3285 return 0;
3286 }
3287
3288 static noinline int copy_items(struct btrfs_trans_handle *trans,
3289 struct inode *inode,
3290 struct btrfs_path *dst_path,
3291 struct btrfs_path *src_path, u64 *last_extent,
3292 int start_slot, int nr, int inode_only)
3293 {
3294 unsigned long src_offset;
3295 unsigned long dst_offset;
3296 struct btrfs_root *log = BTRFS_I(inode)->root->log_root;
3297 struct btrfs_file_extent_item *extent;
3298 struct btrfs_inode_item *inode_item;
3299 struct extent_buffer *src = src_path->nodes[0];
3300 struct btrfs_key first_key, last_key, key;
3301 int ret;
3302 struct btrfs_key *ins_keys;
3303 u32 *ins_sizes;
3304 char *ins_data;
3305 int i;
3306 struct list_head ordered_sums;
3307 int skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
3308 bool has_extents = false;
3309 bool need_find_last_extent = (*last_extent == 0);
3310 bool done = false;
3311
3312 INIT_LIST_HEAD(&ordered_sums);
3313
3314 ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
3315 nr * sizeof(u32), GFP_NOFS);
3316 if (!ins_data)
3317 return -ENOMEM;
3318
3319 first_key.objectid = (u64)-1;
3320
3321 ins_sizes = (u32 *)ins_data;
3322 ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));
3323
3324 for (i = 0; i < nr; i++) {
3325 ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
3326 btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
3327 }
3328 ret = btrfs_insert_empty_items(trans, log, dst_path,
3329 ins_keys, ins_sizes, nr);
3330 if (ret) {
3331 kfree(ins_data);
3332 return ret;
3333 }
3334
3335 for (i = 0; i < nr; i++, dst_path->slots[0]++) {
3336 dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
3337 dst_path->slots[0]);
3338
3339 src_offset = btrfs_item_ptr_offset(src, start_slot + i);
3340
3341 if (i == nr - 1)
3342 last_key = ins_keys[i];
3343
3344 if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
3345 inode_item = btrfs_item_ptr(dst_path->nodes[0],
3346 dst_path->slots[0],
3347 struct btrfs_inode_item);
3348 fill_inode_item(trans, dst_path->nodes[0], inode_item,
3349 inode, inode_only == LOG_INODE_EXISTS);
3350 } else {
3351 copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
3352 src_offset, ins_sizes[i]);
3353 }
3354
3355 /*
3356 * We set need_find_last_extent here in case we know we were
3357 * processing other items and then walk into the first extent in
3358 * the inode. If we don't hit an extent then nothing changes,
3359 * we'll do the last search the next time around.
3360 */
3361 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY) {
3362 has_extents = true;
3363 if (need_find_last_extent &&
3364 first_key.objectid == (u64)-1)
3365 first_key = ins_keys[i];
3366 } else {
3367 need_find_last_extent = false;
3368 }
3369
3370 /* take a reference on file data extents so that truncates
3371 * or deletes of this inode don't have to relog the inode
3372 * again
3373 */
3374 if (btrfs_key_type(ins_keys + i) == BTRFS_EXTENT_DATA_KEY &&
3375 !skip_csum) {
3376 int found_type;
3377 extent = btrfs_item_ptr(src, start_slot + i,
3378 struct btrfs_file_extent_item);
3379
3380 if (btrfs_file_extent_generation(src, extent) < trans->transid)
3381 continue;
3382
3383 found_type = btrfs_file_extent_type(src, extent);
3384 if (found_type == BTRFS_FILE_EXTENT_REG) {
3385 u64 ds, dl, cs, cl;
3386 ds = btrfs_file_extent_disk_bytenr(src,
3387 extent);
3388 /* ds == 0 is a hole */
3389 if (ds == 0)
3390 continue;
3391
3392 dl = btrfs_file_extent_disk_num_bytes(src,
3393 extent);
3394 cs = btrfs_file_extent_offset(src, extent);
3395 cl = btrfs_file_extent_num_bytes(src,
3396 extent);
3397 if (btrfs_file_extent_compression(src,
3398 extent)) {
3399 cs = 0;
3400 cl = dl;
3401 }
3402
3403 ret = btrfs_lookup_csums_range(
3404 log->fs_info->csum_root,
3405 ds + cs, ds + cs + cl - 1,
3406 &ordered_sums, 0);
3407 if (ret) {
3408 btrfs_release_path(dst_path);
3409 kfree(ins_data);
3410 return ret;
3411 }
3412 }
3413 }
3414 }
3415
3416 btrfs_mark_buffer_dirty(dst_path->nodes[0]);
3417 btrfs_release_path(dst_path);
3418 kfree(ins_data);
3419
3420 /*
3421 * we have to do this after the loop above to avoid changing the
3422 * log tree while trying to change the log tree.
3423 */
3424 ret = 0;
3425 while (!list_empty(&ordered_sums)) {
3426 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
3427 struct btrfs_ordered_sum,
3428 list);
3429 if (!ret)
3430 ret = btrfs_csum_file_blocks(trans, log, sums);
3431 list_del(&sums->list);
3432 kfree(sums);
3433 }
3434
3435 if (!has_extents)
3436 return ret;
3437
3438 /*
3439 * Because we use btrfs_search_forward we could skip leaves that were
3440 * not modified and then assume *last_extent is valid when it really
3441 * isn't. So back up to the previous leaf and read the end of the last
3442 * extent before we go and fill in holes.
3443 */
3444 if (need_find_last_extent) {
3445 u64 len;
3446
3447 ret = btrfs_prev_leaf(BTRFS_I(inode)->root, src_path);
3448 if (ret < 0)
3449 return ret;
3450 if (ret)
3451 goto fill_holes;
3452 if (src_path->slots[0])
3453 src_path->slots[0]--;
3454 src = src_path->nodes[0];
3455 btrfs_item_key_to_cpu(src, &key, src_path->slots[0]);
3456 if (key.objectid != btrfs_ino(inode) ||
3457 key.type != BTRFS_EXTENT_DATA_KEY)
3458 goto fill_holes;
3459 extent = btrfs_item_ptr(src, src_path->slots[0],
3460 struct btrfs_file_extent_item);
3461 if (btrfs_file_extent_type(src, extent) ==
3462 BTRFS_FILE_EXTENT_INLINE) {
3463 len = btrfs_file_extent_inline_len(src,
3464 src_path->slots[0],
3465 extent);
3466 *last_extent = ALIGN(key.offset + len,
3467 log->sectorsize);
3468 } else {
3469 len = btrfs_file_extent_num_bytes(src, extent);
3470 *last_extent = key.offset + len;
3471 }
3472 }
3473 fill_holes:
3474 /* So we did prev_leaf, now we need to move to the next leaf, but a few
3475 * things could have happened
3476 *
3477 * 1) A merge could have happened, so we could currently be on a leaf
3478 * that holds what we were copying in the first place.
3479 * 2) A split could have happened, and now not all of the items we want
3480 * are on the same leaf.
3481 *
3482 * So we need to adjust how we search for holes, we need to drop the
3483 * path and re-search for the first extent key we found, and then walk
3484 * forward until we hit the last one we copied.
3485 */
3486 if (need_find_last_extent) {
3487 /* btrfs_prev_leaf could return 1 without releasing the path */
3488 btrfs_release_path(src_path);
3489 ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &first_key,
3490 src_path, 0, 0);
3491 if (ret < 0)
3492 return ret;
3493 ASSERT(ret == 0);
3494 src = src_path->nodes[0];
3495 i = src_path->slots[0];
3496 } else {
3497 i = start_slot;
3498 }
3499
3500 /*
3501 * Ok so here we need to go through and fill in any holes we may have
3502 * to make sure that holes are punched for those areas in case they had
3503 * extents previously.
3504 */
3505 while (!done) {
3506 u64 offset, len;
3507 u64 extent_end;
3508
3509 if (i >= btrfs_header_nritems(src_path->nodes[0])) {
3510 ret = btrfs_next_leaf(BTRFS_I(inode)->root, src_path);
3511 if (ret < 0)
3512 return ret;
3513 ASSERT(ret == 0);
3514 src = src_path->nodes[0];
3515 i = 0;
3516 }
3517
3518 btrfs_item_key_to_cpu(src, &key, i);
3519 if (!btrfs_comp_cpu_keys(&key, &last_key))
3520 done = true;
3521 if (key.objectid != btrfs_ino(inode) ||
3522 key.type != BTRFS_EXTENT_DATA_KEY) {
3523 i++;
3524 continue;
3525 }
3526 extent = btrfs_item_ptr(src, i, struct btrfs_file_extent_item);
3527 if (btrfs_file_extent_type(src, extent) ==
3528 BTRFS_FILE_EXTENT_INLINE) {
3529 len = btrfs_file_extent_inline_len(src, i, extent);
3530 extent_end = ALIGN(key.offset + len, log->sectorsize);
3531 } else {
3532 len = btrfs_file_extent_num_bytes(src, extent);
3533 extent_end = key.offset + len;
3534 }
3535 i++;
3536
3537 if (*last_extent == key.offset) {
3538 *last_extent = extent_end;
3539 continue;
3540 }
3541 offset = *last_extent;
3542 len = key.offset - *last_extent;
3543 ret = btrfs_insert_file_extent(trans, log, btrfs_ino(inode),
3544 offset, 0, 0, len, 0, len, 0,
3545 0, 0);
3546 if (ret)
3547 break;
3548 *last_extent = offset + len;
3549 }
3550 /*
3551 * Need to let the callers know we dropped the path so they should
3552 * re-search.
3553 */
3554 if (!ret && need_find_last_extent)
3555 ret = 1;
3556 return ret;
3557 }
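
/*
 * A worked example of the hole filling above, with invented offsets
 * and argument meanings read off the btrfs_insert_file_extent() call
 * in the loop: if *last_extent is 4096 and the next EXTENT_DATA key
 * sits at file offset 16384, the gap [4096, 16384) gets an explicit
 * hole:
 *
 *	btrfs_insert_file_extent(trans, log, ino,
 *				 4096,		file offset of the hole
 *				 0, 0,		disk bytenr/bytes: a hole
 *				 12288,		num_bytes
 *				 0, 12288,	extent offset, ram bytes
 *				 0, 0, 0);	no compression/encryption
 *
 * so that replay punches the hole instead of leaving extents from an
 * older version of the file alive in that range.
 */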
3558
3559 static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
3560 {
3561 struct extent_map *em1, *em2;
3562
3563 em1 = list_entry(a, struct extent_map, list);
3564 em2 = list_entry(b, struct extent_map, list);
3565
3566 if (em1->start < em2->start)
3567 return -1;
3568 else if (em1->start > em2->start)
3569 return 1;
3570 return 0;
3571 }
3572
3573 static int log_one_extent(struct btrfs_trans_handle *trans,
3574 struct inode *inode, struct btrfs_root *root,
3575 struct extent_map *em, struct btrfs_path *path,
3576 struct list_head *logged_list)
3577 {
3578 struct btrfs_root *log = root->log_root;
3579 struct btrfs_file_extent_item *fi;
3580 struct extent_buffer *leaf;
3581 struct btrfs_ordered_extent *ordered;
3582 struct list_head ordered_sums;
3583 struct btrfs_map_token token;
3584 struct btrfs_key key;
3585 u64 mod_start = em->mod_start;
3586 u64 mod_len = em->mod_len;
3587 u64 csum_offset;
3588 u64 csum_len;
3589 u64 extent_offset = em->start - em->orig_start;
3590 u64 block_len;
3591 int ret;
3592 bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
3593 int extent_inserted = 0;
3594
3595 INIT_LIST_HEAD(&ordered_sums);
3596 btrfs_init_map_token(&token);
3597
3598 ret = __btrfs_drop_extents(trans, log, inode, path, em->start,
3599 em->start + em->len, NULL, 0, 1,
3600 sizeof(*fi), &extent_inserted);
3601 if (ret)
3602 return ret;
3603
3604 if (!extent_inserted) {
3605 key.objectid = btrfs_ino(inode);
3606 key.type = BTRFS_EXTENT_DATA_KEY;
3607 key.offset = em->start;
3608
3609 ret = btrfs_insert_empty_item(trans, log, path, &key,
3610 sizeof(*fi));
3611 if (ret)
3612 return ret;
3613 }
3614 leaf = path->nodes[0];
3615 fi = btrfs_item_ptr(leaf, path->slots[0],
3616 struct btrfs_file_extent_item);
3617
3618 btrfs_set_token_file_extent_generation(leaf, fi, em->generation,
3619 &token);
3620 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
3621 skip_csum = true;
3622 btrfs_set_token_file_extent_type(leaf, fi,
3623 BTRFS_FILE_EXTENT_PREALLOC,
3624 &token);
3625 } else {
3626 btrfs_set_token_file_extent_type(leaf, fi,
3627 BTRFS_FILE_EXTENT_REG,
3628 &token);
3629 if (em->block_start == EXTENT_MAP_HOLE)
3630 skip_csum = true;
3631 }
3632
3633 block_len = max(em->block_len, em->orig_block_len);
3634 if (em->compress_type != BTRFS_COMPRESS_NONE) {
3635 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
3636 em->block_start,
3637 &token);
3638 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
3639 &token);
3640 } else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
3641 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
3642 em->block_start -
3643 extent_offset, &token);
3644 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
3645 &token);
3646 } else {
3647 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token);
3648 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0,
3649 &token);
3650 }
3651
3652 btrfs_set_token_file_extent_offset(leaf, fi,
3653 em->start - em->orig_start,
3654 &token);
3655 btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token);
3656 btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->ram_bytes, &token);
3657 btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type,
3658 &token);
3659 btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token);
3660 btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token);
3661 btrfs_mark_buffer_dirty(leaf);
3662
3663 btrfs_release_path(path);
3664 if (ret)
3665 return ret;
3667
3668 if (skip_csum)
3669 return 0;
3670
3671 /*
3672 * First check and see if our csums are on our outstanding ordered
3673 * extents.
3674 */
3675 list_for_each_entry(ordered, logged_list, log_list) {
3676 struct btrfs_ordered_sum *sum;
3677
3678 if (!mod_len)
3679 break;
3680
3681 if (ordered->file_offset + ordered->len <= mod_start ||
3682 mod_start + mod_len <= ordered->file_offset)
3683 continue;
3684
3685 /*
3686 * We are going to copy all the csums on this ordered extent, so
3687 * go ahead and adjust mod_start and mod_len in case this
3688 * ordered extent has already been logged.
3689 */
3690 if (ordered->file_offset > mod_start) {
3691 if (ordered->file_offset + ordered->len >=
3692 mod_start + mod_len)
3693 mod_len = ordered->file_offset - mod_start;
3694 /*
3695 * If we have this case
3696 *
3697 * |--------- logged extent ---------|
3698 * |----- ordered extent ----|
3699 *
3700 * Just don't mess with mod_start and mod_len, we'll
3701 * just end up logging more csums than we need and it
3702 * will be ok.
3703 */
3704 } else {
3705 if (ordered->file_offset + ordered->len <
3706 mod_start + mod_len) {
3707 mod_len = (mod_start + mod_len) -
3708 (ordered->file_offset + ordered->len);
3709 mod_start = ordered->file_offset +
3710 ordered->len;
3711 } else {
3712 mod_len = 0;
3713 }
3714 }
3715
3716 /*
3717 * To keep us from looping for the above case of an ordered
3718 * extent that falls inside of the logged extent.
3719 */
3720 if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM,
3721 &ordered->flags))
3722 continue;
3723
3724 if (ordered->csum_bytes_left) {
3725 btrfs_start_ordered_extent(inode, ordered, 0);
3726 wait_event(ordered->wait,
3727 ordered->csum_bytes_left == 0);
3728 }
3729
3730 list_for_each_entry(sum, &ordered->list, list) {
3731 ret = btrfs_csum_file_blocks(trans, log, sum);
3732 if (ret)
3733 goto unlocked;
3734 }
3735
3736 }
3737 unlocked:
3738
3739 if (!mod_len || ret)
3740 return ret;
3741
3742 if (em->compress_type) {
3743 csum_offset = 0;
3744 csum_len = block_len;
3745 } else {
3746 csum_offset = mod_start - em->start;
3747 csum_len = mod_len;
3748 }
3749
3750 /* block start is already adjusted for the file extent offset. */
3751 ret = btrfs_lookup_csums_range(log->fs_info->csum_root,
3752 em->block_start + csum_offset,
3753 em->block_start + csum_offset +
3754 csum_len - 1, &ordered_sums, 0);
3755 if (ret)
3756 return ret;
3757
3758 while (!list_empty(&ordered_sums)) {
3759 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
3760 struct btrfs_ordered_sum,
3761 list);
3762 if (!ret)
3763 ret = btrfs_csum_file_blocks(trans, log, sums);
3764 list_del(&sums->list);
3765 kfree(sums);
3766 }
3767
3768 return ret;
3769 }
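
/*
 * Numeric example of the mod_start/mod_len trimming above (all
 * offsets invented): logging em [0, 100k) while an ordered extent
 * covers [60k, 100k) trims mod_len to 60k; that extent's csums are
 * taken straight from its list and only [0, 60k) is left for the
 * csum tree lookup at the bottom. An ordered extent over [0, 40k)
 * instead takes the else branch, moving mod_start to 40k and
 * shrinking mod_len to 60k. One strictly inside the logged range,
 * say [40k, 60k), leaves the window untouched and we just log a few
 * more csums than strictly necessary.
 */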
3770
3771 static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
3772 struct btrfs_root *root,
3773 struct inode *inode,
3774 struct btrfs_path *path,
3775 struct list_head *logged_list)
3776 {
3777 struct extent_map *em, *n;
3778 struct list_head extents;
3779 struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;
3780 u64 test_gen;
3781 int ret = 0;
3782 int num = 0;
3783
3784 INIT_LIST_HEAD(&extents);
3785
3786 write_lock(&tree->lock);
3787 test_gen = root->fs_info->last_trans_committed;
3788
3789 list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
3790 list_del_init(&em->list);
3791
3792 /*
3793 * Just an arbitrary number: logging gets really CPU intensive
3794 * once we have a lot of extents, and once we have that many
3795 * we are better off just committing the transaction since it
3796 * will be faster.
3797 */
3798 if (++num > 32768) {
3799 list_del_init(&tree->modified_extents);
3800 ret = -EFBIG;
3801 goto process;
3802 }
3803
3804 if (em->generation <= test_gen)
3805 continue;
3806 /* Need a ref to keep it from getting evicted from cache */
3807 atomic_inc(&em->refs);
3808 set_bit(EXTENT_FLAG_LOGGING, &em->flags);
3809 list_add_tail(&em->list, &extents);
3810 num++;
3811 }
3812
3813 list_sort(NULL, &extents, extent_cmp);
3814
3815 process:
3816 while (!list_empty(&extents)) {
3817 em = list_entry(extents.next, struct extent_map, list);
3818
3819 list_del_init(&em->list);
3820
3821 /*
3822 * If we had an error we just need to delete everybody from our
3823 * private list.
3824 */
3825 if (ret) {
3826 clear_em_logging(tree, em);
3827 free_extent_map(em);
3828 continue;
3829 }
3830
3831 write_unlock(&tree->lock);
3832
3833 ret = log_one_extent(trans, inode, root, em, path, logged_list);
3834 write_lock(&tree->lock);
3835 clear_em_logging(tree, em);
3836 free_extent_map(em);
3837 }
3838 WARN_ON(!list_empty(&extents));
3839 write_unlock(&tree->lock);
3840
3841 btrfs_release_path(path);
3842 return ret;
3843 }
3844
3845 /* log a single inode in the tree log.
3846 * At least one parent directory for this inode must exist in the tree
3847 * or be logged already.
3848 *
3849 * Any items from this inode changed by the current transaction are copied
3850 * to the log tree. An extra reference is taken on any extents in this
3851 * file, allowing us to avoid a whole pile of corner cases around logging
3852 * blocks that have been removed from the tree.
3853 *
3854 * See LOG_INODE_ALL and related defines for a description of what inode_only
3855 * does.
3856 *
3857 * This handles both files and directories.
3858 */
3859 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
3860 struct btrfs_root *root, struct inode *inode,
3861 int inode_only)
3862 {
3863 struct btrfs_path *path;
3864 struct btrfs_path *dst_path;
3865 struct btrfs_key min_key;
3866 struct btrfs_key max_key;
3867 struct btrfs_root *log = root->log_root;
3868 struct extent_buffer *src = NULL;
3869 LIST_HEAD(logged_list);
3870 u64 last_extent = 0;
3871 int err = 0;
3872 int ret;
3873 int nritems;
3874 int ins_start_slot = 0;
3875 int ins_nr;
3876 bool fast_search = false;
3877 u64 ino = btrfs_ino(inode);
3878
3879 path = btrfs_alloc_path();
3880 if (!path)
3881 return -ENOMEM;
3882 dst_path = btrfs_alloc_path();
3883 if (!dst_path) {
3884 btrfs_free_path(path);
3885 return -ENOMEM;
3886 }
3887
3888 min_key.objectid = ino;
3889 min_key.type = BTRFS_INODE_ITEM_KEY;
3890 min_key.offset = 0;
3891
3892 max_key.objectid = ino;
3893
	/* today the code can only do partial logging of directories */
	if (S_ISDIR(inode->i_mode) ||
	    (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
		       &BTRFS_I(inode)->runtime_flags) &&
	     inode_only == LOG_INODE_EXISTS))
		max_key.type = BTRFS_XATTR_ITEM_KEY;
	else
		max_key.type = (u8)-1;
	max_key.offset = (u64)-1;

	/* Only run delayed items if we are a dir or a new file */
	if (S_ISDIR(inode->i_mode) ||
	    BTRFS_I(inode)->generation > root->fs_info->last_trans_committed) {
		ret = btrfs_commit_inode_delayed_items(trans, inode);
		if (ret) {
			btrfs_free_path(path);
			btrfs_free_path(dst_path);
			return ret;
		}
	}

	mutex_lock(&BTRFS_I(inode)->log_mutex);

	btrfs_get_logged_extents(inode, &logged_list);

	/*
	 * a brute force approach to making sure we get the most up to date
	 * copies of everything.
	 */
	if (S_ISDIR(inode->i_mode)) {
		int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;

		if (inode_only == LOG_INODE_EXISTS)
			max_key_type = BTRFS_XATTR_ITEM_KEY;
		ret = drop_objectid_items(trans, log, path, ino, max_key_type);
	} else {
		if (test_and_clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
				       &BTRFS_I(inode)->runtime_flags)) {
			clear_bit(BTRFS_INODE_COPY_EVERYTHING,
				  &BTRFS_I(inode)->runtime_flags);
			ret = btrfs_truncate_inode_items(trans, log,
							 inode, 0, 0);
		} else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
					      &BTRFS_I(inode)->runtime_flags) ||
			   inode_only == LOG_INODE_EXISTS) {
			if (inode_only == LOG_INODE_ALL)
				fast_search = true;
			max_key.type = BTRFS_XATTR_ITEM_KEY;
			ret = drop_objectid_items(trans, log, path, ino,
						  max_key.type);
		} else {
			if (inode_only == LOG_INODE_ALL)
				fast_search = true;
			ret = log_inode_item(trans, log, dst_path, inode);
			if (ret) {
				err = ret;
				goto out_unlock;
			}
			goto log_extents;
		}
	}
	if (ret) {
		err = ret;
		goto out_unlock;
	}
	path->keep_locks = 1;

	while (1) {
		ins_nr = 0;
		ret = btrfs_search_forward(root, &min_key,
					   path, trans->transid);
		if (ret != 0)
			break;
again:
		/* note, ins_nr might be > 0 here, cleanup outside the loop */
		if (min_key.objectid != ino)
			break;
		if (min_key.type > max_key.type)
			break;

		src = path->nodes[0];
		if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
			ins_nr++;
			goto next_slot;
		} else if (!ins_nr) {
			ins_start_slot = path->slots[0];
			ins_nr = 1;
			goto next_slot;
		}

		ret = copy_items(trans, inode, dst_path, path, &last_extent,
				 ins_start_slot, ins_nr, inode_only);
		if (ret < 0) {
			err = ret;
			goto out_unlock;
		} else if (ret) {
			ins_nr = 0;
			btrfs_release_path(path);
			continue;
		}
		ins_nr = 1;
		ins_start_slot = path->slots[0];
next_slot:
		nritems = btrfs_header_nritems(path->nodes[0]);
		path->slots[0]++;
		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(path->nodes[0], &min_key,
					      path->slots[0]);
			goto again;
		}
		if (ins_nr) {
			ret = copy_items(trans, inode, dst_path, path,
					 &last_extent, ins_start_slot,
					 ins_nr, inode_only);
			if (ret < 0) {
				err = ret;
				goto out_unlock;
			}
			ret = 0;
			ins_nr = 0;
		}
		btrfs_release_path(path);

		if (min_key.offset < (u64)-1) {
			min_key.offset++;
		} else if (min_key.type < max_key.type) {
			min_key.type++;
			min_key.offset = 0;
		} else {
			break;
		}
	}
	if (ins_nr) {
		ret = copy_items(trans, inode, dst_path, path, &last_extent,
				 ins_start_slot, ins_nr, inode_only);
		if (ret < 0) {
			err = ret;
			goto out_unlock;
		}
		ret = 0;
		ins_nr = 0;
	}

log_extents:
	btrfs_release_path(path);
	btrfs_release_path(dst_path);
	if (fast_search) {
		ret = btrfs_log_changed_extents(trans, root, inode, dst_path,
						&logged_list);
		if (ret) {
			err = ret;
			goto out_unlock;
		}
	} else if (inode_only == LOG_INODE_ALL) {
		struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;
		struct extent_map *em, *n;

		write_lock(&tree->lock);
		list_for_each_entry_safe(em, n, &tree->modified_extents, list)
			list_del_init(&em->list);
		write_unlock(&tree->lock);
	}

	if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
		ret = log_directory_changes(trans, root, inode, path, dst_path);
		if (ret) {
			err = ret;
			goto out_unlock;
		}
	}
	BTRFS_I(inode)->logged_trans = trans->transid;
	BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->last_sub_trans;
out_unlock:
	if (unlikely(err))
		btrfs_put_logged_extents(&logged_list);
	else
		btrfs_submit_logged_extents(&logged_list, log);
	mutex_unlock(&BTRFS_I(inode)->log_mutex);

	btrfs_free_path(path);
	btrfs_free_path(dst_path);
	return err;
}
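
/*
 * Illustrative sketch, not part of the original source: callers must hold a
 * running log transaction, so a hypothetical minimal caller would look
 * roughly like this:
 *
 *	ret = start_log_trans(trans, root, ctx);
 *	if (!ret) {
 *		ret = btrfs_log_inode(trans, root, inode, LOG_INODE_ALL);
 *		btrfs_end_log_trans(root);
 *	}
 */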

/*
 * follow the dentry parent pointers up the chain and see if any
 * of the directories in it require a full commit before they can
 * be logged. Returns zero if nothing special needs to be done or 1 if
 * a full commit is required.
 */
static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
					       struct inode *inode,
					       struct dentry *parent,
					       struct super_block *sb,
					       u64 last_committed)
{
	int ret = 0;
	struct btrfs_root *root;
	struct dentry *old_parent = NULL;
	struct inode *orig_inode = inode;

	/*
	 * for regular files, if the inode is already on disk, we don't
	 * have to worry about the parents at all. This is because
	 * we can use the last_unlink_trans field to record renames
	 * and other fun in this file.
	 */
	if (S_ISREG(inode->i_mode) &&
	    BTRFS_I(inode)->generation <= last_committed &&
	    BTRFS_I(inode)->last_unlink_trans <= last_committed)
		goto out;

	if (!S_ISDIR(inode->i_mode)) {
		if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
			goto out;
		inode = parent->d_inode;
	}

	while (1) {
		/*
		 * If we are logging a directory then we start with our inode,
		 * not our parent's inode, so we need to skip setting the
		 * logged_trans so that further down in the log code we don't
		 * think this inode has already been logged.
		 */
		if (inode != orig_inode)
			BTRFS_I(inode)->logged_trans = trans->transid;
		smp_mb();

		if (BTRFS_I(inode)->last_unlink_trans > last_committed) {
			root = BTRFS_I(inode)->root;

			/*
			 * make sure any commits to the log are forced
			 * to be full commits
			 */
			root->fs_info->last_trans_log_full_commit =
				trans->transid;
			ret = 1;
			break;
		}

		if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
			break;

		if (IS_ROOT(parent))
			break;

		parent = dget_parent(parent);
		dput(old_parent);
		old_parent = parent;
		inode = parent->d_inode;
	}
	dput(old_parent);
out:
	return ret;
}
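
/*
 * Illustrative example, not part of the original source: assume a name was
 * unlinked from directory b in this transaction, which bumped b's
 * last_unlink_trans via btrfs_record_unlink_dir(). A later fsync of
 * b/file walks up here, sees the recent unlink on b, and returns 1 so the
 * fsync falls back to a full transaction commit.
 */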

/*
 * helper function around btrfs_log_inode to make sure newly created
 * parent directories also end up in the log. A minimal inode and backref
 * only logging is done of any parent directories that are newer than
 * the last committed transaction
 */
static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root, struct inode *inode,
				  struct dentry *parent, int exists_only,
				  struct btrfs_log_ctx *ctx)
{
	int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL;
	struct super_block *sb;
	struct dentry *old_parent = NULL;
	int ret = 0;
	u64 last_committed = root->fs_info->last_trans_committed;

	sb = inode->i_sb;

	if (btrfs_test_opt(root, NOTREELOG)) {
		ret = 1;
		goto end_no_trans;
	}

	if (root->fs_info->last_trans_log_full_commit >
	    root->fs_info->last_trans_committed) {
		ret = 1;
		goto end_no_trans;
	}

	if (root != BTRFS_I(inode)->root ||
	    btrfs_root_refs(&root->root_item) == 0) {
		ret = 1;
		goto end_no_trans;
	}

	ret = check_parent_dirs_for_sync(trans, inode, parent,
					 sb, last_committed);
	if (ret)
		goto end_no_trans;

	if (btrfs_inode_in_log(inode, trans->transid)) {
		ret = BTRFS_NO_LOG_SYNC;
		goto end_no_trans;
	}

	ret = start_log_trans(trans, root, ctx);
	if (ret)
		goto end_no_trans;

	ret = btrfs_log_inode(trans, root, inode, inode_only);
	if (ret)
		goto end_trans;

	/*
	 * for regular files, if the inode is already on disk, we don't
	 * have to worry about the parents at all. This is because
	 * we can use the last_unlink_trans field to record renames
	 * and other fun in this file.
	 */
	if (S_ISREG(inode->i_mode) &&
	    BTRFS_I(inode)->generation <= last_committed &&
	    BTRFS_I(inode)->last_unlink_trans <= last_committed) {
		ret = 0;
		goto end_trans;
	}

	inode_only = LOG_INODE_EXISTS;
	while (1) {
		if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
			break;

		inode = parent->d_inode;
		if (root != BTRFS_I(inode)->root)
			break;

		if (BTRFS_I(inode)->generation >
		    root->fs_info->last_trans_committed) {
			ret = btrfs_log_inode(trans, root, inode, inode_only);
			if (ret)
				goto end_trans;
		}
		if (IS_ROOT(parent))
			break;

		parent = dget_parent(parent);
		dput(old_parent);
		old_parent = parent;
	}
	ret = 0;
end_trans:
	dput(old_parent);
	if (ret < 0) {
		root->fs_info->last_trans_log_full_commit = trans->transid;
		ret = 1;
	}

	if (ret)
		btrfs_remove_log_ctx(root, ctx);
	btrfs_end_log_trans(root);
end_no_trans:
	return ret;
}
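
/*
 * Illustrative example of the parent walk above, not part of the original
 * source (assuming a predates the last commit while b and c are new):
 *
 *	mkdir a			(committed in an earlier transaction)
 *	mkdir a/b; mkdir a/b/c	(this transaction)
 *	touch a/b/c/file; fsync a/b/c/file
 *
 * file is logged with LOG_INODE_ALL, then c and b are each logged with
 * LOG_INODE_EXISTS; a is skipped because its generation is old enough.
 */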

/*
 * it is not safe to log a dentry if the chunk root has added new
 * chunks. This returns 0 if the dentry was logged, and 1 otherwise.
 * If this returns 1, you must commit the transaction to safely get your
 * data on disk.
 */
int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct dentry *dentry,
			  struct btrfs_log_ctx *ctx)
{
	struct dentry *parent = dget_parent(dentry);
	int ret;

	ret = btrfs_log_inode_parent(trans, root, dentry->d_inode, parent,
				     0, ctx);
	dput(parent);

	return ret;
}
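
/*
 * Illustrative sketch, not part of the original source: an fsync-style
 * caller (btrfs_sync_file() in file.c is the assumed user) would drive
 * this roughly as follows:
 *
 *	ret = btrfs_log_dentry_safe(trans, root, dentry, &ctx);
 *	if (ret == 0)
 *		sync the log tree (fast path)
 *	else if (ret == BTRFS_NO_LOG_SYNC)
 *		nothing was logged, no log sync needed
 *	else
 *		btrfs_commit_transaction(trans, root);	(full commit fallback)
 */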

/*
 * should be called during mount to read and replay any log trees
 * from the FS
 */
int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_key tmp_key;
	struct btrfs_root *log;
	struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
	struct walk_control wc = {
		.process_func = process_one_buffer,
		.stage = 0,
	};

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	fs_info->log_root_recovering = 1;

	trans = btrfs_start_transaction(fs_info->tree_root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto error;
	}

	wc.trans = trans;
	wc.pin = 1;

	ret = walk_log_tree(trans, log_root_tree, &wc);
	if (ret) {
		btrfs_error(fs_info, ret, "Failed to pin buffers while "
			    "recovering log root tree.");
		goto error;
	}

again:
	key.objectid = BTRFS_TREE_LOG_OBJECTID;
	key.offset = (u64)-1;
	btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);

	while (1) {
		ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);

		if (ret < 0) {
			btrfs_error(fs_info, ret,
				    "Couldn't find tree log root.");
			goto error;
		}
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		btrfs_release_path(path);
		if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
			break;

		log = btrfs_read_fs_root(log_root_tree, &found_key);
		if (IS_ERR(log)) {
			ret = PTR_ERR(log);
			btrfs_error(fs_info, ret,
				    "Couldn't read tree log root.");
			goto error;
		}

		tmp_key.objectid = found_key.offset;
		tmp_key.type = BTRFS_ROOT_ITEM_KEY;
		tmp_key.offset = (u64)-1;

		wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
		if (IS_ERR(wc.replay_dest)) {
			ret = PTR_ERR(wc.replay_dest);
			free_extent_buffer(log->node);
			free_extent_buffer(log->commit_root);
			kfree(log);
			btrfs_error(fs_info, ret, "Couldn't read target root "
				    "for tree log recovery.");
			goto error;
		}

		wc.replay_dest->log_root = log;
		btrfs_record_root_in_trans(trans, wc.replay_dest);
		ret = walk_log_tree(trans, log, &wc);

		if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
			ret = fixup_inode_link_counts(trans, wc.replay_dest,
						      path);
		}

		key.offset = found_key.offset - 1;
		wc.replay_dest->log_root = NULL;
		free_extent_buffer(log->node);
		free_extent_buffer(log->commit_root);
		kfree(log);

		if (ret)
			goto error;

		if (found_key.offset == 0)
			break;
	}
	btrfs_release_path(path);

	/* step one is to pin it all, step two is to replay just inodes */
	if (wc.pin) {
		wc.pin = 0;
		wc.process_func = replay_one_buffer;
		wc.stage = LOG_WALK_REPLAY_INODES;
		goto again;
	}
	/* step three is to replay everything */
	if (wc.stage < LOG_WALK_REPLAY_ALL) {
		wc.stage++;
		goto again;
	}

	btrfs_free_path(path);

	/* step 4: commit the transaction, which also unpins the blocks */
	ret = btrfs_commit_transaction(trans, fs_info->tree_root);
	if (ret)
		return ret;

	free_extent_buffer(log_root_tree->node);
	log_root_tree->log_root = NULL;
	fs_info->log_root_recovering = 0;
	kfree(log_root_tree);

	return 0;
error:
	if (wc.trans)
		btrfs_end_transaction(wc.trans, fs_info->tree_root);
	btrfs_free_path(path);
	return ret;
}
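
/*
 * Illustrative sketch, not part of the original source: the mount path
 * (open_ctree() in disk-io.c is the assumed caller) reads the log root
 * pointed to by the super block and, if one exists, hands it in here:
 *
 *	if (btrfs_super_log_root(disk_super))
 *		ret = btrfs_recover_log_trees(log_tree_root);
 */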

/*
 * there are some corner cases where we want to force a full
 * commit instead of allowing a directory to be logged.
 *
 * They revolve around files that were unlinked from the directory, and
 * this function updates the parent directory so that a full commit is
 * properly done if it is fsync'd later after the unlinks are done.
 */
void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
			     struct inode *dir, struct inode *inode,
			     int for_rename)
{
	/*
	 * when we're logging a file, if it hasn't been renamed
	 * or unlinked, and its inode is fully committed on disk,
	 * we don't have to worry about walking up the directory chain
	 * to log its parents.
	 *
	 * So, we use the last_unlink_trans field to put this transid
	 * into the file. When the file is logged we check it and
	 * don't log the parents if the file is fully on disk.
	 */
	if (S_ISREG(inode->i_mode))
		BTRFS_I(inode)->last_unlink_trans = trans->transid;

	/*
	 * if this directory was already logged any new
	 * names for this file/dir will get recorded
	 */
	smp_mb();
	if (BTRFS_I(dir)->logged_trans == trans->transid)
		return;

	/*
	 * if the inode we're about to unlink was logged,
	 * the log will be properly updated for any new names
	 */
	if (BTRFS_I(inode)->logged_trans == trans->transid)
		return;

	/*
	 * when renaming files across directories, if the directory
	 * we're unlinking from gets fsync'd later on, there's
	 * no way to find the destination directory later and fsync it
	 * properly. So, we have to be conservative and force commits
	 * so the new name gets discovered.
	 */
	if (for_rename)
		goto record;

	/* we can safely do the unlink without any special recording */
	return;

record:
	BTRFS_I(dir)->last_unlink_trans = trans->transid;
}
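
/*
 * Illustrative sketch, not part of the original source: the unlink and
 * rename paths (in inode.c, assumed here) call this before the name is
 * removed, roughly:
 *
 *	btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);	unlink
 *	btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);		rename
 */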

/*
 * Call this after adding a new name for a file and it will properly
 * update the log to reflect the new name.
 *
 * It will return zero if all goes well, and it will return 1 if a
 * full transaction commit is required.
 */
int btrfs_log_new_name(struct btrfs_trans_handle *trans,
		       struct inode *inode, struct inode *old_dir,
		       struct dentry *parent)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;

	/*
	 * this will force the logging code to walk the dentry chain
	 * up for the file
	 */
	if (S_ISREG(inode->i_mode))
		BTRFS_I(inode)->last_unlink_trans = trans->transid;

	/*
	 * if this inode hasn't been logged and the directory we're renaming
	 * it from hasn't been logged, we don't need to log it
	 */
	if (BTRFS_I(inode)->logged_trans <=
	    root->fs_info->last_trans_committed &&
	    (!old_dir || BTRFS_I(old_dir)->logged_trans <=
		    root->fs_info->last_trans_committed))
		return 0;

	return btrfs_log_inode_parent(trans, root, inode, parent, 1, NULL);
}
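
/*
 * Illustrative sketch, not part of the original source: a link or rename
 * path is assumed to call this once the new name is in place, committing
 * when a full commit is required:
 *
 *	if (btrfs_log_new_name(trans, inode, old_dir, parent) == 1)
 *		btrfs_commit_transaction(trans, root);
 */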