fs/btrfs/tree-log.c
1 /*
2 * Copyright (C) 2008 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19 #include <linux/sched.h>
20 #include <linux/slab.h>
21 #include <linux/blkdev.h>
22 #include <linux/list_sort.h>
23 #include "tree-log.h"
24 #include "disk-io.h"
25 #include "locking.h"
26 #include "print-tree.h"
27 #include "backref.h"
28 #include "hash.h"
29 #include "compression.h"
30 #include "qgroup.h"
31
32 /* magic values for the inode_only field in btrfs_log_inode:
33 *
34 * LOG_INODE_ALL means to log everything
35 * LOG_INODE_EXISTS means to log just enough to recreate the inode
36 * during log replay
37 */
38 #define LOG_INODE_ALL 0
39 #define LOG_INODE_EXISTS 1
40
41 /*
42 * directory trouble cases
43 *
44 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
45 * log, we must force a full commit before doing an fsync of the directory
46 * where the unlink was done.
47 * ---> record transid of last unlink/rename per directory
48 *
49 * mkdir foo/some_dir
50 * normal commit
51 * rename foo/some_dir foo2/some_dir
52 * mkdir foo/some_dir
53 * fsync foo/some_dir/some_file
54 *
55 * The fsync above will unlink the original some_dir without recording
56 * it in its new location (foo2). After a crash, some_dir will be gone
57 * unless the fsync of some_file forces a full commit
58 *
59 * 2) we must log any new names for any file or dir that is in the fsync
60 * log. ---> check inode while renaming/linking.
61 *
62 * 2a) we must log any new names for any file or dir during rename
63 * when the directory they are being removed from was logged.
64 * ---> check inode and old parent dir during rename
65 *
66 * 2a is actually the more important variant. Without the extra logging
67 * a crash might unlink the old name without recreating the new one
68 *
69 * 3) after a crash, we must go through any directories with a link count
70 * of zero and redo the rm -rf
71 *
72 * mkdir f1/foo
73 * normal commit
74 * rm -rf f1/foo
75 * fsync(f1)
76 *
77 * The directory f1 was fully removed from the FS, but fsync was never
78 * called on f1, only its parent dir. After a crash the rm -rf must
79 * be replayed. This must be able to recurse down the entire
80 * directory tree. The inode link count fixup code takes care of the
81 * ugly details.
82 */
83
84 /*
85 * stages for the tree walking. The first
86 * stage (0) is to only pin down the blocks we find.
87 * The second stage (1) is to make sure that all the inodes
88 * we find in the log are created in the subvolume.
89 *
90 * The last stage is to deal with directories and links and extents
91 * and all the other fun semantics
92 */
93 #define LOG_WALK_PIN_ONLY 0
94 #define LOG_WALK_REPLAY_INODES 1
95 #define LOG_WALK_REPLAY_DIR_INDEX 2
96 #define LOG_WALK_REPLAY_ALL 3
97
98 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
99 struct btrfs_root *root, struct inode *inode,
100 int inode_only,
101 const loff_t start,
102 const loff_t end,
103 struct btrfs_log_ctx *ctx);
104 static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
105 struct btrfs_root *root,
106 struct btrfs_path *path, u64 objectid);
107 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
108 struct btrfs_root *root,
109 struct btrfs_root *log,
110 struct btrfs_path *path,
111 u64 dirid, int del_all);
112
113 /*
114 * tree logging is a special write ahead log used to make sure that
115 * fsyncs and O_SYNCs can happen without doing full tree commits.
116 *
117 * Full tree commits are expensive because they require commonly
118 * modified blocks to be recowed, creating many dirty pages in the
119 * extent tree and a 4x-6x higher write load than ext3.
120 *
121 * Instead of doing a tree commit on every fsync, we use the
122 * key ranges and transaction ids to find items for a given file or directory
123 * that have changed in this transaction. Those items are copied into
124 * a special tree (one per subvolume root), that tree is written to disk
125 * and then the fsync is considered complete.
126 *
127 * After a crash, items are copied out of the log-tree back into the
128 * subvolume tree. Any file data extents found are recorded in the extent
129 * allocation tree, and the log-tree freed.
130 *
131 * The log tree is read three times: once to pin down all the extents it is
132 * using in ram, once to create all the inodes logged in the tree
133 * and once to do all the other items.
134 */
135
136 /*
137 * start a sub transaction and setup the log tree
138 * this increments the log tree writer count to make the people
139 * syncing the tree wait for us to finish
140 */
141 static int start_log_trans(struct btrfs_trans_handle *trans,
142 struct btrfs_root *root,
143 struct btrfs_log_ctx *ctx)
144 {
145 int ret = 0;
146
147 mutex_lock(&root->log_mutex);
148
149 if (root->log_root) {
150 if (btrfs_need_log_full_commit(root->fs_info, trans)) {
151 ret = -EAGAIN;
152 goto out;
153 }
154
155 if (!root->log_start_pid) {
156 clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
157 root->log_start_pid = current->pid;
158 } else if (root->log_start_pid != current->pid) {
159 set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
160 }
161 } else {
162 mutex_lock(&root->fs_info->tree_log_mutex);
163 if (!root->fs_info->log_root_tree)
164 ret = btrfs_init_log_root_tree(trans, root->fs_info);
165 mutex_unlock(&root->fs_info->tree_log_mutex);
166 if (ret)
167 goto out;
168
169 ret = btrfs_add_log_tree(trans, root);
170 if (ret)
171 goto out;
172
173 clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
174 root->log_start_pid = current->pid;
175 }
176
177 atomic_inc(&root->log_batch);
178 atomic_inc(&root->log_writers);
179 if (ctx) {
180 int index = root->log_transid % 2;
181 list_add_tail(&ctx->list, &root->log_ctxs[index]);
182 ctx->log_transid = root->log_transid;
183 }
184
185 out:
186 mutex_unlock(&root->log_mutex);
187 return ret;
188 }
189
190 /*
191 * returns 0 if there was a log transaction running and we were able
192 * to join, or returns -ENOENT if there was no transaction
193 * in progress
194 */
195 static int join_running_log_trans(struct btrfs_root *root)
196 {
197 int ret = -ENOENT;
198
199 smp_mb();
200 if (!root->log_root)
201 return -ENOENT;
202
203 mutex_lock(&root->log_mutex);
204 if (root->log_root) {
205 ret = 0;
206 atomic_inc(&root->log_writers);
207 }
208 mutex_unlock(&root->log_mutex);
209 return ret;
210 }
211
212 /*
213 * This either makes the current running log transaction wait
214 * until you call btrfs_end_log_trans() or it makes any future
215 * log transactions wait until you call btrfs_end_log_trans()
216 */
217 int btrfs_pin_log_trans(struct btrfs_root *root)
218 {
219 int ret = -ENOENT;
220
221 mutex_lock(&root->log_mutex);
222 atomic_inc(&root->log_writers);
223 mutex_unlock(&root->log_mutex);
224 return ret;
225 }
226
227 /*
228 * indicate we're done making changes to the log tree
229 * and wake up anyone waiting to do a sync
230 */
231 void btrfs_end_log_trans(struct btrfs_root *root)
232 {
233 if (atomic_dec_and_test(&root->log_writers)) {
234 /*
235 * Implicit memory barrier after atomic_dec_and_test
236 */
237 if (waitqueue_active(&root->log_writer_wait))
238 wake_up(&root->log_writer_wait);
239 }
240 }
241
242
243 /*
244 * the walk control struct is used to pass state down the chain when
245 * processing the log tree. The stage field tells us which part
246 * of the log tree processing we are currently doing. The others
247 * are state fields used for that specific part
248 */
249 struct walk_control {
250 /* should we free the extent on disk when done? This is used
251 * at transaction commit time while freeing a log tree
252 */
253 int free;
254
255 /* should we write out the extent buffer? This is used
256 * while flushing the log tree to disk during a sync
257 */
258 int write;
259
260 /* should we wait for the extent buffer io to finish? Also used
261 * while flushing the log tree to disk for a sync
262 */
263 int wait;
264
265 /* pin only walk, we record which extents on disk belong to the
266 * log trees
267 */
268 int pin;
269
270 /* what stage of the replay code we're currently in */
271 int stage;
272
273 /* the root we are currently replaying */
274 struct btrfs_root *replay_dest;
275
276 /* the trans handle for the current replay */
277 struct btrfs_trans_handle *trans;
278
279 /* the function that gets used to process blocks we find in the
280 * tree. Note the extent_buffer might not be up to date when it is
281 * passed in, and it must be checked or read if you need the data
282 * inside it
283 */
284 int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
285 struct walk_control *wc, u64 gen);
286 };
287
288 /*
289 * process_func used to pin down extents, write them or wait on them
290 */
291 static int process_one_buffer(struct btrfs_root *log,
292 struct extent_buffer *eb,
293 struct walk_control *wc, u64 gen)
294 {
295 int ret = 0;
296
297 /*
298 * If this fs is mixed then we need to be able to process the leaves to
299 * pin down any logged extents, so we have to read the block.
300 */
301 if (btrfs_fs_incompat(log->fs_info, MIXED_GROUPS)) {
302 ret = btrfs_read_buffer(eb, gen);
303 if (ret)
304 return ret;
305 }
306
307 if (wc->pin)
308 ret = btrfs_pin_extent_for_log_replay(log->fs_info->extent_root,
309 eb->start, eb->len);
310
311 if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
312 if (wc->pin && btrfs_header_level(eb) == 0)
313 ret = btrfs_exclude_logged_extents(log, eb);
314 if (wc->write)
315 btrfs_write_tree_block(eb);
316 if (wc->wait)
317 btrfs_wait_tree_block_writeback(eb);
318 }
319 return ret;
320 }
321
322 /*
323 * Item overwrite used by replay and tree logging. eb, slot and key all refer
324 * to the src data we are copying out.
325 *
326 * root is the tree we are copying into, and path is a scratch
327 * path for use in this function (it should be released on entry and
328 * will be released on exit).
329 *
330 * If the key is already in the destination tree the existing item is
331 * overwritten. If the existing item isn't big enough, it is extended.
332 * If it is too large, it is truncated.
333 *
334 * If the key isn't in the destination yet, a new item is inserted.
335 */
336 static noinline int overwrite_item(struct btrfs_trans_handle *trans,
337 struct btrfs_root *root,
338 struct btrfs_path *path,
339 struct extent_buffer *eb, int slot,
340 struct btrfs_key *key)
341 {
342 int ret;
343 u32 item_size;
344 u64 saved_i_size = 0;
345 int save_old_i_size = 0;
346 unsigned long src_ptr;
347 unsigned long dst_ptr;
348 int overwrite_root = 0;
349 bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;
350
351 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
352 overwrite_root = 1;
353
354 item_size = btrfs_item_size_nr(eb, slot);
355 src_ptr = btrfs_item_ptr_offset(eb, slot);
356
357 /* look for the key in the destination tree */
358 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
359 if (ret < 0)
360 return ret;
361
362 if (ret == 0) {
363 char *src_copy;
364 char *dst_copy;
365 u32 dst_size = btrfs_item_size_nr(path->nodes[0],
366 path->slots[0]);
367 if (dst_size != item_size)
368 goto insert;
369
370 if (item_size == 0) {
371 btrfs_release_path(path);
372 return 0;
373 }
374 dst_copy = kmalloc(item_size, GFP_NOFS);
375 src_copy = kmalloc(item_size, GFP_NOFS);
376 if (!dst_copy || !src_copy) {
377 btrfs_release_path(path);
378 kfree(dst_copy);
379 kfree(src_copy);
380 return -ENOMEM;
381 }
382
383 read_extent_buffer(eb, src_copy, src_ptr, item_size);
384
385 dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
386 read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
387 item_size);
388 ret = memcmp(dst_copy, src_copy, item_size);
389
390 kfree(dst_copy);
391 kfree(src_copy);
392 /*
393 * they have the same contents, just return, this saves
394 * us from cowing blocks in the destination tree and doing
395 * extra writes that may not have been done by a previous
396 * sync
397 */
398 if (ret == 0) {
399 btrfs_release_path(path);
400 return 0;
401 }
402
403 /*
404 * We need to load the old nbytes into the inode so when we
405 * replay the extents we've logged we get the right nbytes.
406 */
407 if (inode_item) {
408 struct btrfs_inode_item *item;
409 u64 nbytes;
410 u32 mode;
411
412 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
413 struct btrfs_inode_item);
414 nbytes = btrfs_inode_nbytes(path->nodes[0], item);
415 item = btrfs_item_ptr(eb, slot,
416 struct btrfs_inode_item);
417 btrfs_set_inode_nbytes(eb, item, nbytes);
418
419 /*
420 * If this is a directory we need to reset the i_size to
421 * 0 so that we can set it up properly when replaying
422 * the rest of the items in this log.
423 */
424 mode = btrfs_inode_mode(eb, item);
425 if (S_ISDIR(mode))
426 btrfs_set_inode_size(eb, item, 0);
427 }
428 } else if (inode_item) {
429 struct btrfs_inode_item *item;
430 u32 mode;
431
432 /*
433 * New inode, set nbytes to 0 so that the nbytes comes out
434 * properly when we replay the extents.
435 */
436 item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
437 btrfs_set_inode_nbytes(eb, item, 0);
438
439 /*
440 * If this is a directory we need to reset the i_size to 0 so
441 * that we can set it up properly when replaying the rest of
442 * the items in this log.
443 */
444 mode = btrfs_inode_mode(eb, item);
445 if (S_ISDIR(mode))
446 btrfs_set_inode_size(eb, item, 0);
447 }
448 insert:
449 btrfs_release_path(path);
450 /* try to insert the key into the destination tree */
451 path->skip_release_on_error = 1;
452 ret = btrfs_insert_empty_item(trans, root, path,
453 key, item_size);
454 path->skip_release_on_error = 0;
455
456 /* make sure any existing item is the correct size */
457 if (ret == -EEXIST || ret == -EOVERFLOW) {
458 u32 found_size;
459 found_size = btrfs_item_size_nr(path->nodes[0],
460 path->slots[0]);
461 if (found_size > item_size)
462 btrfs_truncate_item(root, path, item_size, 1);
463 else if (found_size < item_size)
464 btrfs_extend_item(root, path,
465 item_size - found_size);
466 } else if (ret) {
467 return ret;
468 }
469 dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
470 path->slots[0]);
471
472 /* don't overwrite an existing inode if the generation number
473 * was logged as zero. This is done when the tree logging code
474 * is just logging an inode to make sure it exists after recovery.
475 *
476 * Also, don't overwrite i_size on directories during replay.
477 * log replay inserts and removes directory items based on the
478 * state of the tree found in the subvolume, and i_size is modified
479 * as it goes
480 */
481 if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
482 struct btrfs_inode_item *src_item;
483 struct btrfs_inode_item *dst_item;
484
485 src_item = (struct btrfs_inode_item *)src_ptr;
486 dst_item = (struct btrfs_inode_item *)dst_ptr;
487
488 if (btrfs_inode_generation(eb, src_item) == 0) {
489 struct extent_buffer *dst_eb = path->nodes[0];
490 const u64 ino_size = btrfs_inode_size(eb, src_item);
491
492 /*
493 * For regular files an ino_size == 0 is used only when
494 * logging that an inode exists, as part of a directory
495 * fsync, and the inode wasn't fsynced before. In this
496 * case don't set the size of the inode in the fs/subvol
497 * tree, otherwise we would be throwing valid data away.
498 */
499 if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
500 S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) &&
501 ino_size != 0) {
502 struct btrfs_map_token token;
503
504 btrfs_init_map_token(&token);
505 btrfs_set_token_inode_size(dst_eb, dst_item,
506 ino_size, &token);
507 }
508 goto no_copy;
509 }
510
511 if (overwrite_root &&
512 S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
513 S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
514 save_old_i_size = 1;
515 saved_i_size = btrfs_inode_size(path->nodes[0],
516 dst_item);
517 }
518 }
519
520 copy_extent_buffer(path->nodes[0], eb, dst_ptr,
521 src_ptr, item_size);
522
523 if (save_old_i_size) {
524 struct btrfs_inode_item *dst_item;
525 dst_item = (struct btrfs_inode_item *)dst_ptr;
526 btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
527 }
528
529 /* make sure the generation is filled in */
530 if (key->type == BTRFS_INODE_ITEM_KEY) {
531 struct btrfs_inode_item *dst_item;
532 dst_item = (struct btrfs_inode_item *)dst_ptr;
533 if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
534 btrfs_set_inode_generation(path->nodes[0], dst_item,
535 trans->transid);
536 }
537 }
538 no_copy:
539 btrfs_mark_buffer_dirty(path->nodes[0]);
540 btrfs_release_path(path);
541 return 0;
542 }
543
544 /*
545 * simple helper to read an inode off the disk from a given root
546 * This can only be called for subvolume roots and not for the log
547 */
548 static noinline struct inode *read_one_inode(struct btrfs_root *root,
549 u64 objectid)
550 {
551 struct btrfs_key key;
552 struct inode *inode;
553
554 key.objectid = objectid;
555 key.type = BTRFS_INODE_ITEM_KEY;
556 key.offset = 0;
557 inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
558 if (IS_ERR(inode)) {
559 inode = NULL;
560 } else if (is_bad_inode(inode)) {
561 iput(inode);
562 inode = NULL;
563 }
564 return inode;
565 }
566
567 /* replays a single extent in 'eb' at 'slot' with 'key' into the
568 * subvolume 'root'. path is released on entry and should be released
569 * on exit.
570 *
571 * extents in the log tree have not been allocated out of the extent
572 * tree yet. So, this completes the allocation, taking a reference
573 * as required if the extent already exists or creating a new extent
574 * if it isn't in the extent allocation tree yet.
575 *
576 * The extent is inserted into the file, dropping any existing extents
577 * from the file that overlap the new one.
578 */
579 static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
580 struct btrfs_root *root,
581 struct btrfs_path *path,
582 struct extent_buffer *eb, int slot,
583 struct btrfs_key *key)
584 {
585 int found_type;
586 u64 extent_end;
587 u64 start = key->offset;
588 u64 nbytes = 0;
589 struct btrfs_file_extent_item *item;
590 struct inode *inode = NULL;
591 unsigned long size;
592 int ret = 0;
593
594 item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
595 found_type = btrfs_file_extent_type(eb, item);
596
597 if (found_type == BTRFS_FILE_EXTENT_REG ||
598 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
599 nbytes = btrfs_file_extent_num_bytes(eb, item);
600 extent_end = start + nbytes;
601
602 /*
603 * We don't add to the inodes nbytes if we are prealloc or a
604 * hole.
605 */
606 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
607 nbytes = 0;
608 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
609 size = btrfs_file_extent_inline_len(eb, slot, item);
610 nbytes = btrfs_file_extent_ram_bytes(eb, item);
611 extent_end = ALIGN(start + size, root->sectorsize);
612 } else {
613 ret = 0;
614 goto out;
615 }
616
617 inode = read_one_inode(root, key->objectid);
618 if (!inode) {
619 ret = -EIO;
620 goto out;
621 }
622
623 /*
624 * first check to see if we already have this extent in the
625 * file. This must be done before the btrfs_drop_extents run
626 * so we don't try to drop this extent.
627 */
628 ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
629 start, 0);
630
631 if (ret == 0 &&
632 (found_type == BTRFS_FILE_EXTENT_REG ||
633 found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
634 struct btrfs_file_extent_item cmp1;
635 struct btrfs_file_extent_item cmp2;
636 struct btrfs_file_extent_item *existing;
637 struct extent_buffer *leaf;
638
639 leaf = path->nodes[0];
640 existing = btrfs_item_ptr(leaf, path->slots[0],
641 struct btrfs_file_extent_item);
642
643 read_extent_buffer(eb, &cmp1, (unsigned long)item,
644 sizeof(cmp1));
645 read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
646 sizeof(cmp2));
647
648 /*
649 * we already have a pointer to this exact extent,
650 * we don't have to do anything
651 */
652 if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
653 btrfs_release_path(path);
654 goto out;
655 }
656 }
657 btrfs_release_path(path);
658
659 /* drop any overlapping extents */
660 ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
661 if (ret)
662 goto out;
663
664 if (found_type == BTRFS_FILE_EXTENT_REG ||
665 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
666 u64 offset;
667 unsigned long dest_offset;
668 struct btrfs_key ins;
669
670 ret = btrfs_insert_empty_item(trans, root, path, key,
671 sizeof(*item));
672 if (ret)
673 goto out;
674 dest_offset = btrfs_item_ptr_offset(path->nodes[0],
675 path->slots[0]);
676 copy_extent_buffer(path->nodes[0], eb, dest_offset,
677 (unsigned long)item, sizeof(*item));
678
679 ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
680 ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
681 ins.type = BTRFS_EXTENT_ITEM_KEY;
682 offset = key->offset - btrfs_file_extent_offset(eb, item);
683
684 /*
685 * Manually record the dirty extent: here we did a shallow
686 * copy of the file extent item and skipped the normal backref
687 * update, modifying the extent tree all by ourselves.
688 * So we need to record the dirty extent for qgroup manually,
689 * as the owner of the file extent changed from the log tree
690 * (doesn't affect qgroup) to the fs/file tree (affects qgroup)
691 */
692 ret = btrfs_qgroup_insert_dirty_extent(trans, root->fs_info,
693 btrfs_file_extent_disk_bytenr(eb, item),
694 btrfs_file_extent_disk_num_bytes(eb, item),
695 GFP_NOFS);
696 if (ret < 0)
697 goto out;
698
699 if (ins.objectid > 0) {
700 u64 csum_start;
701 u64 csum_end;
702 LIST_HEAD(ordered_sums);
703 /*
704 * is this extent already allocated in the extent
705 * allocation tree? If so, just add a reference
706 */
707 ret = btrfs_lookup_data_extent(root, ins.objectid,
708 ins.offset);
709 if (ret == 0) {
710 ret = btrfs_inc_extent_ref(trans, root,
711 ins.objectid, ins.offset,
712 0, root->root_key.objectid,
713 key->objectid, offset);
714 if (ret)
715 goto out;
716 } else {
717 /*
718 * insert the extent pointer in the extent
719 * allocation tree
720 */
721 ret = btrfs_alloc_logged_file_extent(trans,
722 root, root->root_key.objectid,
723 key->objectid, offset, &ins);
724 if (ret)
725 goto out;
726 }
727 btrfs_release_path(path);
728
729 if (btrfs_file_extent_compression(eb, item)) {
730 csum_start = ins.objectid;
731 csum_end = csum_start + ins.offset;
732 } else {
733 csum_start = ins.objectid +
734 btrfs_file_extent_offset(eb, item);
735 csum_end = csum_start +
736 btrfs_file_extent_num_bytes(eb, item);
737 }
738
739 ret = btrfs_lookup_csums_range(root->log_root,
740 csum_start, csum_end - 1,
741 &ordered_sums, 0);
742 if (ret)
743 goto out;
744 /*
745 * Now delete all existing csums in the csum root that
746 * cover our range. We do this because we can have an
747 * extent that is completely referenced by one file
748 * extent item and partially referenced by another
749 * file extent item (like after using the clone or
750 * extent_same ioctls). In this case if we end up doing
751 * the replay of the one that partially references the
752 * extent first, and we do not do the csum deletion
753 * below, we can get 2 csum items in the csum tree that
754 * overlap each other. For example, imagine our log has
755 * the two following file extent items:
756 *
757 * key (257 EXTENT_DATA 409600)
758 * extent data disk byte 12845056 nr 102400
759 * extent data offset 20480 nr 20480 ram 102400
760 *
761 * key (257 EXTENT_DATA 819200)
762 * extent data disk byte 12845056 nr 102400
763 * extent data offset 0 nr 102400 ram 102400
764 *
765 * Where the second one fully references the 100K extent
766 * that starts at disk byte 12845056, and the log tree
767 * has a single csum item that covers the entire range
768 * of the extent:
769 *
770 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
771 *
772 * After the first file extent item is replayed, the
773 * csum tree gets the following csum item:
774 *
775 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
776 *
777 * Which covers the 20K sub-range starting at offset 20K
778 * of our extent. Now when we replay the second file
779 * extent item, if we do not delete existing csum items
780 * that cover any of its blocks, we end up getting two
781 * csum items in our csum tree that overlap each other:
782 *
783 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
784 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
785 *
786 * Which is a problem, because after this anyone trying
787 * to look up the checksum of any block of our
788 * extent starting at an offset of 40K or higher, will
789 * end up looking at the second csum item only, which
790 * does not contain the checksum for any block starting
791 * at offset 40K or higher of our extent.
792 */
793 while (!list_empty(&ordered_sums)) {
794 struct btrfs_ordered_sum *sums;
795 sums = list_entry(ordered_sums.next,
796 struct btrfs_ordered_sum,
797 list);
798 if (!ret)
799 ret = btrfs_del_csums(trans,
800 root->fs_info->csum_root,
801 sums->bytenr,
802 sums->len);
803 if (!ret)
804 ret = btrfs_csum_file_blocks(trans,
805 root->fs_info->csum_root,
806 sums);
807 list_del(&sums->list);
808 kfree(sums);
809 }
810 if (ret)
811 goto out;
812 } else {
813 btrfs_release_path(path);
814 }
815 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
816 /* inline extents are easy, we just overwrite them */
817 ret = overwrite_item(trans, root, path, eb, slot, key);
818 if (ret)
819 goto out;
820 }
821
822 inode_add_bytes(inode, nbytes);
823 ret = btrfs_update_inode(trans, root, inode);
824 out:
825 if (inode)
826 iput(inode);
827 return ret;
828 }
829
830 /*
831 * when cleaning up conflicts between the directory names in the
832 * subvolume, directory names in the log and directory names in the
833 * inode back references, we may have to unlink inodes from directories.
834 *
835 * This is a helper function to do the unlink of a specific directory
836 * item
837 */
838 static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
839 struct btrfs_root *root,
840 struct btrfs_path *path,
841 struct inode *dir,
842 struct btrfs_dir_item *di)
843 {
844 struct inode *inode;
845 char *name;
846 int name_len;
847 struct extent_buffer *leaf;
848 struct btrfs_key location;
849 int ret;
850
851 leaf = path->nodes[0];
852
853 btrfs_dir_item_key_to_cpu(leaf, di, &location);
854 name_len = btrfs_dir_name_len(leaf, di);
855 name = kmalloc(name_len, GFP_NOFS);
856 if (!name)
857 return -ENOMEM;
858
859 read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
860 btrfs_release_path(path);
861
862 inode = read_one_inode(root, location.objectid);
863 if (!inode) {
864 ret = -EIO;
865 goto out;
866 }
867
868 ret = link_to_fixup_dir(trans, root, path, location.objectid);
869 if (ret)
870 goto out;
871
872 ret = btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
873 if (ret)
874 goto out;
875 else
876 ret = btrfs_run_delayed_items(trans, root);
877 out:
878 kfree(name);
879 iput(inode);
880 return ret;
881 }
882
883 /*
884 * helper function to see if a given name and sequence number found
885 * in an inode back reference are already in a directory and correctly
886 * point to this inode
887 */
888 static noinline int inode_in_dir(struct btrfs_root *root,
889 struct btrfs_path *path,
890 u64 dirid, u64 objectid, u64 index,
891 const char *name, int name_len)
892 {
893 struct btrfs_dir_item *di;
894 struct btrfs_key location;
895 int match = 0;
896
897 di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
898 index, name, name_len, 0);
899 if (di && !IS_ERR(di)) {
900 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
901 if (location.objectid != objectid)
902 goto out;
903 } else
904 goto out;
905 btrfs_release_path(path);
906
907 di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
908 if (di && !IS_ERR(di)) {
909 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
910 if (location.objectid != objectid)
911 goto out;
912 } else
913 goto out;
914 match = 1;
915 out:
916 btrfs_release_path(path);
917 return match;
918 }
919
920 /*
921 * helper function to check a log tree for a named back reference in
922 * an inode. This is used to decide if a back reference that is
923 * found in the subvolume conflicts with what we find in the log.
924 *
925 * inode backreferences may have multiple refs in a single item,
926 * during replay we process one reference at a time, and we don't
927 * want to delete valid links to a file from the subvolume if that
928 * link is also in the log.
929 */
930 static noinline int backref_in_log(struct btrfs_root *log,
931 struct btrfs_key *key,
932 u64 ref_objectid,
933 const char *name, int namelen)
934 {
935 struct btrfs_path *path;
936 struct btrfs_inode_ref *ref;
937 unsigned long ptr;
938 unsigned long ptr_end;
939 unsigned long name_ptr;
940 int found_name_len;
941 int item_size;
942 int ret;
943 int match = 0;
944
945 path = btrfs_alloc_path();
946 if (!path)
947 return -ENOMEM;
948
949 ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
950 if (ret != 0)
951 goto out;
952
953 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
954
955 if (key->type == BTRFS_INODE_EXTREF_KEY) {
956 if (btrfs_find_name_in_ext_backref(path, ref_objectid,
957 name, namelen, NULL))
958 match = 1;
959
960 goto out;
961 }
962
963 item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
964 ptr_end = ptr + item_size;
965 while (ptr < ptr_end) {
966 ref = (struct btrfs_inode_ref *)ptr;
967 found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref);
968 if (found_name_len == namelen) {
969 name_ptr = (unsigned long)(ref + 1);
970 ret = memcmp_extent_buffer(path->nodes[0], name,
971 name_ptr, namelen);
972 if (ret == 0) {
973 match = 1;
974 goto out;
975 }
976 }
977 ptr = (unsigned long)(ref + 1) + found_name_len;
978 }
979 out:
980 btrfs_free_path(path);
981 return match;
982 }
983
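/*
 * helper for add_inode_ref(): resolve conflicts for a single name being
 * replayed.  Any old style or extended back reference for this inode and
 * parent that is not present in the log is unlinked, and so is any
 * directory item that clashes with the (name, index) pair we want to add.
 * Returns 1 if the back ref is for the root directory itself, 0 on
 * success and a negative errno on failure.
 */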
984 static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
985 struct btrfs_root *root,
986 struct btrfs_path *path,
987 struct btrfs_root *log_root,
988 struct inode *dir, struct inode *inode,
989 struct extent_buffer *eb,
990 u64 inode_objectid, u64 parent_objectid,
991 u64 ref_index, char *name, int namelen,
992 int *search_done)
993 {
994 int ret;
995 char *victim_name;
996 int victim_name_len;
997 struct extent_buffer *leaf;
998 struct btrfs_dir_item *di;
999 struct btrfs_key search_key;
1000 struct btrfs_inode_extref *extref;
1001
1002 again:
1003 /* Search old style refs */
1004 search_key.objectid = inode_objectid;
1005 search_key.type = BTRFS_INODE_REF_KEY;
1006 search_key.offset = parent_objectid;
1007 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
1008 if (ret == 0) {
1009 struct btrfs_inode_ref *victim_ref;
1010 unsigned long ptr;
1011 unsigned long ptr_end;
1012
1013 leaf = path->nodes[0];
1014
1015 /* are we trying to overwrite a back ref for the root directory?
1016 * If so, just jump out, we're done
1017 */
1018 if (search_key.objectid == search_key.offset)
1019 return 1;
1020
1021 /* check all the names in this back reference to see
1022 * if they are in the log. If so, we allow them to stay;
1023 * otherwise they must be unlinked as a conflict
1024 */
1025 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1026 ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
1027 while (ptr < ptr_end) {
1028 victim_ref = (struct btrfs_inode_ref *)ptr;
1029 victim_name_len = btrfs_inode_ref_name_len(leaf,
1030 victim_ref);
1031 victim_name = kmalloc(victim_name_len, GFP_NOFS);
1032 if (!victim_name)
1033 return -ENOMEM;
1034
1035 read_extent_buffer(leaf, victim_name,
1036 (unsigned long)(victim_ref + 1),
1037 victim_name_len);
1038
1039 if (!backref_in_log(log_root, &search_key,
1040 parent_objectid,
1041 victim_name,
1042 victim_name_len)) {
1043 inc_nlink(inode);
1044 btrfs_release_path(path);
1045
1046 ret = btrfs_unlink_inode(trans, root, dir,
1047 inode, victim_name,
1048 victim_name_len);
1049 kfree(victim_name);
1050 if (ret)
1051 return ret;
1052 ret = btrfs_run_delayed_items(trans, root);
1053 if (ret)
1054 return ret;
1055 *search_done = 1;
1056 goto again;
1057 }
1058 kfree(victim_name);
1059
1060 ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
1061 }
1062
1063 /*
1064 * NOTE: we have searched the root tree and checked the
1065 * corresponding ref, so there is no need to check it again.
1066 */
1067 *search_done = 1;
1068 }
1069 btrfs_release_path(path);
1070
1071 /* Same search but for extended refs */
1072 extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
1073 inode_objectid, parent_objectid, 0,
1074 0);
1075 if (!IS_ERR_OR_NULL(extref)) {
1076 u32 item_size;
1077 u32 cur_offset = 0;
1078 unsigned long base;
1079 struct inode *victim_parent;
1080
1081 leaf = path->nodes[0];
1082
1083 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1084 base = btrfs_item_ptr_offset(leaf, path->slots[0]);
1085
1086 while (cur_offset < item_size) {
1087 extref = (struct btrfs_inode_extref *)(base + cur_offset);
1088
1089 victim_name_len = btrfs_inode_extref_name_len(leaf, extref);
1090
1091 if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
1092 goto next;
1093
1094 victim_name = kmalloc(victim_name_len, GFP_NOFS);
1095 if (!victim_name)
1096 return -ENOMEM;
1097 read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
1098 victim_name_len);
1099
1100 search_key.objectid = inode_objectid;
1101 search_key.type = BTRFS_INODE_EXTREF_KEY;
1102 search_key.offset = btrfs_extref_hash(parent_objectid,
1103 victim_name,
1104 victim_name_len);
1105 ret = 0;
1106 if (!backref_in_log(log_root, &search_key,
1107 parent_objectid, victim_name,
1108 victim_name_len)) {
1109 ret = -ENOENT;
1110 victim_parent = read_one_inode(root,
1111 parent_objectid);
1112 if (victim_parent) {
1113 inc_nlink(inode);
1114 btrfs_release_path(path);
1115
1116 ret = btrfs_unlink_inode(trans, root,
1117 victim_parent,
1118 inode,
1119 victim_name,
1120 victim_name_len);
1121 if (!ret)
1122 ret = btrfs_run_delayed_items(
1123 trans, root);
1124 }
1125 iput(victim_parent);
1126 kfree(victim_name);
1127 if (ret)
1128 return ret;
1129 *search_done = 1;
1130 goto again;
1131 }
1132 kfree(victim_name);
1133 if (ret)
1134 return ret;
1135 next:
1136 cur_offset += victim_name_len + sizeof(*extref);
1137 }
1138 *search_done = 1;
1139 }
1140 btrfs_release_path(path);
1141
1142 /* look for a conflicting sequence number */
1143 di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
1144 ref_index, name, namelen, 0);
1145 if (di && !IS_ERR(di)) {
1146 ret = drop_one_dir_item(trans, root, path, dir, di);
1147 if (ret)
1148 return ret;
1149 }
1150 btrfs_release_path(path);
1151
1152 /* look for a conflicting name */
1153 di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
1154 name, namelen, 0);
1155 if (di && !IS_ERR(di)) {
1156 ret = drop_one_dir_item(trans, root, path, dir, di);
1157 if (ret)
1158 return ret;
1159 }
1160 btrfs_release_path(path);
1161
1162 return 0;
1163 }
1164
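/*
 * pull the name, directory index and parent objectid out of a single
 * extended back reference item.  The name buffer is allocated here and
 * must be freed by the caller.
 */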
1165 static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
1166 u32 *namelen, char **name, u64 *index,
1167 u64 *parent_objectid)
1168 {
1169 struct btrfs_inode_extref *extref;
1170
1171 extref = (struct btrfs_inode_extref *)ref_ptr;
1172
1173 *namelen = btrfs_inode_extref_name_len(eb, extref);
1174 *name = kmalloc(*namelen, GFP_NOFS);
1175 if (*name == NULL)
1176 return -ENOMEM;
1177
1178 read_extent_buffer(eb, *name, (unsigned long)&extref->name,
1179 *namelen);
1180
1181 *index = btrfs_inode_extref_index(eb, extref);
1182 if (parent_objectid)
1183 *parent_objectid = btrfs_inode_extref_parent(eb, extref);
1184
1185 return 0;
1186 }
1187
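/*
 * same as extref_get_fields() but for an old style inode back reference,
 * which carries no parent objectid of its own (that comes from the key
 * offset instead).
 */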
1188 static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
1189 u32 *namelen, char **name, u64 *index)
1190 {
1191 struct btrfs_inode_ref *ref;
1192
1193 ref = (struct btrfs_inode_ref *)ref_ptr;
1194
1195 *namelen = btrfs_inode_ref_name_len(eb, ref);
1196 *name = kmalloc(*namelen, GFP_NOFS);
1197 if (*name == NULL)
1198 return -ENOMEM;
1199
1200 read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);
1201
1202 *index = btrfs_inode_ref_index(eb, ref);
1203
1204 return 0;
1205 }
1206
1207 /*
1208 * replay one inode back reference item found in the log tree.
1209 * eb, slot and key refer to the buffer and key found in the log tree.
1210 * root is the destination we are replaying into, and path is for temp
1211 * use by this function. (it should be released on return).
1212 */
1213 static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
1214 struct btrfs_root *root,
1215 struct btrfs_root *log,
1216 struct btrfs_path *path,
1217 struct extent_buffer *eb, int slot,
1218 struct btrfs_key *key)
1219 {
1220 struct inode *dir = NULL;
1221 struct inode *inode = NULL;
1222 unsigned long ref_ptr;
1223 unsigned long ref_end;
1224 char *name = NULL;
1225 int namelen;
1226 int ret;
1227 int search_done = 0;
1228 int log_ref_ver = 0;
1229 u64 parent_objectid;
1230 u64 inode_objectid;
1231 u64 ref_index = 0;
1232 int ref_struct_size;
1233
1234 ref_ptr = btrfs_item_ptr_offset(eb, slot);
1235 ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);
1236
1237 if (key->type == BTRFS_INODE_EXTREF_KEY) {
1238 struct btrfs_inode_extref *r;
1239
1240 ref_struct_size = sizeof(struct btrfs_inode_extref);
1241 log_ref_ver = 1;
1242 r = (struct btrfs_inode_extref *)ref_ptr;
1243 parent_objectid = btrfs_inode_extref_parent(eb, r);
1244 } else {
1245 ref_struct_size = sizeof(struct btrfs_inode_ref);
1246 parent_objectid = key->offset;
1247 }
1248 inode_objectid = key->objectid;
1249
1250 /*
1251 * it is possible that we didn't log all the parent directories
1252 * for a given inode. If we don't find the dir, just don't
1253 * copy the back ref in. The link count fixup code will take
1254 * care of the rest
1255 */
1256 dir = read_one_inode(root, parent_objectid);
1257 if (!dir) {
1258 ret = -ENOENT;
1259 goto out;
1260 }
1261
1262 inode = read_one_inode(root, inode_objectid);
1263 if (!inode) {
1264 ret = -EIO;
1265 goto out;
1266 }
1267
1268 while (ref_ptr < ref_end) {
1269 if (log_ref_ver) {
1270 ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
1271 &ref_index, &parent_objectid);
1272 /*
1273 * parent object can change from one array
1274 * item to another.
1275 */
1276 if (!dir)
1277 dir = read_one_inode(root, parent_objectid);
1278 if (!dir) {
1279 ret = -ENOENT;
1280 goto out;
1281 }
1282 } else {
1283 ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
1284 &ref_index);
1285 }
1286 if (ret)
1287 goto out;
1288
1289 /* if we already have a perfect match, we're done */
1290 if (!inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode),
1291 ref_index, name, namelen)) {
1292 /*
1293 * look for a conflicting back reference in the
1294 * metadata. if we find one we have to unlink that name
1295 * of the file before we add our new link. Later on, we
1296 * overwrite any existing back reference, and we don't
1297 * want to create dangling pointers in the directory.
1298 */
1299
1300 if (!search_done) {
1301 ret = __add_inode_ref(trans, root, path, log,
1302 dir, inode, eb,
1303 inode_objectid,
1304 parent_objectid,
1305 ref_index, name, namelen,
1306 &search_done);
1307 if (ret) {
1308 if (ret == 1)
1309 ret = 0;
1310 goto out;
1311 }
1312 }
1313
1314 /* insert our name */
1315 ret = btrfs_add_link(trans, dir, inode, name, namelen,
1316 0, ref_index);
1317 if (ret)
1318 goto out;
1319
1320 btrfs_update_inode(trans, root, inode);
1321 }
1322
1323 ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
1324 kfree(name);
1325 name = NULL;
1326 if (log_ref_ver) {
1327 iput(dir);
1328 dir = NULL;
1329 }
1330 }
1331
1332 /* finally write the back reference in the inode */
1333 ret = overwrite_item(trans, root, path, eb, slot, key);
1334 out:
1335 btrfs_release_path(path);
1336 kfree(name);
1337 iput(dir);
1338 iput(inode);
1339 return ret;
1340 }
1341
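/*
 * insert an orphan item for the given inode number so the inode gets
 * cleaned up if replay leaves it with a link count of zero.  An item
 * that already exists (-EEXIST) is not an error.
 */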
1342 static int insert_orphan_item(struct btrfs_trans_handle *trans,
1343 struct btrfs_root *root, u64 ino)
1344 {
1345 int ret;
1346
1347 ret = btrfs_insert_orphan_item(trans, root, ino);
1348 if (ret == -EEXIST)
1349 ret = 0;
1350
1351 return ret;
1352 }
1353
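/*
 * count how many names point at this inode via extended back references.
 * Returns the count, or a negative errno (other than the -ENOENT that
 * simply ends the search) on failure.
 */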
1354 static int count_inode_extrefs(struct btrfs_root *root,
1355 struct inode *inode, struct btrfs_path *path)
1356 {
1357 int ret = 0;
1358 int name_len;
1359 unsigned int nlink = 0;
1360 u32 item_size;
1361 u32 cur_offset = 0;
1362 u64 inode_objectid = btrfs_ino(inode);
1363 u64 offset = 0;
1364 unsigned long ptr;
1365 struct btrfs_inode_extref *extref;
1366 struct extent_buffer *leaf;
1367
1368 while (1) {
1369 ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
1370 &extref, &offset);
1371 if (ret)
1372 break;
1373
1374 leaf = path->nodes[0];
1375 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1376 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1377 cur_offset = 0;
1378
1379 while (cur_offset < item_size) {
1380 extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
1381 name_len = btrfs_inode_extref_name_len(leaf, extref);
1382
1383 nlink++;
1384
1385 cur_offset += name_len + sizeof(*extref);
1386 }
1387
1388 offset++;
1389 btrfs_release_path(path);
1390 }
1391 btrfs_release_path(path);
1392
1393 if (ret < 0 && ret != -ENOENT)
1394 return ret;
1395 return nlink;
1396 }
1397
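/*
 * count how many names point at this inode via old style back references,
 * walking the INODE_REF items from the highest key offset downwards.
 */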
1398 static int count_inode_refs(struct btrfs_root *root,
1399 struct inode *inode, struct btrfs_path *path)
1400 {
1401 int ret;
1402 struct btrfs_key key;
1403 unsigned int nlink = 0;
1404 unsigned long ptr;
1405 unsigned long ptr_end;
1406 int name_len;
1407 u64 ino = btrfs_ino(inode);
1408
1409 key.objectid = ino;
1410 key.type = BTRFS_INODE_REF_KEY;
1411 key.offset = (u64)-1;
1412
1413 while (1) {
1414 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1415 if (ret < 0)
1416 break;
1417 if (ret > 0) {
1418 if (path->slots[0] == 0)
1419 break;
1420 path->slots[0]--;
1421 }
1422 process_slot:
1423 btrfs_item_key_to_cpu(path->nodes[0], &key,
1424 path->slots[0]);
1425 if (key.objectid != ino ||
1426 key.type != BTRFS_INODE_REF_KEY)
1427 break;
1428 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
1429 ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
1430 path->slots[0]);
1431 while (ptr < ptr_end) {
1432 struct btrfs_inode_ref *ref;
1433
1434 ref = (struct btrfs_inode_ref *)ptr;
1435 name_len = btrfs_inode_ref_name_len(path->nodes[0],
1436 ref);
1437 ptr = (unsigned long)(ref + 1) + name_len;
1438 nlink++;
1439 }
1440
1441 if (key.offset == 0)
1442 break;
1443 if (path->slots[0] > 0) {
1444 path->slots[0]--;
1445 goto process_slot;
1446 }
1447 key.offset--;
1448 btrfs_release_path(path);
1449 }
1450 btrfs_release_path(path);
1451
1452 return nlink;
1453 }
1454
1455 /*
1456 * There are a few corners where the link count of the file can't
1457 * be properly maintained during replay. So, instead of adding
1458 * lots of complexity to the log code, we just scan the backrefs
1459 * for any file that has been through replay.
1460 *
1461 * The scan will update the link count on the inode to reflect the
1462 * number of back refs found. If it goes down to zero, the iput
1463 * will free the inode.
1464 */
1465 static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
1466 struct btrfs_root *root,
1467 struct inode *inode)
1468 {
1469 struct btrfs_path *path;
1470 int ret;
1471 u64 nlink = 0;
1472 u64 ino = btrfs_ino(inode);
1473
1474 path = btrfs_alloc_path();
1475 if (!path)
1476 return -ENOMEM;
1477
1478 ret = count_inode_refs(root, inode, path);
1479 if (ret < 0)
1480 goto out;
1481
1482 nlink = ret;
1483
1484 ret = count_inode_extrefs(root, inode, path);
1485 if (ret < 0)
1486 goto out;
1487
1488 nlink += ret;
1489
1490 ret = 0;
1491
1492 if (nlink != inode->i_nlink) {
1493 set_nlink(inode, nlink);
1494 btrfs_update_inode(trans, root, inode);
1495 }
1496 BTRFS_I(inode)->index_cnt = (u64)-1;
1497
1498 if (inode->i_nlink == 0) {
1499 if (S_ISDIR(inode->i_mode)) {
1500 ret = replay_dir_deletes(trans, root, NULL, path,
1501 ino, 1);
1502 if (ret)
1503 goto out;
1504 }
1505 ret = insert_orphan_item(trans, root, ino);
1506 }
1507
1508 out:
1509 btrfs_free_path(path);
1510 return ret;
1511 }
1512
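/*
 * walk all the orphan items under BTRFS_TREE_LOG_FIXUP_OBJECTID that were
 * added while replaying references, delete each one and fix the link
 * count of the inode it refers to.
 */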
1513 static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
1514 struct btrfs_root *root,
1515 struct btrfs_path *path)
1516 {
1517 int ret;
1518 struct btrfs_key key;
1519 struct inode *inode;
1520
1521 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
1522 key.type = BTRFS_ORPHAN_ITEM_KEY;
1523 key.offset = (u64)-1;
1524 while (1) {
1525 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1526 if (ret < 0)
1527 break;
1528
1529 if (ret == 1) {
1530 if (path->slots[0] == 0)
1531 break;
1532 path->slots[0]--;
1533 }
1534
1535 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1536 if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
1537 key.type != BTRFS_ORPHAN_ITEM_KEY)
1538 break;
1539
1540 ret = btrfs_del_item(trans, root, path);
1541 if (ret)
1542 goto out;
1543
1544 btrfs_release_path(path);
1545 inode = read_one_inode(root, key.offset);
1546 if (!inode)
1547 return -EIO;
1548
1549 ret = fixup_inode_link_count(trans, root, inode);
1550 iput(inode);
1551 if (ret)
1552 goto out;
1553
1554 /*
1555 * fixup on a directory may create new entries,
1556 * make sure we always look for the highest possible
1557 * offset
1558 */
1559 key.offset = (u64)-1;
1560 }
1561 ret = 0;
1562 out:
1563 btrfs_release_path(path);
1564 return ret;
1565 }
1566
1567
1568 /*
1569 * record a given inode in the fixup dir so we can check its link
1570 * count when replay is done. The link count is incremented here
1571 * so the inode won't go away until we check it
1572 */
1573 static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
1574 struct btrfs_root *root,
1575 struct btrfs_path *path,
1576 u64 objectid)
1577 {
1578 struct btrfs_key key;
1579 int ret = 0;
1580 struct inode *inode;
1581
1582 inode = read_one_inode(root, objectid);
1583 if (!inode)
1584 return -EIO;
1585
1586 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
1587 key.type = BTRFS_ORPHAN_ITEM_KEY;
1588 key.offset = objectid;
1589
1590 ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1591
1592 btrfs_release_path(path);
1593 if (ret == 0) {
1594 if (!inode->i_nlink)
1595 set_nlink(inode, 1);
1596 else
1597 inc_nlink(inode);
1598 ret = btrfs_update_inode(trans, root, inode);
1599 } else if (ret == -EEXIST) {
1600 ret = 0;
1601 } else {
1602 BUG(); /* Logic Error */
1603 }
1604 iput(inode);
1605
1606 return ret;
1607 }
1608
1609 /*
1610 * when replaying the log for a directory, we only insert names
1611 * for inodes that actually exist. This means an fsync on a directory
1612 * does not implicitly fsync all the new files in it
1613 */
1614 static noinline int insert_one_name(struct btrfs_trans_handle *trans,
1615 struct btrfs_root *root,
1616 u64 dirid, u64 index,
1617 char *name, int name_len,
1618 struct btrfs_key *location)
1619 {
1620 struct inode *inode;
1621 struct inode *dir;
1622 int ret;
1623
1624 inode = read_one_inode(root, location->objectid);
1625 if (!inode)
1626 return -ENOENT;
1627
1628 dir = read_one_inode(root, dirid);
1629 if (!dir) {
1630 iput(inode);
1631 return -EIO;
1632 }
1633
1634 ret = btrfs_add_link(trans, dir, inode, name, name_len, 1, index);
1635
1636 /* FIXME, put inode into FIXUP list */
1637
1638 iput(inode);
1639 iput(dir);
1640 return ret;
1641 }
1642
1643 /*
1644 * Return true if an inode reference exists in the log for the given name,
1645 * inode and parent inode.
1646 */
1647 static bool name_in_log_ref(struct btrfs_root *log_root,
1648 const char *name, const int name_len,
1649 const u64 dirid, const u64 ino)
1650 {
1651 struct btrfs_key search_key;
1652
1653 search_key.objectid = ino;
1654 search_key.type = BTRFS_INODE_REF_KEY;
1655 search_key.offset = dirid;
1656 if (backref_in_log(log_root, &search_key, dirid, name, name_len))
1657 return true;
1658
1659 search_key.type = BTRFS_INODE_EXTREF_KEY;
1660 search_key.offset = btrfs_extref_hash(dirid, name, name_len);
1661 if (backref_in_log(log_root, &search_key, dirid, name, name_len))
1662 return true;
1663
1664 return false;
1665 }
1666
1667 /*
1668 * take a single entry in a log directory item and replay it into
1669 * the subvolume.
1670 *
1671 * if a conflicting item exists in the subdirectory already,
1672 * the inode it points to is unlinked and put into the link count
1673 * fix up tree.
1674 *
1675 * If a name from the log points to a file or directory that does
1676 * not exist in the FS, it is skipped. fsyncs on directories
1677 * do not force down inodes inside that directory, just changes to the
1678 * names or unlinks in a directory.
1679 *
1680 * Returns < 0 on error, 0 if the name wasn't replayed (dentry points to a
1681 * non-existing inode) and 1 if the name was replayed.
1682 */
1683 static noinline int replay_one_name(struct btrfs_trans_handle *trans,
1684 struct btrfs_root *root,
1685 struct btrfs_path *path,
1686 struct extent_buffer *eb,
1687 struct btrfs_dir_item *di,
1688 struct btrfs_key *key)
1689 {
1690 char *name;
1691 int name_len;
1692 struct btrfs_dir_item *dst_di;
1693 struct btrfs_key found_key;
1694 struct btrfs_key log_key;
1695 struct inode *dir;
1696 u8 log_type;
1697 int exists;
1698 int ret = 0;
1699 bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
1700 bool name_added = false;
1701
1702 dir = read_one_inode(root, key->objectid);
1703 if (!dir)
1704 return -EIO;
1705
1706 name_len = btrfs_dir_name_len(eb, di);
1707 name = kmalloc(name_len, GFP_NOFS);
1708 if (!name) {
1709 ret = -ENOMEM;
1710 goto out;
1711 }
1712
1713 log_type = btrfs_dir_type(eb, di);
1714 read_extent_buffer(eb, name, (unsigned long)(di + 1),
1715 name_len);
1716
1717 btrfs_dir_item_key_to_cpu(eb, di, &log_key);
1718 exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
1719 if (exists == 0)
1720 exists = 1;
1721 else
1722 exists = 0;
1723 btrfs_release_path(path);
1724
1725 if (key->type == BTRFS_DIR_ITEM_KEY) {
1726 dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
1727 name, name_len, 1);
1728 } else if (key->type == BTRFS_DIR_INDEX_KEY) {
1729 dst_di = btrfs_lookup_dir_index_item(trans, root, path,
1730 key->objectid,
1731 key->offset, name,
1732 name_len, 1);
1733 } else {
1734 /* Corruption */
1735 ret = -EINVAL;
1736 goto out;
1737 }
1738 if (IS_ERR_OR_NULL(dst_di)) {
1739 /* we need a sequence number to insert, so we only
1740 * do inserts for the BTRFS_DIR_INDEX_KEY types
1741 */
1742 if (key->type != BTRFS_DIR_INDEX_KEY)
1743 goto out;
1744 goto insert;
1745 }
1746
1747 btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
1748 /* the existing item matches the logged item */
1749 if (found_key.objectid == log_key.objectid &&
1750 found_key.type == log_key.type &&
1751 found_key.offset == log_key.offset &&
1752 btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
1753 update_size = false;
1754 goto out;
1755 }
1756
1757 /*
1758 * don't drop the conflicting directory entry if the inode
1759 * for the new entry doesn't exist
1760 */
1761 if (!exists)
1762 goto out;
1763
1764 ret = drop_one_dir_item(trans, root, path, dir, dst_di);
1765 if (ret)
1766 goto out;
1767
1768 if (key->type == BTRFS_DIR_INDEX_KEY)
1769 goto insert;
1770 out:
1771 btrfs_release_path(path);
1772 if (!ret && update_size) {
1773 btrfs_i_size_write(dir, dir->i_size + name_len * 2);
1774 ret = btrfs_update_inode(trans, root, dir);
1775 }
1776 kfree(name);
1777 iput(dir);
1778 if (!ret && name_added)
1779 ret = 1;
1780 return ret;
1781
1782 insert:
1783 if (name_in_log_ref(root->log_root, name, name_len,
1784 key->objectid, log_key.objectid)) {
1785 /* The dentry will be added later. */
1786 ret = 0;
1787 update_size = false;
1788 goto out;
1789 }
1790 btrfs_release_path(path);
1791 ret = insert_one_name(trans, root, key->objectid, key->offset,
1792 name, name_len, &log_key);
1793 if (ret && ret != -ENOENT && ret != -EEXIST)
1794 goto out;
1795 if (!ret)
1796 name_added = true;
1797 update_size = false;
1798 ret = 0;
1799 goto out;
1800 }
1801
1802 /*
1803 * find all the names in a directory item and reconcile them into
1804 * the subvolume. Only BTRFS_DIR_ITEM_KEY types will have more than
1805 * one name in a directory item, but the same code gets used for
1806 * both directory index types
1807 */
1808 static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
1809 struct btrfs_root *root,
1810 struct btrfs_path *path,
1811 struct extent_buffer *eb, int slot,
1812 struct btrfs_key *key)
1813 {
1814 int ret = 0;
1815 u32 item_size = btrfs_item_size_nr(eb, slot);
1816 struct btrfs_dir_item *di;
1817 int name_len;
1818 unsigned long ptr;
1819 unsigned long ptr_end;
1820 struct btrfs_path *fixup_path = NULL;
1821
1822 ptr = btrfs_item_ptr_offset(eb, slot);
1823 ptr_end = ptr + item_size;
1824 while (ptr < ptr_end) {
1825 di = (struct btrfs_dir_item *)ptr;
1826 if (verify_dir_item(root, eb, di))
1827 return -EIO;
1828 name_len = btrfs_dir_name_len(eb, di);
1829 ret = replay_one_name(trans, root, path, eb, di, key);
1830 if (ret < 0)
1831 break;
1832 ptr = (unsigned long)(di + 1);
1833 ptr += name_len;
1834
1835 /*
1836 * If this entry refers to a non-directory (directories can not
1837 * have a link count > 1) and it was added in the transaction
1838 * that was not committed, make sure we fixup the link count of
1839 * the inode it the entry points to. Otherwise something like
1840 * the following would result in a directory pointing to an
1841 * inode with a wrong link that does not account for this dir
1842 * entry:
1843 *
1844 * mkdir testdir
1845 * touch testdir/foo
1846 * touch testdir/bar
1847 * sync
1848 *
1849 * ln testdir/bar testdir/bar_link
1850 * ln testdir/foo testdir/foo_link
1851 * xfs_io -c "fsync" testdir/bar
1852 *
1853 * <power failure>
1854 *
1855 * mount fs, log replay happens
1856 *
1857 * File foo would remain with a link count of 1 when it has two
1858 * entries pointing to it in the directory testdir. This would
1859 * make it impossible to ever delete the parent directory as
1860 * it would result in stale dentries that can never be deleted.
1861 */
1862 if (ret == 1 && btrfs_dir_type(eb, di) != BTRFS_FT_DIR) {
1863 struct btrfs_key di_key;
1864
1865 if (!fixup_path) {
1866 fixup_path = btrfs_alloc_path();
1867 if (!fixup_path) {
1868 ret = -ENOMEM;
1869 break;
1870 }
1871 }
1872
1873 btrfs_dir_item_key_to_cpu(eb, di, &di_key);
1874 ret = link_to_fixup_dir(trans, root, fixup_path,
1875 di_key.objectid);
1876 if (ret)
1877 break;
1878 }
1879 ret = 0;
1880 }
1881 btrfs_free_path(fixup_path);
1882 return ret;
1883 }
1884
1885 /*
1886 * directory replay has two parts. There are the standard directory
1887 * items in the log copied from the subvolume, and range items
1888 * created in the log while the subvolume was logged.
1889 *
1890 * The range items tell us which parts of the key space the log
1891 * is authoritative for. During replay, if a key in the subvolume
1892 * directory is in a logged range item, but not actually in the log
1893 * that means it was deleted from the directory before the fsync
1894 * and should be removed.
1895 */
1896 static noinline int find_dir_range(struct btrfs_root *root,
1897 struct btrfs_path *path,
1898 u64 dirid, int key_type,
1899 u64 *start_ret, u64 *end_ret)
1900 {
1901 struct btrfs_key key;
1902 u64 found_end;
1903 struct btrfs_dir_log_item *item;
1904 int ret;
1905 int nritems;
1906
1907 if (*start_ret == (u64)-1)
1908 return 1;
1909
1910 key.objectid = dirid;
1911 key.type = key_type;
1912 key.offset = *start_ret;
1913
1914 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1915 if (ret < 0)
1916 goto out;
1917 if (ret > 0) {
1918 if (path->slots[0] == 0)
1919 goto out;
1920 path->slots[0]--;
1921 }
1922 if (ret != 0)
1923 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1924
1925 if (key.type != key_type || key.objectid != dirid) {
1926 ret = 1;
1927 goto next;
1928 }
1929 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
1930 struct btrfs_dir_log_item);
1931 found_end = btrfs_dir_log_end(path->nodes[0], item);
1932
1933 if (*start_ret >= key.offset && *start_ret <= found_end) {
1934 ret = 0;
1935 *start_ret = key.offset;
1936 *end_ret = found_end;
1937 goto out;
1938 }
1939 ret = 1;
1940 next:
1941 /* check the next slot in the tree to see if it is a valid item */
1942 nritems = btrfs_header_nritems(path->nodes[0]);
1943 if (path->slots[0] >= nritems) {
1944 ret = btrfs_next_leaf(root, path);
1945 if (ret)
1946 goto out;
1947 } else {
1948 path->slots[0]++;
1949 }
1950
1951 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1952
1953 if (key.type != key_type || key.objectid != dirid) {
1954 ret = 1;
1955 goto out;
1956 }
1957 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
1958 struct btrfs_dir_log_item);
1959 found_end = btrfs_dir_log_end(path->nodes[0], item);
1960 *start_ret = key.offset;
1961 *end_ret = found_end;
1962 ret = 0;
1963 out:
1964 btrfs_release_path(path);
1965 return ret;
1966 }
1967
1968 /*
1969 * this looks for a given directory item in the log. If the directory
1970 * item is not in the log, the item is removed and the inode it points
1971 * to is unlinked
1972 */
1973 static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
1974 struct btrfs_root *root,
1975 struct btrfs_root *log,
1976 struct btrfs_path *path,
1977 struct btrfs_path *log_path,
1978 struct inode *dir,
1979 struct btrfs_key *dir_key)
1980 {
1981 int ret;
1982 struct extent_buffer *eb;
1983 int slot;
1984 u32 item_size;
1985 struct btrfs_dir_item *di;
1986 struct btrfs_dir_item *log_di;
1987 int name_len;
1988 unsigned long ptr;
1989 unsigned long ptr_end;
1990 char *name;
1991 struct inode *inode;
1992 struct btrfs_key location;
1993
1994 again:
1995 eb = path->nodes[0];
1996 slot = path->slots[0];
1997 item_size = btrfs_item_size_nr(eb, slot);
1998 ptr = btrfs_item_ptr_offset(eb, slot);
1999 ptr_end = ptr + item_size;
2000 while (ptr < ptr_end) {
2001 di = (struct btrfs_dir_item *)ptr;
2002 if (verify_dir_item(root, eb, di)) {
2003 ret = -EIO;
2004 goto out;
2005 }
2006
2007 name_len = btrfs_dir_name_len(eb, di);
2008 name = kmalloc(name_len, GFP_NOFS);
2009 if (!name) {
2010 ret = -ENOMEM;
2011 goto out;
2012 }
2013 read_extent_buffer(eb, name, (unsigned long)(di + 1),
2014 name_len);
2015 log_di = NULL;
2016 if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) {
2017 log_di = btrfs_lookup_dir_item(trans, log, log_path,
2018 dir_key->objectid,
2019 name, name_len, 0);
2020 } else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) {
2021 log_di = btrfs_lookup_dir_index_item(trans, log,
2022 log_path,
2023 dir_key->objectid,
2024 dir_key->offset,
2025 name, name_len, 0);
2026 }
2027 if (!log_di || (IS_ERR(log_di) && PTR_ERR(log_di) == -ENOENT)) {
2028 btrfs_dir_item_key_to_cpu(eb, di, &location);
2029 btrfs_release_path(path);
2030 btrfs_release_path(log_path);
2031 inode = read_one_inode(root, location.objectid);
2032 if (!inode) {
2033 kfree(name);
2034 return -EIO;
2035 }
2036
2037 ret = link_to_fixup_dir(trans, root,
2038 path, location.objectid);
2039 if (ret) {
2040 kfree(name);
2041 iput(inode);
2042 goto out;
2043 }
2044
2045 inc_nlink(inode);
2046 ret = btrfs_unlink_inode(trans, root, dir, inode,
2047 name, name_len);
2048 if (!ret)
2049 ret = btrfs_run_delayed_items(trans, root);
2050 kfree(name);
2051 iput(inode);
2052 if (ret)
2053 goto out;
2054
2055 /* there might still be more names under this key,
2056 * check and repeat if required
2057 */
2058 ret = btrfs_search_slot(NULL, root, dir_key, path,
2059 0, 0);
2060 if (ret == 0)
2061 goto again;
2062 ret = 0;
2063 goto out;
2064 } else if (IS_ERR(log_di)) {
2065 kfree(name);
2066 return PTR_ERR(log_di);
2067 }
2068 btrfs_release_path(log_path);
2069 kfree(name);
2070
2071 ptr = (unsigned long)(di + 1);
2072 ptr += name_len;
2073 }
2074 ret = 0;
2075 out:
2076 btrfs_release_path(path);
2077 btrfs_release_path(log_path);
2078 return ret;
2079 }
2080
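/*
 * Walk all xattr items of inode 'ino' in the subvolume and delete any
 * xattr that is no longer present in the log tree, so that xattrs
 * removed before the fsync do not reappear after log replay.
 */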
2081 static int replay_xattr_deletes(struct btrfs_trans_handle *trans,
2082 struct btrfs_root *root,
2083 struct btrfs_root *log,
2084 struct btrfs_path *path,
2085 const u64 ino)
2086 {
2087 struct btrfs_key search_key;
2088 struct btrfs_path *log_path;
2089 int i;
2090 int nritems;
2091 int ret;
2092
2093 log_path = btrfs_alloc_path();
2094 if (!log_path)
2095 return -ENOMEM;
2096
2097 search_key.objectid = ino;
2098 search_key.type = BTRFS_XATTR_ITEM_KEY;
2099 search_key.offset = 0;
2100 again:
2101 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
2102 if (ret < 0)
2103 goto out;
2104 process_leaf:
2105 nritems = btrfs_header_nritems(path->nodes[0]);
2106 for (i = path->slots[0]; i < nritems; i++) {
2107 struct btrfs_key key;
2108 struct btrfs_dir_item *di;
2109 struct btrfs_dir_item *log_di;
2110 u32 total_size;
2111 u32 cur;
2112
2113 btrfs_item_key_to_cpu(path->nodes[0], &key, i);
2114 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) {
2115 ret = 0;
2116 goto out;
2117 }
2118
2119 di = btrfs_item_ptr(path->nodes[0], i, struct btrfs_dir_item);
2120 total_size = btrfs_item_size_nr(path->nodes[0], i);
2121 cur = 0;
2122 while (cur < total_size) {
2123 u16 name_len = btrfs_dir_name_len(path->nodes[0], di);
2124 u16 data_len = btrfs_dir_data_len(path->nodes[0], di);
2125 u32 this_len = sizeof(*di) + name_len + data_len;
2126 char *name;
2127
2128 name = kmalloc(name_len, GFP_NOFS);
2129 if (!name) {
2130 ret = -ENOMEM;
2131 goto out;
2132 }
2133 read_extent_buffer(path->nodes[0], name,
2134 (unsigned long)(di + 1), name_len);
2135
2136 log_di = btrfs_lookup_xattr(NULL, log, log_path, ino,
2137 name, name_len, 0);
2138 btrfs_release_path(log_path);
2139 if (!log_di) {
2140 /* Doesn't exist in log tree, so delete it. */
2141 btrfs_release_path(path);
2142 di = btrfs_lookup_xattr(trans, root, path, ino,
2143 name, name_len, -1);
2144 kfree(name);
2145 if (IS_ERR(di)) {
2146 ret = PTR_ERR(di);
2147 goto out;
2148 }
2149 ASSERT(di);
2150 ret = btrfs_delete_one_dir_name(trans, root,
2151 path, di);
2152 if (ret)
2153 goto out;
2154 btrfs_release_path(path);
2155 search_key = key;
2156 goto again;
2157 }
2158 kfree(name);
2159 if (IS_ERR(log_di)) {
2160 ret = PTR_ERR(log_di);
2161 goto out;
2162 }
2163 cur += this_len;
2164 di = (struct btrfs_dir_item *)((char *)di + this_len);
2165 }
2166 }
2167 ret = btrfs_next_leaf(root, path);
2168 if (ret > 0)
2169 ret = 0;
2170 else if (ret == 0)
2171 goto process_leaf;
2172 out:
2173 btrfs_free_path(log_path);
2174 btrfs_release_path(path);
2175 return ret;
2176 }
2177
2178
2179 /*
2180 * deletion replay happens before we copy any new directory items
2181 * out of the log or out of backreferences from inodes. It
2182 * scans the log to find ranges of keys that the log is authoritative for,
2183 * and then scans the directory to find items in those ranges that are
2184 * not present in the log.
2185 *
2186 * Anything we don't find in the log is unlinked and removed from the
2187 * directory.
2188 */
2189 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
2190 struct btrfs_root *root,
2191 struct btrfs_root *log,
2192 struct btrfs_path *path,
2193 u64 dirid, int del_all)
2194 {
2195 u64 range_start;
2196 u64 range_end;
2197 int key_type = BTRFS_DIR_LOG_ITEM_KEY;
2198 int ret = 0;
2199 struct btrfs_key dir_key;
2200 struct btrfs_key found_key;
2201 struct btrfs_path *log_path;
2202 struct inode *dir;
2203
2204 dir_key.objectid = dirid;
2205 dir_key.type = BTRFS_DIR_ITEM_KEY;
2206 log_path = btrfs_alloc_path();
2207 if (!log_path)
2208 return -ENOMEM;
2209
2210 dir = read_one_inode(root, dirid);
2211 /* it isn't an error if the inode isn't there, that can happen
2212 * because we replay the deletes before we copy in the inode item
2213 * from the log
2214 */
2215 if (!dir) {
2216 btrfs_free_path(log_path);
2217 return 0;
2218 }
2219 again:
2220 range_start = 0;
2221 range_end = 0;
2222 while (1) {
2223 if (del_all)
2224 range_end = (u64)-1;
2225 else {
2226 ret = find_dir_range(log, path, dirid, key_type,
2227 &range_start, &range_end);
2228 if (ret != 0)
2229 break;
2230 }
2231
2232 dir_key.offset = range_start;
2233 while (1) {
2234 int nritems;
2235 ret = btrfs_search_slot(NULL, root, &dir_key, path,
2236 0, 0);
2237 if (ret < 0)
2238 goto out;
2239
2240 nritems = btrfs_header_nritems(path->nodes[0]);
2241 if (path->slots[0] >= nritems) {
2242 ret = btrfs_next_leaf(root, path);
2243 if (ret)
2244 break;
2245 }
2246 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2247 path->slots[0]);
2248 if (found_key.objectid != dirid ||
2249 found_key.type != dir_key.type)
2250 goto next_type;
2251
2252 if (found_key.offset > range_end)
2253 break;
2254
2255 ret = check_item_in_log(trans, root, log, path,
2256 log_path, dir,
2257 &found_key);
2258 if (ret)
2259 goto out;
2260 if (found_key.offset == (u64)-1)
2261 break;
2262 dir_key.offset = found_key.offset + 1;
2263 }
2264 btrfs_release_path(path);
2265 if (range_end == (u64)-1)
2266 break;
2267 range_start = range_end + 1;
2268 }
2269
2270 next_type:
2271 ret = 0;
2272 if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
2273 key_type = BTRFS_DIR_LOG_INDEX_KEY;
2274 dir_key.type = BTRFS_DIR_INDEX_KEY;
2275 btrfs_release_path(path);
2276 goto again;
2277 }
2278 out:
2279 btrfs_release_path(path);
2280 btrfs_free_path(log_path);
2281 iput(dir);
2282 return ret;
2283 }
2284
2285 /*
2286 * the process_func used to replay items from the log tree. This
2287 * gets called in two different stages. The first stage just looks
2288 * for inodes and makes sure they are all copied into the subvolume.
2289 *
2290 * The second stage copies all the other item types from the log into
2291 * the subvolume. The two stage approach is slower, but gets rid of
2292 * lots of complexity around inodes referencing other inodes that exist
2293 * only in the log (references come from either directory items or inode
2294 * back refs).
2295 */
2296 static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
2297 struct walk_control *wc, u64 gen)
2298 {
2299 int nritems;
2300 struct btrfs_path *path;
2301 struct btrfs_root *root = wc->replay_dest;
2302 struct btrfs_key key;
2303 int level;
2304 int i;
2305 int ret;
2306
2307 ret = btrfs_read_buffer(eb, gen);
2308 if (ret)
2309 return ret;
2310
2311 level = btrfs_header_level(eb);
2312
2313 if (level != 0)
2314 return 0;
2315
2316 path = btrfs_alloc_path();
2317 if (!path)
2318 return -ENOMEM;
2319
2320 nritems = btrfs_header_nritems(eb);
2321 for (i = 0; i < nritems; i++) {
2322 btrfs_item_key_to_cpu(eb, &key, i);
2323
2324 /* inode keys are done during the first stage */
2325 if (key.type == BTRFS_INODE_ITEM_KEY &&
2326 wc->stage == LOG_WALK_REPLAY_INODES) {
2327 struct btrfs_inode_item *inode_item;
2328 u32 mode;
2329
2330 inode_item = btrfs_item_ptr(eb, i,
2331 struct btrfs_inode_item);
2332 ret = replay_xattr_deletes(wc->trans, root, log,
2333 path, key.objectid);
2334 if (ret)
2335 break;
2336 mode = btrfs_inode_mode(eb, inode_item);
2337 if (S_ISDIR(mode)) {
2338 ret = replay_dir_deletes(wc->trans,
2339 root, log, path, key.objectid, 0);
2340 if (ret)
2341 break;
2342 }
2343 ret = overwrite_item(wc->trans, root, path,
2344 eb, i, &key);
2345 if (ret)
2346 break;
2347
2348 /* for regular files, make sure the corresponding
2349 * orphan item exists. Extents past the new EOF
2350 * will be truncated later by orphan cleanup.
2351 */
2352 if (S_ISREG(mode)) {
2353 ret = insert_orphan_item(wc->trans, root,
2354 key.objectid);
2355 if (ret)
2356 break;
2357 }
2358
2359 ret = link_to_fixup_dir(wc->trans, root,
2360 path, key.objectid);
2361 if (ret)
2362 break;
2363 }
2364
2365 if (key.type == BTRFS_DIR_INDEX_KEY &&
2366 wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
2367 ret = replay_one_dir_item(wc->trans, root, path,
2368 eb, i, &key);
2369 if (ret)
2370 break;
2371 }
2372
2373 if (wc->stage < LOG_WALK_REPLAY_ALL)
2374 continue;
2375
2376 /* these keys are simply copied */
2377 if (key.type == BTRFS_XATTR_ITEM_KEY) {
2378 ret = overwrite_item(wc->trans, root, path,
2379 eb, i, &key);
2380 if (ret)
2381 break;
2382 } else if (key.type == BTRFS_INODE_REF_KEY ||
2383 key.type == BTRFS_INODE_EXTREF_KEY) {
2384 ret = add_inode_ref(wc->trans, root, log, path,
2385 eb, i, &key);
2386 if (ret && ret != -ENOENT)
2387 break;
2388 ret = 0;
2389 } else if (key.type == BTRFS_EXTENT_DATA_KEY) {
2390 ret = replay_one_extent(wc->trans, root, path,
2391 eb, i, &key);
2392 if (ret)
2393 break;
2394 } else if (key.type == BTRFS_DIR_ITEM_KEY) {
2395 ret = replay_one_dir_item(wc->trans, root, path,
2396 eb, i, &key);
2397 if (ret)
2398 break;
2399 }
2400 }
2401 btrfs_free_path(path);
2402 return ret;
2403 }
2404
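/*
 * Descend the log tree from path->nodes[*level], running
 * wc->process_func on every leaf (the children of level 1 nodes) along
 * the way. With wc->free set, each processed leaf is also cleaned and
 * its reserved extent freed or pinned. Interior nodes are handled on
 * the way back up by walk_up_log_tree().
 */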
2405 static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
2406 struct btrfs_root *root,
2407 struct btrfs_path *path, int *level,
2408 struct walk_control *wc)
2409 {
2410 u64 root_owner;
2411 u64 bytenr;
2412 u64 ptr_gen;
2413 struct extent_buffer *next;
2414 struct extent_buffer *cur;
2415 struct extent_buffer *parent;
2416 u32 blocksize;
2417 int ret = 0;
2418
2419 WARN_ON(*level < 0);
2420 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2421
2422 while (*level > 0) {
2423 WARN_ON(*level < 0);
2424 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2425 cur = path->nodes[*level];
2426
2427 WARN_ON(btrfs_header_level(cur) != *level);
2428
2429 if (path->slots[*level] >=
2430 btrfs_header_nritems(cur))
2431 break;
2432
2433 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
2434 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
2435 blocksize = root->nodesize;
2436
2437 parent = path->nodes[*level];
2438 root_owner = btrfs_header_owner(parent);
2439
2440 next = btrfs_find_create_tree_block(root, bytenr);
2441 if (IS_ERR(next))
2442 return PTR_ERR(next);
2443
2444 if (*level == 1) {
2445 ret = wc->process_func(root, next, wc, ptr_gen);
2446 if (ret) {
2447 free_extent_buffer(next);
2448 return ret;
2449 }
2450
2451 path->slots[*level]++;
2452 if (wc->free) {
2453 ret = btrfs_read_buffer(next, ptr_gen);
2454 if (ret) {
2455 free_extent_buffer(next);
2456 return ret;
2457 }
2458
2459 if (trans) {
2460 btrfs_tree_lock(next);
2461 btrfs_set_lock_blocking(next);
2462 clean_tree_block(trans, root->fs_info,
2463 next);
2464 btrfs_wait_tree_block_writeback(next);
2465 btrfs_tree_unlock(next);
2466 }
2467
2468 WARN_ON(root_owner !=
2469 BTRFS_TREE_LOG_OBJECTID);
2470 ret = btrfs_free_and_pin_reserved_extent(root,
2471 bytenr, blocksize);
2472 if (ret) {
2473 free_extent_buffer(next);
2474 return ret;
2475 }
2476 }
2477 free_extent_buffer(next);
2478 continue;
2479 }
2480 ret = btrfs_read_buffer(next, ptr_gen);
2481 if (ret) {
2482 free_extent_buffer(next);
2483 return ret;
2484 }
2485
2486 WARN_ON(*level <= 0);
2487 if (path->nodes[*level-1])
2488 free_extent_buffer(path->nodes[*level-1]);
2489 path->nodes[*level-1] = next;
2490 *level = btrfs_header_level(next);
2491 path->slots[*level] = 0;
2492 cond_resched();
2493 }
2494 WARN_ON(*level < 0);
2495 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2496
2497 path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);
2498
2499 cond_resched();
2500 return 0;
2501 }
2502
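/*
 * Pop back up the log tree once walk_down_log_tree() has exhausted the
 * current node: each finished node is run through wc->process_func and,
 * when wc->free is set, cleaned and its extent freed or pinned. Stops
 * at the first ancestor that still has unvisited slots, or returns 1
 * once the whole tree has been walked.
 */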
2503 static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
2504 struct btrfs_root *root,
2505 struct btrfs_path *path, int *level,
2506 struct walk_control *wc)
2507 {
2508 u64 root_owner;
2509 int i;
2510 int slot;
2511 int ret;
2512
2513 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
2514 slot = path->slots[i];
2515 if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
2516 path->slots[i]++;
2517 *level = i;
2518 WARN_ON(*level == 0);
2519 return 0;
2520 } else {
2521 struct extent_buffer *parent;
2522 if (path->nodes[*level] == root->node)
2523 parent = path->nodes[*level];
2524 else
2525 parent = path->nodes[*level + 1];
2526
2527 root_owner = btrfs_header_owner(parent);
2528 ret = wc->process_func(root, path->nodes[*level], wc,
2529 btrfs_header_generation(path->nodes[*level]));
2530 if (ret)
2531 return ret;
2532
2533 if (wc->free) {
2534 struct extent_buffer *next;
2535
2536 next = path->nodes[*level];
2537
2538 if (trans) {
2539 btrfs_tree_lock(next);
2540 btrfs_set_lock_blocking(next);
2541 clean_tree_block(trans, root->fs_info,
2542 next);
2543 btrfs_wait_tree_block_writeback(next);
2544 btrfs_tree_unlock(next);
2545 }
2546
2547 WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
2548 ret = btrfs_free_and_pin_reserved_extent(root,
2549 path->nodes[*level]->start,
2550 path->nodes[*level]->len);
2551 if (ret)
2552 return ret;
2553 }
2554 free_extent_buffer(path->nodes[*level]);
2555 path->nodes[*level] = NULL;
2556 *level = i + 1;
2557 }
2558 }
2559 return 1;
2560 }
2561
2562 /*
2563 * drop the reference count on the tree rooted at 'log'. This traverses
2564 * the tree freeing any blocks that have a ref count of zero after being
2565 * decremented.
2566 */
2567 static int walk_log_tree(struct btrfs_trans_handle *trans,
2568 struct btrfs_root *log, struct walk_control *wc)
2569 {
2570 int ret = 0;
2571 int wret;
2572 int level;
2573 struct btrfs_path *path;
2574 int orig_level;
2575
2576 path = btrfs_alloc_path();
2577 if (!path)
2578 return -ENOMEM;
2579
2580 level = btrfs_header_level(log->node);
2581 orig_level = level;
2582 path->nodes[level] = log->node;
2583 extent_buffer_get(log->node);
2584 path->slots[level] = 0;
2585
2586 while (1) {
2587 wret = walk_down_log_tree(trans, log, path, &level, wc);
2588 if (wret > 0)
2589 break;
2590 if (wret < 0) {
2591 ret = wret;
2592 goto out;
2593 }
2594
2595 wret = walk_up_log_tree(trans, log, path, &level, wc);
2596 if (wret > 0)
2597 break;
2598 if (wret < 0) {
2599 ret = wret;
2600 goto out;
2601 }
2602 }
2603
2604 /* was the root node processed? if not, catch it here */
2605 if (path->nodes[orig_level]) {
2606 ret = wc->process_func(log, path->nodes[orig_level], wc,
2607 btrfs_header_generation(path->nodes[orig_level]));
2608 if (ret)
2609 goto out;
2610 if (wc->free) {
2611 struct extent_buffer *next;
2612
2613 next = path->nodes[orig_level];
2614
2615 if (trans) {
2616 btrfs_tree_lock(next);
2617 btrfs_set_lock_blocking(next);
2618 clean_tree_block(trans, log->fs_info, next);
2619 btrfs_wait_tree_block_writeback(next);
2620 btrfs_tree_unlock(next);
2621 }
2622
2623 WARN_ON(log->root_key.objectid !=
2624 BTRFS_TREE_LOG_OBJECTID);
2625 ret = btrfs_free_and_pin_reserved_extent(log, next->start,
2626 next->len);
2627 if (ret)
2628 goto out;
2629 }
2630 }
2631
2632 out:
2633 btrfs_free_path(path);
2634 return ret;
2635 }
2636
2637 /*
2638 * helper function to update the item for a given subvolumes log root
2639 * in the tree of log roots
2640 */
2641 static int update_log_root(struct btrfs_trans_handle *trans,
2642 struct btrfs_root *log)
2643 {
2644 int ret;
2645
2646 if (log->log_transid == 1) {
2647 /* insert root item on the first sync */
2648 ret = btrfs_insert_root(trans, log->fs_info->log_root_tree,
2649 &log->root_key, &log->root_item);
2650 } else {
2651 ret = btrfs_update_root(trans, log->fs_info->log_root_tree,
2652 &log->root_key, &log->root_item);
2653 }
2654 return ret;
2655 }
2656
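/*
 * Wait for the log commit identified by 'transid' to finish. Called
 * with root->log_mutex held; the mutex is dropped while sleeping and
 * re-taken before returning.
 */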
2657 static void wait_log_commit(struct btrfs_root *root, int transid)
2658 {
2659 DEFINE_WAIT(wait);
2660 int index = transid % 2;
2661
2662 /*
2663 * we only allow two pending log transactions at a time,
2664 * so we know that if ours is more than 2 older than the
2665 * current transaction, we're done
2666 */
2667 do {
2668 prepare_to_wait(&root->log_commit_wait[index],
2669 &wait, TASK_UNINTERRUPTIBLE);
2670 mutex_unlock(&root->log_mutex);
2671
2672 if (root->log_transid_committed < transid &&
2673 atomic_read(&root->log_commit[index]))
2674 schedule();
2675
2676 finish_wait(&root->log_commit_wait[index], &wait);
2677 mutex_lock(&root->log_mutex);
2678 } while (root->log_transid_committed < transid &&
2679 atomic_read(&root->log_commit[index]));
2680 }
2681
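/*
 * Wait until no tasks are writing into this log tree (log_writers
 * reaches zero), dropping and re-taking root->log_mutex around each
 * sleep.
 */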
2682 static void wait_for_writer(struct btrfs_root *root)
2683 {
2684 DEFINE_WAIT(wait);
2685
2686 while (atomic_read(&root->log_writers)) {
2687 prepare_to_wait(&root->log_writer_wait,
2688 &wait, TASK_UNINTERRUPTIBLE);
2689 mutex_unlock(&root->log_mutex);
2690 if (atomic_read(&root->log_writers))
2691 schedule();
2692 finish_wait(&root->log_writer_wait, &wait);
2693 mutex_lock(&root->log_mutex);
2694 }
2695 }
2696
2697 static inline void btrfs_remove_log_ctx(struct btrfs_root *root,
2698 struct btrfs_log_ctx *ctx)
2699 {
2700 if (!ctx)
2701 return;
2702
2703 mutex_lock(&root->log_mutex);
2704 list_del_init(&ctx->list);
2705 mutex_unlock(&root->log_mutex);
2706 }
2707
2708 /*
2709 * Invoked in log mutex context, or the caller must ensure that no
2710 * other task can access the list.
2711 */
2712 static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
2713 int index, int error)
2714 {
2715 struct btrfs_log_ctx *ctx;
2716
2717 if (!error) {
2718 INIT_LIST_HEAD(&root->log_ctxs[index]);
2719 return;
2720 }
2721
2722 list_for_each_entry(ctx, &root->log_ctxs[index], list)
2723 ctx->log_ret = error;
2724
2725 INIT_LIST_HEAD(&root->log_ctxs[index]);
2726 }
2727
2728 /*
2729 * btrfs_sync_log sends a given tree log down to the disk and
2730 * updates the super blocks to record it. When this call is done,
2731 * you know that any inodes previously logged are safely on disk only
2732 * if it returns 0.
2733 *
2734 * Any other return value means you need to call btrfs_commit_transaction.
2735 * Some of the edge cases for fsyncing directories that have had unlinks
2736 * or renames done in the past mean that sometimes the only safe
2737 * fsync is to commit the whole FS. When btrfs_sync_log returns -EAGAIN,
2738 * that has happened.
2739 */
2740 int btrfs_sync_log(struct btrfs_trans_handle *trans,
2741 struct btrfs_root *root, struct btrfs_log_ctx *ctx)
2742 {
2743 int index1;
2744 int index2;
2745 int mark;
2746 int ret;
2747 struct btrfs_root *log = root->log_root;
2748 struct btrfs_root *log_root_tree = root->fs_info->log_root_tree;
2749 int log_transid = 0;
2750 struct btrfs_log_ctx root_log_ctx;
2751 struct blk_plug plug;
2752
2753 mutex_lock(&root->log_mutex);
2754 log_transid = ctx->log_transid;
2755 if (root->log_transid_committed >= log_transid) {
2756 mutex_unlock(&root->log_mutex);
2757 return ctx->log_ret;
2758 }
2759
2760 index1 = log_transid % 2;
2761 if (atomic_read(&root->log_commit[index1])) {
2762 wait_log_commit(root, log_transid);
2763 mutex_unlock(&root->log_mutex);
2764 return ctx->log_ret;
2765 }
2766 ASSERT(log_transid == root->log_transid);
2767 atomic_set(&root->log_commit[index1], 1);
2768
2769 /* wait for previous tree log sync to complete */
2770 if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
2771 wait_log_commit(root, log_transid - 1);
2772
2773 while (1) {
2774 int batch = atomic_read(&root->log_batch);
2775 /* when we're on an ssd, just kick the log commit out */
2776 if (!btrfs_test_opt(root->fs_info, SSD) &&
2777 test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
2778 mutex_unlock(&root->log_mutex);
2779 schedule_timeout_uninterruptible(1);
2780 mutex_lock(&root->log_mutex);
2781 }
2782 wait_for_writer(root);
2783 if (batch == atomic_read(&root->log_batch))
2784 break;
2785 }
2786
2787 /* bail out if we need to do a full commit */
2788 if (btrfs_need_log_full_commit(root->fs_info, trans)) {
2789 ret = -EAGAIN;
2790 btrfs_free_logged_extents(log, log_transid);
2791 mutex_unlock(&root->log_mutex);
2792 goto out;
2793 }
2794
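/*
 * The two in-flight log transactions track their dirty blocks with
 * alternating bits: even log transids use EXTENT_DIRTY, odd ones use
 * EXTENT_NEW, so the writeback and wait below only touch blocks that
 * belong to this log transaction.
 */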
2795 if (log_transid % 2 == 0)
2796 mark = EXTENT_DIRTY;
2797 else
2798 mark = EXTENT_NEW;
2799
2800 /* we start IO on all the marked extents here, but we don't actually
2801 * wait for them until later.
2802 */
2803 blk_start_plug(&plug);
2804 ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark);
2805 if (ret) {
2806 blk_finish_plug(&plug);
2807 btrfs_abort_transaction(trans, ret);
2808 btrfs_free_logged_extents(log, log_transid);
2809 btrfs_set_log_full_commit(root->fs_info, trans);
2810 mutex_unlock(&root->log_mutex);
2811 goto out;
2812 }
2813
2814 btrfs_set_root_node(&log->root_item, log->node);
2815
2816 root->log_transid++;
2817 log->log_transid = root->log_transid;
2818 root->log_start_pid = 0;
2819 /*
2820 * IO has been started, blocks of the log tree have the WRITTEN flag set
2821 * in their headers. New modifications of the log will be written to
2822 * new positions, so it's safe to allow log writers to go in.
2823 */
2824 mutex_unlock(&root->log_mutex);
2825
2826 btrfs_init_log_ctx(&root_log_ctx, NULL);
2827
2828 mutex_lock(&log_root_tree->log_mutex);
2829 atomic_inc(&log_root_tree->log_batch);
2830 atomic_inc(&log_root_tree->log_writers);
2831
2832 index2 = log_root_tree->log_transid % 2;
2833 list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
2834 root_log_ctx.log_transid = log_root_tree->log_transid;
2835
2836 mutex_unlock(&log_root_tree->log_mutex);
2837
2838 ret = update_log_root(trans, log);
2839
2840 mutex_lock(&log_root_tree->log_mutex);
2841 if (atomic_dec_and_test(&log_root_tree->log_writers)) {
2842 /*
2843 * Implicit memory barrier after atomic_dec_and_test
2844 */
2845 if (waitqueue_active(&log_root_tree->log_writer_wait))
2846 wake_up(&log_root_tree->log_writer_wait);
2847 }
2848
2849 if (ret) {
2850 if (!list_empty(&root_log_ctx.list))
2851 list_del_init(&root_log_ctx.list);
2852
2853 blk_finish_plug(&plug);
2854 btrfs_set_log_full_commit(root->fs_info, trans);
2855
2856 if (ret != -ENOSPC) {
2857 btrfs_abort_transaction(trans, ret);
2858 mutex_unlock(&log_root_tree->log_mutex);
2859 goto out;
2860 }
2861 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2862 btrfs_free_logged_extents(log, log_transid);
2863 mutex_unlock(&log_root_tree->log_mutex);
2864 ret = -EAGAIN;
2865 goto out;
2866 }
2867
2868 if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
2869 blk_finish_plug(&plug);
2870 mutex_unlock(&log_root_tree->log_mutex);
2871 ret = root_log_ctx.log_ret;
2872 goto out;
2873 }
2874
2875 index2 = root_log_ctx.log_transid % 2;
2876 if (atomic_read(&log_root_tree->log_commit[index2])) {
2877 blk_finish_plug(&plug);
2878 ret = btrfs_wait_marked_extents(log, &log->dirty_log_pages,
2879 mark);
2880 btrfs_wait_logged_extents(trans, log, log_transid);
2881 wait_log_commit(log_root_tree,
2882 root_log_ctx.log_transid);
2883 mutex_unlock(&log_root_tree->log_mutex);
2884 if (!ret)
2885 ret = root_log_ctx.log_ret;
2886 goto out;
2887 }
2888 ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid);
2889 atomic_set(&log_root_tree->log_commit[index2], 1);
2890
2891 if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
2892 wait_log_commit(log_root_tree,
2893 root_log_ctx.log_transid - 1);
2894 }
2895
2896 wait_for_writer(log_root_tree);
2897
2898 /*
2899 * now that we've moved on to the tree of log tree roots,
2900 * check the full commit flag again
2901 */
2902 if (btrfs_need_log_full_commit(root->fs_info, trans)) {
2903 blk_finish_plug(&plug);
2904 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2905 btrfs_free_logged_extents(log, log_transid);
2906 mutex_unlock(&log_root_tree->log_mutex);
2907 ret = -EAGAIN;
2908 goto out_wake_log_root;
2909 }
2910
2911 ret = btrfs_write_marked_extents(log_root_tree,
2912 &log_root_tree->dirty_log_pages,
2913 EXTENT_DIRTY | EXTENT_NEW);
2914 blk_finish_plug(&plug);
2915 if (ret) {
2916 btrfs_set_log_full_commit(root->fs_info, trans);
2917 btrfs_abort_transaction(trans, ret);
2918 btrfs_free_logged_extents(log, log_transid);
2919 mutex_unlock(&log_root_tree->log_mutex);
2920 goto out_wake_log_root;
2921 }
2922 ret = btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2923 if (!ret)
2924 ret = btrfs_wait_marked_extents(log_root_tree,
2925 &log_root_tree->dirty_log_pages,
2926 EXTENT_NEW | EXTENT_DIRTY);
2927 if (ret) {
2928 btrfs_set_log_full_commit(root->fs_info, trans);
2929 btrfs_free_logged_extents(log, log_transid);
2930 mutex_unlock(&log_root_tree->log_mutex);
2931 goto out_wake_log_root;
2932 }
2933 btrfs_wait_logged_extents(trans, log, log_transid);
2934
2935 btrfs_set_super_log_root(root->fs_info->super_for_commit,
2936 log_root_tree->node->start);
2937 btrfs_set_super_log_root_level(root->fs_info->super_for_commit,
2938 btrfs_header_level(log_root_tree->node));
2939
2940 log_root_tree->log_transid++;
2941 mutex_unlock(&log_root_tree->log_mutex);
2942
2943 /*
2944 * nobody else is going to jump in and write the ctree
2945 * super here because the log_commit atomic below is protecting
2946 * us. We must be called with a transaction handle pinning
2947 * the running transaction open, so a full commit can't hop
2948 * in and cause problems either.
2949 */
2950 ret = write_ctree_super(trans, root->fs_info->tree_root, 1);
2951 if (ret) {
2952 btrfs_set_log_full_commit(root->fs_info, trans);
2953 btrfs_abort_transaction(trans, ret);
2954 goto out_wake_log_root;
2955 }
2956
2957 mutex_lock(&root->log_mutex);
2958 if (root->last_log_commit < log_transid)
2959 root->last_log_commit = log_transid;
2960 mutex_unlock(&root->log_mutex);
2961
2962 out_wake_log_root:
2963 /*
2964 * We needn't get log_mutex here because we are sure all
2965 * the other tasks are blocked.
2966 */
2967 btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);
2968
2969 mutex_lock(&log_root_tree->log_mutex);
2970 log_root_tree->log_transid_committed++;
2971 atomic_set(&log_root_tree->log_commit[index2], 0);
2972 mutex_unlock(&log_root_tree->log_mutex);
2973
2974 /*
2975 * The barrier before waitqueue_active is implied by mutex_unlock
2976 */
2977 if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
2978 wake_up(&log_root_tree->log_commit_wait[index2]);
2979 out:
2980 /* See above. */
2981 btrfs_remove_all_log_ctxs(root, index1, ret);
2982
2983 mutex_lock(&root->log_mutex);
2984 root->log_transid_committed++;
2985 atomic_set(&root->log_commit[index1], 0);
2986 mutex_unlock(&root->log_mutex);
2987
2988 /*
2989 * The barrier before waitqueue_active is implied by mutex_unlock
2990 */
2991 if (waitqueue_active(&root->log_commit_wait[index1]))
2992 wake_up(&root->log_commit_wait[index1]);
2993 return ret;
2994 }
2995
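/*
 * Release every block of a log tree: walk it with wc.free set so each
 * block is cleaned and its reserved extent freed or pinned, clear any
 * remaining dirty ranges, drop leftover logged ordered extents and
 * finally free the root itself.
 */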
2996 static void free_log_tree(struct btrfs_trans_handle *trans,
2997 struct btrfs_root *log)
2998 {
2999 int ret;
3000 u64 start;
3001 u64 end;
3002 struct walk_control wc = {
3003 .free = 1,
3004 .process_func = process_one_buffer
3005 };
3006
3007 ret = walk_log_tree(trans, log, &wc);
3008 /* I don't think this can happen but just in case */
3009 if (ret)
3010 btrfs_abort_transaction(trans, ret);
3011
3012 while (1) {
3013 ret = find_first_extent_bit(&log->dirty_log_pages,
3014 0, &start, &end, EXTENT_DIRTY | EXTENT_NEW,
3015 NULL);
3016 if (ret)
3017 break;
3018
3019 clear_extent_bits(&log->dirty_log_pages, start, end,
3020 EXTENT_DIRTY | EXTENT_NEW);
3021 }
3022
3023 /*
3024 * We may have short-circuited the log tree with the full commit logic
3025 * and left ordered extents on our list, so clear these out to keep us
3026 * from leaking inodes and memory.
3027 */
3028 btrfs_free_logged_extents(log, 0);
3029 btrfs_free_logged_extents(log, 1);
3030
3031 free_extent_buffer(log->node);
3032 kfree(log);
3033 }
3034
3035 /*
3036 * free all the extents used by the tree log. This should be called
3037 * at commit time of the full transaction
3038 */
3039 int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
3040 {
3041 if (root->log_root) {
3042 free_log_tree(trans, root->log_root);
3043 root->log_root = NULL;
3044 }
3045 return 0;
3046 }
3047
3048 int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
3049 struct btrfs_fs_info *fs_info)
3050 {
3051 if (fs_info->log_root_tree) {
3052 free_log_tree(trans, fs_info->log_root_tree);
3053 fs_info->log_root_tree = NULL;
3054 }
3055 return 0;
3056 }
3057
3058 /*
3059 * If both a file and directory are logged, and unlinks or renames are
3060 * mixed in, we have a few interesting corners:
3061 *
3062 * create file X in dir Y
3063 * link file X to X.link in dir Y
3064 * fsync file X
3065 * unlink file X but leave X.link
3066 * fsync dir Y
3067 *
3068 * After a crash we would expect only X.link to exist. But file X
3069 * didn't get fsync'd again so the log has back refs for X and X.link.
3070 *
3071 * We solve this by removing directory entries and inode backrefs from the
3072 * log when a file that was logged in the current transaction is
3073 * unlinked. Any later fsync will include the updated log entries, and
3074 * we'll be able to reconstruct the proper directory items from backrefs.
3075 *
3076 * This optimization allows us to avoid relogging the entire inode
3077 * or the entire directory.
3078 */
3079 int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
3080 struct btrfs_root *root,
3081 const char *name, int name_len,
3082 struct inode *dir, u64 index)
3083 {
3084 struct btrfs_root *log;
3085 struct btrfs_dir_item *di;
3086 struct btrfs_path *path;
3087 int ret;
3088 int err = 0;
3089 int bytes_del = 0;
3090 u64 dir_ino = btrfs_ino(dir);
3091
3092 if (BTRFS_I(dir)->logged_trans < trans->transid)
3093 return 0;
3094
3095 ret = join_running_log_trans(root);
3096 if (ret)
3097 return 0;
3098
3099 mutex_lock(&BTRFS_I(dir)->log_mutex);
3100
3101 log = root->log_root;
3102 path = btrfs_alloc_path();
3103 if (!path) {
3104 err = -ENOMEM;
3105 goto out_unlock;
3106 }
3107
3108 di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
3109 name, name_len, -1);
3110 if (IS_ERR(di)) {
3111 err = PTR_ERR(di);
3112 goto fail;
3113 }
3114 if (di) {
3115 ret = btrfs_delete_one_dir_name(trans, log, path, di);
3116 bytes_del += name_len;
3117 if (ret) {
3118 err = ret;
3119 goto fail;
3120 }
3121 }
3122 btrfs_release_path(path);
3123 di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
3124 index, name, name_len, -1);
3125 if (IS_ERR(di)) {
3126 err = PTR_ERR(di);
3127 goto fail;
3128 }
3129 if (di) {
3130 ret = btrfs_delete_one_dir_name(trans, log, path, di);
3131 bytes_del += name_len;
3132 if (ret) {
3133 err = ret;
3134 goto fail;
3135 }
3136 }
3137
3138 /* update the directory size in the log to reflect the names
3139 * we have removed
3140 */
3141 if (bytes_del) {
3142 struct btrfs_key key;
3143
3144 key.objectid = dir_ino;
3145 key.offset = 0;
3146 key.type = BTRFS_INODE_ITEM_KEY;
3147 btrfs_release_path(path);
3148
3149 ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
3150 if (ret < 0) {
3151 err = ret;
3152 goto fail;
3153 }
3154 if (ret == 0) {
3155 struct btrfs_inode_item *item;
3156 u64 i_size;
3157
3158 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3159 struct btrfs_inode_item);
3160 i_size = btrfs_inode_size(path->nodes[0], item);
3161 if (i_size > bytes_del)
3162 i_size -= bytes_del;
3163 else
3164 i_size = 0;
3165 btrfs_set_inode_size(path->nodes[0], item, i_size);
3166 btrfs_mark_buffer_dirty(path->nodes[0]);
3167 } else
3168 ret = 0;
3169 btrfs_release_path(path);
3170 }
3171 fail:
3172 btrfs_free_path(path);
3173 out_unlock:
3174 mutex_unlock(&BTRFS_I(dir)->log_mutex);
3175 if (ret == -ENOSPC) {
3176 btrfs_set_log_full_commit(root->fs_info, trans);
3177 ret = 0;
3178 } else if (ret < 0)
3179 btrfs_abort_transaction(trans, ret);
3180
3181 btrfs_end_log_trans(root);
3182
3183 return err;
3184 }
3185
3186 /* see comments for btrfs_del_dir_entries_in_log */
3187 int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
3188 struct btrfs_root *root,
3189 const char *name, int name_len,
3190 struct inode *inode, u64 dirid)
3191 {
3192 struct btrfs_root *log;
3193 u64 index;
3194 int ret;
3195
3196 if (BTRFS_I(inode)->logged_trans < trans->transid)
3197 return 0;
3198
3199 ret = join_running_log_trans(root);
3200 if (ret)
3201 return 0;
3202 log = root->log_root;
3203 mutex_lock(&BTRFS_I(inode)->log_mutex);
3204
3205 ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
3206 dirid, &index);
3207 mutex_unlock(&BTRFS_I(inode)->log_mutex);
3208 if (ret == -ENOSPC) {
3209 btrfs_set_log_full_commit(root->fs_info, trans);
3210 ret = 0;
3211 } else if (ret < 0 && ret != -ENOENT)
3212 btrfs_abort_transaction(trans, ret);
3213 btrfs_end_log_trans(root);
3214
3215 return ret;
3216 }
3217
3218 /*
3219 * creates a range item in the log for 'dirid'. first_offset and
3220 * last_offset tell us which parts of the key space the log should
3221 * be considered authoritative for.
3222 */
3223 static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
3224 struct btrfs_root *log,
3225 struct btrfs_path *path,
3226 int key_type, u64 dirid,
3227 u64 first_offset, u64 last_offset)
3228 {
3229 int ret;
3230 struct btrfs_key key;
3231 struct btrfs_dir_log_item *item;
3232
3233 key.objectid = dirid;
3234 key.offset = first_offset;
3235 if (key_type == BTRFS_DIR_ITEM_KEY)
3236 key.type = BTRFS_DIR_LOG_ITEM_KEY;
3237 else
3238 key.type = BTRFS_DIR_LOG_INDEX_KEY;
3239 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
3240 if (ret)
3241 return ret;
3242
3243 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3244 struct btrfs_dir_log_item);
3245 btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
3246 btrfs_mark_buffer_dirty(path->nodes[0]);
3247 btrfs_release_path(path);
3248 return 0;
3249 }
3250
3251 /*
3252 * log all the items included in the current transaction for a given
3253 * directory. This also creates the range items in the log tree required
3254 * to replay anything deleted before the fsync
3255 */
3256 static noinline int log_dir_items(struct btrfs_trans_handle *trans,
3257 struct btrfs_root *root, struct inode *inode,
3258 struct btrfs_path *path,
3259 struct btrfs_path *dst_path, int key_type,
3260 struct btrfs_log_ctx *ctx,
3261 u64 min_offset, u64 *last_offset_ret)
3262 {
3263 struct btrfs_key min_key;
3264 struct btrfs_root *log = root->log_root;
3265 struct extent_buffer *src;
3266 int err = 0;
3267 int ret;
3268 int i;
3269 int nritems;
3270 u64 first_offset = min_offset;
3271 u64 last_offset = (u64)-1;
3272 u64 ino = btrfs_ino(inode);
3273
3274 log = root->log_root;
3275
3276 min_key.objectid = ino;
3277 min_key.type = key_type;
3278 min_key.offset = min_offset;
3279
3280 ret = btrfs_search_forward(root, &min_key, path, trans->transid);
3281
3282 /*
3283 * we didn't find anything from this transaction, see if there
3284 * is anything at all
3285 */
3286 if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
3287 min_key.objectid = ino;
3288 min_key.type = key_type;
3289 min_key.offset = (u64)-1;
3290 btrfs_release_path(path);
3291 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3292 if (ret < 0) {
3293 btrfs_release_path(path);
3294 return ret;
3295 }
3296 ret = btrfs_previous_item(root, path, ino, key_type);
3297
3298 /* if ret == 0 there are items for this type,
3299 * create a range to tell us the last key of this type.
3300 * otherwise, there are no items in this directory after
3301 * *min_offset, and we create a range to indicate that.
3302 */
3303 if (ret == 0) {
3304 struct btrfs_key tmp;
3305 btrfs_item_key_to_cpu(path->nodes[0], &tmp,
3306 path->slots[0]);
3307 if (key_type == tmp.type)
3308 first_offset = max(min_offset, tmp.offset) + 1;
3309 }
3310 goto done;
3311 }
3312
3313 /* go backward to find any previous key */
3314 ret = btrfs_previous_item(root, path, ino, key_type);
3315 if (ret == 0) {
3316 struct btrfs_key tmp;
3317 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3318 if (key_type == tmp.type) {
3319 first_offset = tmp.offset;
3320 ret = overwrite_item(trans, log, dst_path,
3321 path->nodes[0], path->slots[0],
3322 &tmp);
3323 if (ret) {
3324 err = ret;
3325 goto done;
3326 }
3327 }
3328 }
3329 btrfs_release_path(path);
3330
3331 /* find the first key from this transaction again */
3332 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3333 if (WARN_ON(ret != 0))
3334 goto done;
3335
3336 /*
3337 * we have a block from this transaction, log every item in it
3338 * from our directory
3339 */
3340 while (1) {
3341 struct btrfs_key tmp;
3342 src = path->nodes[0];
3343 nritems = btrfs_header_nritems(src);
3344 for (i = path->slots[0]; i < nritems; i++) {
3345 struct btrfs_dir_item *di;
3346
3347 btrfs_item_key_to_cpu(src, &min_key, i);
3348
3349 if (min_key.objectid != ino || min_key.type != key_type)
3350 goto done;
3351 ret = overwrite_item(trans, log, dst_path, src, i,
3352 &min_key);
3353 if (ret) {
3354 err = ret;
3355 goto done;
3356 }
3357
3358 /*
3359 * We must make sure that when we log a directory entry,
3360 * the corresponding inode, after log replay, has a
3361 * matching link count. For example:
3362 *
3363 * touch foo
3364 * mkdir mydir
3365 * sync
3366 * ln foo mydir/bar
3367 * xfs_io -c "fsync" mydir
3368 * <crash>
3369 * <mount fs and log replay>
3370 *
3371 * Would result in an fsync log that, when replayed, leaves our
3372 * file inode with a link count of 1 even though there are two
3373 * directory entries pointing to the same inode. After removing
3374 * one of the names, it would not be possible to remove the
3375 * other name, which would always result in stale file handle
3376 * errors, and it would not be possible to rmdir the parent
3377 * directory, since its i_size could never decrement to the
3378 * value BTRFS_EMPTY_DIR_SIZE, resulting in -ENOTEMPTY
3379 * errors.
3380 */
3381 di = btrfs_item_ptr(src, i, struct btrfs_dir_item);
3382 btrfs_dir_item_key_to_cpu(src, di, &tmp);
3383 if (ctx &&
3384 (btrfs_dir_transid(src, di) == trans->transid ||
3385 btrfs_dir_type(src, di) == BTRFS_FT_DIR) &&
3386 tmp.type != BTRFS_ROOT_ITEM_KEY)
3387 ctx->log_new_dentries = true;
3388 }
3389 path->slots[0] = nritems;
3390
3391 /*
3392 * look ahead to the next item and see if it is also
3393 * from this directory and from this transaction
3394 */
3395 ret = btrfs_next_leaf(root, path);
3396 if (ret == 1) {
3397 last_offset = (u64)-1;
3398 goto done;
3399 }
3400 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3401 if (tmp.objectid != ino || tmp.type != key_type) {
3402 last_offset = (u64)-1;
3403 goto done;
3404 }
3405 if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
3406 ret = overwrite_item(trans, log, dst_path,
3407 path->nodes[0], path->slots[0],
3408 &tmp);
3409 if (ret)
3410 err = ret;
3411 else
3412 last_offset = tmp.offset;
3413 goto done;
3414 }
3415 }
3416 done:
3417 btrfs_release_path(path);
3418 btrfs_release_path(dst_path);
3419
3420 if (err == 0) {
3421 *last_offset_ret = last_offset;
3422 /*
3423 * insert the log range keys to indicate where the log
3424 * is valid
3425 */
3426 ret = insert_dir_log_key(trans, log, path, key_type,
3427 ino, first_offset, last_offset);
3428 if (ret)
3429 err = ret;
3430 }
3431 return err;
3432 }
3433
3434 /*
3435 * Logging directories is very similar to logging inodes. We find all the items
3436 * from the current transaction and write them to the log.
3437 *
3438 * The recovery code scans the directory in the subvolume, and if it finds a
3439 * key in the range logged that is not present in the log tree, then it means
3440 * that dir entry was unlinked during the transaction.
3441 *
3442 * In order for that scan to work, we must include one key smaller than
3443 * the smallest logged by this transaction and one key larger than the largest
3444 * key logged by this transaction.
3445 */
3446 static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
3447 struct btrfs_root *root, struct inode *inode,
3448 struct btrfs_path *path,
3449 struct btrfs_path *dst_path,
3450 struct btrfs_log_ctx *ctx)
3451 {
3452 u64 min_key;
3453 u64 max_key;
3454 int ret;
3455 int key_type = BTRFS_DIR_ITEM_KEY;
3456
3457 again:
3458 min_key = 0;
3459 max_key = 0;
3460 while (1) {
3461 ret = log_dir_items(trans, root, inode, path,
3462 dst_path, key_type, ctx, min_key,
3463 &max_key);
3464 if (ret)
3465 return ret;
3466 if (max_key == (u64)-1)
3467 break;
3468 min_key = max_key + 1;
3469 }
3470
3471 if (key_type == BTRFS_DIR_ITEM_KEY) {
3472 key_type = BTRFS_DIR_INDEX_KEY;
3473 goto again;
3474 }
3475 return 0;
3476 }
3477
3478 /*
3479 * a helper function to drop items from the log before we relog an
3480 * inode. max_key_type indicates the highest item type to remove.
3481 * This cannot be run for file data extents because it does not
3482 * free the extents they point to.
3483 */
3484 static int drop_objectid_items(struct btrfs_trans_handle *trans,
3485 struct btrfs_root *log,
3486 struct btrfs_path *path,
3487 u64 objectid, int max_key_type)
3488 {
3489 int ret;
3490 struct btrfs_key key;
3491 struct btrfs_key found_key;
3492 int start_slot;
3493
3494 key.objectid = objectid;
3495 key.type = max_key_type;
3496 key.offset = (u64)-1;
3497
3498 while (1) {
3499 ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
3500 BUG_ON(ret == 0); /* Logic error */
3501 if (ret < 0)
3502 break;
3503
3504 if (path->slots[0] == 0)
3505 break;
3506
3507 path->slots[0]--;
3508 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
3509 path->slots[0]);
3510
3511 if (found_key.objectid != objectid)
3512 break;
3513
3514 found_key.offset = 0;
3515 found_key.type = 0;
3516 ret = btrfs_bin_search(path->nodes[0], &found_key, 0,
3517 &start_slot);
3518
3519 ret = btrfs_del_items(trans, log, path, start_slot,
3520 path->slots[0] - start_slot + 1);
3521 /*
3522 * If start slot isn't 0 then we don't need to re-search, we've
3523 * found the last guy with the objectid in this tree.
3524 */
3525 if (ret || start_slot != 0)
3526 break;
3527 btrfs_release_path(path);
3528 }
3529 btrfs_release_path(path);
3530 if (ret > 0)
3531 ret = 0;
3532 return ret;
3533 }
3534
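/*
 * Copy the fields of an in-memory inode into a btrfs_inode_item in the
 * log tree. With log_inode_only set, the generation is stored as zero
 * and the size as logged_isize, so replay knows this entry only says
 * the inode exists rather than carrying its full state.
 */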
3535 static void fill_inode_item(struct btrfs_trans_handle *trans,
3536 struct extent_buffer *leaf,
3537 struct btrfs_inode_item *item,
3538 struct inode *inode, int log_inode_only,
3539 u64 logged_isize)
3540 {
3541 struct btrfs_map_token token;
3542
3543 btrfs_init_map_token(&token);
3544
3545 if (log_inode_only) {
3546 /* set the generation to zero so the recovery code
3547 * can tell the difference between a logging
3548 * just to say 'this inode exists' and a logging
3549 * to say 'update this inode with these values'
3550 */
3551 btrfs_set_token_inode_generation(leaf, item, 0, &token);
3552 btrfs_set_token_inode_size(leaf, item, logged_isize, &token);
3553 } else {
3554 btrfs_set_token_inode_generation(leaf, item,
3555 BTRFS_I(inode)->generation,
3556 &token);
3557 btrfs_set_token_inode_size(leaf, item, inode->i_size, &token);
3558 }
3559
3560 btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
3561 btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
3562 btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
3563 btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
3564
3565 btrfs_set_token_timespec_sec(leaf, &item->atime,
3566 inode->i_atime.tv_sec, &token);
3567 btrfs_set_token_timespec_nsec(leaf, &item->atime,
3568 inode->i_atime.tv_nsec, &token);
3569
3570 btrfs_set_token_timespec_sec(leaf, &item->mtime,
3571 inode->i_mtime.tv_sec, &token);
3572 btrfs_set_token_timespec_nsec(leaf, &item->mtime,
3573 inode->i_mtime.tv_nsec, &token);
3574
3575 btrfs_set_token_timespec_sec(leaf, &item->ctime,
3576 inode->i_ctime.tv_sec, &token);
3577 btrfs_set_token_timespec_nsec(leaf, &item->ctime,
3578 inode->i_ctime.tv_nsec, &token);
3579
3580 btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
3581 &token);
3582
3583 btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
3584 btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
3585 btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
3586 btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
3587 btrfs_set_token_inode_block_group(leaf, item, 0, &token);
3588 }
3589
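/*
 * Insert the inode item for 'inode' into the log tree at its location
 * key (tolerating -EEXIST) and fill it with the current in-memory
 * state of the inode.
 */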
3590 static int log_inode_item(struct btrfs_trans_handle *trans,
3591 struct btrfs_root *log, struct btrfs_path *path,
3592 struct inode *inode)
3593 {
3594 struct btrfs_inode_item *inode_item;
3595 int ret;
3596
3597 ret = btrfs_insert_empty_item(trans, log, path,
3598 &BTRFS_I(inode)->location,
3599 sizeof(*inode_item));
3600 if (ret && ret != -EEXIST)
3601 return ret;
3602 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3603 struct btrfs_inode_item);
3604 fill_inode_item(trans, path->nodes[0], inode_item, inode, 0, 0);
3605 btrfs_release_path(path);
3606 return 0;
3607 }
3608
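/*
 * Copy 'nr' items starting at 'start_slot' of a subvolume leaf into the
 * log tree. Inode items are rewritten through fill_inode_item(), csums
 * for regular file extents are copied as well, and gaps between the
 * copied file extent items are filled with explicit holes so replay
 * punches them out. Returns 1 when the source path was dropped and the
 * caller must re-search.
 */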
3609 static noinline int copy_items(struct btrfs_trans_handle *trans,
3610 struct inode *inode,
3611 struct btrfs_path *dst_path,
3612 struct btrfs_path *src_path, u64 *last_extent,
3613 int start_slot, int nr, int inode_only,
3614 u64 logged_isize)
3615 {
3616 unsigned long src_offset;
3617 unsigned long dst_offset;
3618 struct btrfs_root *log = BTRFS_I(inode)->root->log_root;
3619 struct btrfs_file_extent_item *extent;
3620 struct btrfs_inode_item *inode_item;
3621 struct extent_buffer *src = src_path->nodes[0];
3622 struct btrfs_key first_key, last_key, key;
3623 int ret;
3624 struct btrfs_key *ins_keys;
3625 u32 *ins_sizes;
3626 char *ins_data;
3627 int i;
3628 struct list_head ordered_sums;
3629 int skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
3630 bool has_extents = false;
3631 bool need_find_last_extent = true;
3632 bool done = false;
3633
3634 INIT_LIST_HEAD(&ordered_sums);
3635
3636 ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
3637 nr * sizeof(u32), GFP_NOFS);
3638 if (!ins_data)
3639 return -ENOMEM;
3640
3641 first_key.objectid = (u64)-1;
3642
3643 ins_sizes = (u32 *)ins_data;
3644 ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));
3645
3646 for (i = 0; i < nr; i++) {
3647 ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
3648 btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
3649 }
3650 ret = btrfs_insert_empty_items(trans, log, dst_path,
3651 ins_keys, ins_sizes, nr);
3652 if (ret) {
3653 kfree(ins_data);
3654 return ret;
3655 }
3656
3657 for (i = 0; i < nr; i++, dst_path->slots[0]++) {
3658 dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
3659 dst_path->slots[0]);
3660
3661 src_offset = btrfs_item_ptr_offset(src, start_slot + i);
3662
3663 if ((i == (nr - 1)))
3664 last_key = ins_keys[i];
3665
3666 if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
3667 inode_item = btrfs_item_ptr(dst_path->nodes[0],
3668 dst_path->slots[0],
3669 struct btrfs_inode_item);
3670 fill_inode_item(trans, dst_path->nodes[0], inode_item,
3671 inode, inode_only == LOG_INODE_EXISTS,
3672 logged_isize);
3673 } else {
3674 copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
3675 src_offset, ins_sizes[i]);
3676 }
3677
3678 /*
3679 * We set need_find_last_extent here in case we know we were
3680 * processing other items and then walk into the first extent in
3681 * the inode. If we don't hit an extent then nothing changes,
3682 * we'll do the last search the next time around.
3683 */
3684 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY) {
3685 has_extents = true;
3686 if (first_key.objectid == (u64)-1)
3687 first_key = ins_keys[i];
3688 } else {
3689 need_find_last_extent = false;
3690 }
3691
3692 /* take a reference on file data extents so that truncates
3693 * or deletes of this inode don't have to relog the inode
3694 * again
3695 */
3696 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY &&
3697 !skip_csum) {
3698 int found_type;
3699 extent = btrfs_item_ptr(src, start_slot + i,
3700 struct btrfs_file_extent_item);
3701
3702 if (btrfs_file_extent_generation(src, extent) < trans->transid)
3703 continue;
3704
3705 found_type = btrfs_file_extent_type(src, extent);
3706 if (found_type == BTRFS_FILE_EXTENT_REG) {
3707 u64 ds, dl, cs, cl;
3708 ds = btrfs_file_extent_disk_bytenr(src,
3709 extent);
3710 /* ds == 0 is a hole */
3711 if (ds == 0)
3712 continue;
3713
3714 dl = btrfs_file_extent_disk_num_bytes(src,
3715 extent);
3716 cs = btrfs_file_extent_offset(src, extent);
3717 cl = btrfs_file_extent_num_bytes(src,
3718 extent);
3719 if (btrfs_file_extent_compression(src,
3720 extent)) {
3721 cs = 0;
3722 cl = dl;
3723 }
3724
3725 ret = btrfs_lookup_csums_range(
3726 log->fs_info->csum_root,
3727 ds + cs, ds + cs + cl - 1,
3728 &ordered_sums, 0);
3729 if (ret) {
3730 btrfs_release_path(dst_path);
3731 kfree(ins_data);
3732 return ret;
3733 }
3734 }
3735 }
3736 }
3737
3738 btrfs_mark_buffer_dirty(dst_path->nodes[0]);
3739 btrfs_release_path(dst_path);
3740 kfree(ins_data);
3741
3742 /*
3743 * we have to do this after the loop above to avoid changing the
3744 * log tree while trying to change the log tree.
3745 */
3746 ret = 0;
3747 while (!list_empty(&ordered_sums)) {
3748 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
3749 struct btrfs_ordered_sum,
3750 list);
3751 if (!ret)
3752 ret = btrfs_csum_file_blocks(trans, log, sums);
3753 list_del(&sums->list);
3754 kfree(sums);
3755 }
3756
3757 if (!has_extents)
3758 return ret;
3759
3760 if (need_find_last_extent && *last_extent == first_key.offset) {
3761 /*
3762 * We don't have any leaves between our current one and the one
3763 * we processed before that can have file extent items for our
3764 * inode (and have a generation number smaller than our current
3765 * transaction id).
3766 */
3767 need_find_last_extent = false;
3768 }
3769
3770 /*
3771 * Because we use btrfs_search_forward we could skip leaves that were
3772 * not modified and then assume *last_extent is valid when it really
3773 * isn't. So back up to the previous leaf and read the end of the last
3774 * extent before we go and fill in holes.
3775 */
3776 if (need_find_last_extent) {
3777 u64 len;
3778
3779 ret = btrfs_prev_leaf(BTRFS_I(inode)->root, src_path);
3780 if (ret < 0)
3781 return ret;
3782 if (ret)
3783 goto fill_holes;
3784 if (src_path->slots[0])
3785 src_path->slots[0]--;
3786 src = src_path->nodes[0];
3787 btrfs_item_key_to_cpu(src, &key, src_path->slots[0]);
3788 if (key.objectid != btrfs_ino(inode) ||
3789 key.type != BTRFS_EXTENT_DATA_KEY)
3790 goto fill_holes;
3791 extent = btrfs_item_ptr(src, src_path->slots[0],
3792 struct btrfs_file_extent_item);
3793 if (btrfs_file_extent_type(src, extent) ==
3794 BTRFS_FILE_EXTENT_INLINE) {
3795 len = btrfs_file_extent_inline_len(src,
3796 src_path->slots[0],
3797 extent);
3798 *last_extent = ALIGN(key.offset + len,
3799 log->sectorsize);
3800 } else {
3801 len = btrfs_file_extent_num_bytes(src, extent);
3802 *last_extent = key.offset + len;
3803 }
3804 }
3805 fill_holes:
3806 /* So we did prev_leaf, now we need to move to the next leaf, but a few
3807 * things could have happened
3808 *
3809 * 1) A merge could have happened, so we could currently be on a leaf
3810 * that holds what we were copying in the first place.
3811 * 2) A split could have happened, and now not all of the items we want
3812 * are on the same leaf.
3813 *
3814 * So we need to adjust how we search for holes, we need to drop the
3815 * path and re-search for the first extent key we found, and then walk
3816 * forward until we hit the last one we copied.
3817 */
3818 if (need_find_last_extent) {
3819 /* btrfs_prev_leaf could return 1 without releasing the path */
3820 btrfs_release_path(src_path);
3821 ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &first_key,
3822 src_path, 0, 0);
3823 if (ret < 0)
3824 return ret;
3825 ASSERT(ret == 0);
3826 src = src_path->nodes[0];
3827 i = src_path->slots[0];
3828 } else {
3829 i = start_slot;
3830 }
3831
3832 /*
3833 * Ok so here we need to go through and fill in any holes we may have
3834 * to make sure that holes are punched for those areas in case they had
3835 * extents previously.
3836 */
3837 while (!done) {
3838 u64 offset, len;
3839 u64 extent_end;
3840
3841 if (i >= btrfs_header_nritems(src_path->nodes[0])) {
3842 ret = btrfs_next_leaf(BTRFS_I(inode)->root, src_path);
3843 if (ret < 0)
3844 return ret;
3845 ASSERT(ret == 0);
3846 src = src_path->nodes[0];
3847 i = 0;
3848 }
3849
3850 btrfs_item_key_to_cpu(src, &key, i);
3851 if (!btrfs_comp_cpu_keys(&key, &last_key))
3852 done = true;
3853 if (key.objectid != btrfs_ino(inode) ||
3854 key.type != BTRFS_EXTENT_DATA_KEY) {
3855 i++;
3856 continue;
3857 }
3858 extent = btrfs_item_ptr(src, i, struct btrfs_file_extent_item);
3859 if (btrfs_file_extent_type(src, extent) ==
3860 BTRFS_FILE_EXTENT_INLINE) {
3861 len = btrfs_file_extent_inline_len(src, i, extent);
3862 extent_end = ALIGN(key.offset + len, log->sectorsize);
3863 } else {
3864 len = btrfs_file_extent_num_bytes(src, extent);
3865 extent_end = key.offset + len;
3866 }
3867 i++;
3868
3869 if (*last_extent == key.offset) {
3870 *last_extent = extent_end;
3871 continue;
3872 }
3873 offset = *last_extent;
3874 len = key.offset - *last_extent;
3875 ret = btrfs_insert_file_extent(trans, log, btrfs_ino(inode),
3876 offset, 0, 0, len, 0, len, 0,
3877 0, 0);
3878 if (ret)
3879 break;
3880 *last_extent = extent_end;
3881 }
3882 /*
3883 * Need to let the callers know we dropped the path so they should
3884 * re-search.
3885 */
3886 if (!ret && need_find_last_extent)
3887 ret = 1;
3888 return ret;
3889 }
3890
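/* list_sort() comparator: order extent maps by ascending start offset */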
3891 static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
3892 {
3893 struct extent_map *em1, *em2;
3894
3895 em1 = list_entry(a, struct extent_map, list);
3896 em2 = list_entry(b, struct extent_map, list);
3897
3898 if (em1->start < em2->start)
3899 return -1;
3900 else if (em1->start > em2->start)
3901 return 1;
3902 return 0;
3903 }
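
/*
 * Editor's sketch, not part of the original source: extent_cmp is the
 * comparator that btrfs_log_changed_extents() below hands to list_sort()
 * so the collected extent maps are logged in file-offset order. Its use in
 * isolation looks like this (the helper name is hypothetical):
 */
static void __maybe_unused sort_extents_example(struct list_head *extents)
{
	/* extents links extent_map.list entries in arbitrary order */
	list_sort(NULL, extents, extent_cmp);
	/* now every entry follows its predecessor in em->start order */
}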
3904
3905 static int wait_ordered_extents(struct btrfs_trans_handle *trans,
3906 struct inode *inode,
3907 struct btrfs_root *root,
3908 const struct extent_map *em,
3909 const struct list_head *logged_list,
3910 bool *ordered_io_error)
3911 {
3912 struct btrfs_ordered_extent *ordered;
3913 struct btrfs_root *log = root->log_root;
3914 u64 mod_start = em->mod_start;
3915 u64 mod_len = em->mod_len;
3916 const bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
3917 u64 csum_offset;
3918 u64 csum_len;
3919 LIST_HEAD(ordered_sums);
3920 int ret = 0;
3921
3922 *ordered_io_error = false;
3923
3924 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
3925 em->block_start == EXTENT_MAP_HOLE)
3926 return 0;
3927
3928 /*
3929 	 * Wait for any ordered extent that covers our extent map. If it
3930 * finishes without an error, first check and see if our csums are on
3931 * our outstanding ordered extents.
3932 */
3933 list_for_each_entry(ordered, logged_list, log_list) {
3934 struct btrfs_ordered_sum *sum;
3935
3936 if (!mod_len)
3937 break;
3938
3939 if (ordered->file_offset + ordered->len <= mod_start ||
3940 mod_start + mod_len <= ordered->file_offset)
3941 continue;
3942
3943 if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
3944 !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags) &&
3945 !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
3946 const u64 start = ordered->file_offset;
3947 const u64 end = ordered->file_offset + ordered->len - 1;
3948
3949 WARN_ON(ordered->inode != inode);
3950 filemap_fdatawrite_range(inode->i_mapping, start, end);
3951 }
3952
3953 wait_event(ordered->wait,
3954 (test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) ||
3955 test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)));
3956
3957 if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)) {
3958 /*
3959 * Clear the AS_EIO/AS_ENOSPC flags from the inode's
3960 * i_mapping flags, so that the next fsync won't get
3961 * an outdated io error too.
3962 */
3963 btrfs_inode_check_errors(inode);
3964 *ordered_io_error = true;
3965 break;
3966 }
3967 /*
3968 * We are going to copy all the csums on this ordered extent, so
3969 * go ahead and adjust mod_start and mod_len in case this
3970 * ordered extent has already been logged.
3971 */
3972 if (ordered->file_offset > mod_start) {
3973 if (ordered->file_offset + ordered->len >=
3974 mod_start + mod_len)
3975 mod_len = ordered->file_offset - mod_start;
3976 /*
3977 * If we have this case
3978 *
3979 * |--------- logged extent ---------|
3980 * |----- ordered extent ----|
3981 *
3982 * Just don't mess with mod_start and mod_len, we'll
3983 * just end up logging more csums than we need and it
3984 * will be ok.
3985 */
3986 } else {
3987 if (ordered->file_offset + ordered->len <
3988 mod_start + mod_len) {
3989 mod_len = (mod_start + mod_len) -
3990 (ordered->file_offset + ordered->len);
3991 mod_start = ordered->file_offset +
3992 ordered->len;
3993 } else {
3994 mod_len = 0;
3995 }
3996 }
3997
3998 if (skip_csum)
3999 continue;
4000
4001 /*
4002 		 * To keep us from looping forever in the above case, where an
4003 		 * ordered extent falls entirely inside the logged extent.
4004 */
4005 if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM,
4006 &ordered->flags))
4007 continue;
4008
4009 list_for_each_entry(sum, &ordered->list, list) {
4010 ret = btrfs_csum_file_blocks(trans, log, sum);
4011 if (ret)
4012 break;
4013 }
4014 }
4015
4016 if (*ordered_io_error || !mod_len || ret || skip_csum)
4017 return ret;
4018
4019 if (em->compress_type) {
4020 csum_offset = 0;
4021 csum_len = max(em->block_len, em->orig_block_len);
4022 } else {
4023 csum_offset = mod_start - em->start;
4024 csum_len = mod_len;
4025 }
4026
4027 /* block start is already adjusted for the file extent offset. */
4028 ret = btrfs_lookup_csums_range(log->fs_info->csum_root,
4029 em->block_start + csum_offset,
4030 em->block_start + csum_offset +
4031 csum_len - 1, &ordered_sums, 0);
4032 if (ret)
4033 return ret;
4034
4035 while (!list_empty(&ordered_sums)) {
4036 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
4037 struct btrfs_ordered_sum,
4038 list);
4039 if (!ret)
4040 ret = btrfs_csum_file_blocks(trans, log, sums);
4041 list_del(&sums->list);
4042 kfree(sums);
4043 }
4044
4045 return ret;
4046 }
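
/*
 * Editor's sketch, not part of the original source: the mod_start/mod_len
 * adjustments in wait_ordered_extents() above are plain interval arithmetic -
 * trim the not-yet-logged range [mod_start, mod_start + mod_len) by an
 * ordered extent [oe_start, oe_start + oe_len). The same three cases as a
 * self-contained helper (name hypothetical):
 */
static void __maybe_unused trim_mod_range(u64 oe_start, u64 oe_len,
					  u64 *mod_start, u64 *mod_len)
{
	const u64 oe_end = oe_start + oe_len;
	const u64 mod_end = *mod_start + *mod_len;

	if (oe_start > *mod_start) {
		if (oe_end >= mod_end)
			*mod_len = oe_start - *mod_start;
		/*
		 * Else the ordered extent sits fully inside the logged range;
		 * leave it alone, logging a few extra csums is harmless.
		 */
	} else if (oe_end < mod_end) {
		/* ordered extent covers the head of the logged range */
		*mod_len = mod_end - oe_end;
		*mod_start = oe_end;
	} else {
		*mod_len = 0;	/* fully covered, nothing left to log */
	}
}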
4047
4048 static int log_one_extent(struct btrfs_trans_handle *trans,
4049 struct inode *inode, struct btrfs_root *root,
4050 const struct extent_map *em,
4051 struct btrfs_path *path,
4052 const struct list_head *logged_list,
4053 struct btrfs_log_ctx *ctx)
4054 {
4055 struct btrfs_root *log = root->log_root;
4056 struct btrfs_file_extent_item *fi;
4057 struct extent_buffer *leaf;
4058 struct btrfs_map_token token;
4059 struct btrfs_key key;
4060 u64 extent_offset = em->start - em->orig_start;
4061 u64 block_len;
4062 int ret;
4063 int extent_inserted = 0;
4064 bool ordered_io_err = false;
4065
4066 ret = wait_ordered_extents(trans, inode, root, em, logged_list,
4067 &ordered_io_err);
4068 if (ret)
4069 return ret;
4070
4071 if (ordered_io_err) {
4072 ctx->io_err = -EIO;
4073 return 0;
4074 }
4075
4076 btrfs_init_map_token(&token);
4077
4078 ret = __btrfs_drop_extents(trans, log, inode, path, em->start,
4079 em->start + em->len, NULL, 0, 1,
4080 sizeof(*fi), &extent_inserted);
4081 if (ret)
4082 return ret;
4083
4084 if (!extent_inserted) {
4085 key.objectid = btrfs_ino(inode);
4086 key.type = BTRFS_EXTENT_DATA_KEY;
4087 key.offset = em->start;
4088
4089 ret = btrfs_insert_empty_item(trans, log, path, &key,
4090 sizeof(*fi));
4091 if (ret)
4092 return ret;
4093 }
4094 leaf = path->nodes[0];
4095 fi = btrfs_item_ptr(leaf, path->slots[0],
4096 struct btrfs_file_extent_item);
4097
4098 btrfs_set_token_file_extent_generation(leaf, fi, trans->transid,
4099 &token);
4100 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
4101 btrfs_set_token_file_extent_type(leaf, fi,
4102 BTRFS_FILE_EXTENT_PREALLOC,
4103 &token);
4104 else
4105 btrfs_set_token_file_extent_type(leaf, fi,
4106 BTRFS_FILE_EXTENT_REG,
4107 &token);
4108
4109 block_len = max(em->block_len, em->orig_block_len);
4110 if (em->compress_type != BTRFS_COMPRESS_NONE) {
4111 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
4112 em->block_start,
4113 &token);
4114 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
4115 &token);
4116 } else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
4117 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
4118 em->block_start -
4119 extent_offset, &token);
4120 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
4121 &token);
4122 } else {
4123 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token);
4124 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0,
4125 &token);
4126 }
4127
4128 btrfs_set_token_file_extent_offset(leaf, fi, extent_offset, &token);
4129 btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token);
4130 btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->ram_bytes, &token);
4131 btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type,
4132 &token);
4133 btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token);
4134 btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token);
4135 btrfs_mark_buffer_dirty(leaf);
4136
4137 btrfs_release_path(path);
4138
4139 return ret;
4140 }
4141
4142 static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
4143 struct btrfs_root *root,
4144 struct inode *inode,
4145 struct btrfs_path *path,
4146 struct list_head *logged_list,
4147 struct btrfs_log_ctx *ctx,
4148 const u64 start,
4149 const u64 end)
4150 {
4151 struct extent_map *em, *n;
4152 struct list_head extents;
4153 struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;
4154 u64 test_gen;
4155 int ret = 0;
4156 int num = 0;
4157
4158 INIT_LIST_HEAD(&extents);
4159
4160 down_write(&BTRFS_I(inode)->dio_sem);
4161 write_lock(&tree->lock);
4162 test_gen = root->fs_info->last_trans_committed;
4163
4164 list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
4165 list_del_init(&em->list);
4166
4167 /*
4168 		 * Just an arbitrary cutoff: this gets really CPU intensive
4169 		 * once we start handling a lot of extents, and once we have
4170 		 * that many extents a full transaction commit is faster
4171 		 * anyway.
4172 */
4173 if (++num > 32768) {
4174 list_del_init(&tree->modified_extents);
4175 ret = -EFBIG;
4176 goto process;
4177 }
4178
4179 if (em->generation <= test_gen)
4180 continue;
4181 /* Need a ref to keep it from getting evicted from cache */
4182 atomic_inc(&em->refs);
4183 set_bit(EXTENT_FLAG_LOGGING, &em->flags);
4184 list_add_tail(&em->list, &extents);
4185 num++;
4186 }
4187
4188 list_sort(NULL, &extents, extent_cmp);
4189 btrfs_get_logged_extents(inode, logged_list, start, end);
4190 /*
4191 * Some ordered extents started by fsync might have completed
4192 * before we could collect them into the list logged_list, which
4193 * means they're gone, not in our logged_list nor in the inode's
4194 * ordered tree. We want the application/user space to know an
4195 * error happened while attempting to persist file data so that
4196 	 * it can take proper action. If such an error happened, we leave
4197 * without writing to the log tree and the fsync must report the
4198 * file data write error and not commit the current transaction.
4199 */
4200 ret = btrfs_inode_check_errors(inode);
4201 if (ret)
4202 ctx->io_err = ret;
4203 process:
4204 while (!list_empty(&extents)) {
4205 em = list_entry(extents.next, struct extent_map, list);
4206
4207 list_del_init(&em->list);
4208
4209 /*
4210 * If we had an error we just need to delete everybody from our
4211 * private list.
4212 */
4213 if (ret) {
4214 clear_em_logging(tree, em);
4215 free_extent_map(em);
4216 continue;
4217 }
4218
4219 write_unlock(&tree->lock);
4220
4221 ret = log_one_extent(trans, inode, root, em, path, logged_list,
4222 ctx);
4223 write_lock(&tree->lock);
4224 clear_em_logging(tree, em);
4225 free_extent_map(em);
4226 }
4227 WARN_ON(!list_empty(&extents));
4228 write_unlock(&tree->lock);
4229 up_write(&BTRFS_I(inode)->dio_sem);
4230
4231 btrfs_release_path(path);
4232 return ret;
4233 }
4234
4235 static int logged_inode_size(struct btrfs_root *log, struct inode *inode,
4236 struct btrfs_path *path, u64 *size_ret)
4237 {
4238 struct btrfs_key key;
4239 int ret;
4240
4241 key.objectid = btrfs_ino(inode);
4242 key.type = BTRFS_INODE_ITEM_KEY;
4243 key.offset = 0;
4244
4245 ret = btrfs_search_slot(NULL, log, &key, path, 0, 0);
4246 if (ret < 0) {
4247 return ret;
4248 } else if (ret > 0) {
4249 *size_ret = 0;
4250 } else {
4251 struct btrfs_inode_item *item;
4252
4253 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
4254 struct btrfs_inode_item);
4255 *size_ret = btrfs_inode_size(path->nodes[0], item);
4256 }
4257
4258 btrfs_release_path(path);
4259 return 0;
4260 }
4261
4262 /*
4263 * At the moment we always log all xattrs. This is to figure out at log replay
4264 * time which xattrs must have their deletion replayed. If an xattr is missing
4265 * in the log tree and exists in the fs/subvol tree, we delete it. This is
4266 * because if an xattr is deleted, then the inode is fsynced, and then a power
4267 * failure happens, the log is replayed the next time the fs is mounted and
4268 * we want the xattr to no longer exist (the same behaviour as other
4269 * filesystems with a journal: ext3/4, xfs, f2fs, etc).
4270 */
4271 static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
4272 struct btrfs_root *root,
4273 struct inode *inode,
4274 struct btrfs_path *path,
4275 struct btrfs_path *dst_path)
4276 {
4277 int ret;
4278 struct btrfs_key key;
4279 const u64 ino = btrfs_ino(inode);
4280 int ins_nr = 0;
4281 int start_slot = 0;
4282
4283 key.objectid = ino;
4284 key.type = BTRFS_XATTR_ITEM_KEY;
4285 key.offset = 0;
4286
4287 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4288 if (ret < 0)
4289 return ret;
4290
4291 while (true) {
4292 int slot = path->slots[0];
4293 struct extent_buffer *leaf = path->nodes[0];
4294 int nritems = btrfs_header_nritems(leaf);
4295
4296 if (slot >= nritems) {
4297 if (ins_nr > 0) {
4298 u64 last_extent = 0;
4299
4300 ret = copy_items(trans, inode, dst_path, path,
4301 &last_extent, start_slot,
4302 ins_nr, 1, 0);
4303 /* can't be 1, extent items aren't processed */
4304 ASSERT(ret <= 0);
4305 if (ret < 0)
4306 return ret;
4307 ins_nr = 0;
4308 }
4309 ret = btrfs_next_leaf(root, path);
4310 if (ret < 0)
4311 return ret;
4312 else if (ret > 0)
4313 break;
4314 continue;
4315 }
4316
4317 btrfs_item_key_to_cpu(leaf, &key, slot);
4318 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY)
4319 break;
4320
4321 if (ins_nr == 0)
4322 start_slot = slot;
4323 ins_nr++;
4324 path->slots[0]++;
4325 cond_resched();
4326 }
4327 if (ins_nr > 0) {
4328 u64 last_extent = 0;
4329
4330 ret = copy_items(trans, inode, dst_path, path,
4331 &last_extent, start_slot,
4332 ins_nr, 1, 0);
4333 /* can't be 1, extent items aren't processed */
4334 ASSERT(ret <= 0);
4335 if (ret < 0)
4336 return ret;
4337 }
4338
4339 return 0;
4340 }
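
/*
 * Editor's note with a sketch, not part of the original source: the
 * ins_nr/start_slot dance in btrfs_log_all_xattrs() above is this file's
 * standard batching idiom - grow a run of consecutive leaf slots and flush
 * the whole run with a single copy_items() call whenever the run is
 * interrupted. Reduced to its essentials over a plain array (all names
 * hypothetical):
 */
static int __maybe_unused flush_runs_example(const bool *wanted, int nr,
					     int (*flush)(int start, int count))
{
	int start_slot = 0;
	int ins_nr = 0;
	int i, ret;

	for (i = 0; i < nr; i++) {
		if (!wanted[i]) {
			if (ins_nr) {	/* run interrupted, flush it */
				ret = flush(start_slot, ins_nr);
				if (ret)
					return ret;
				ins_nr = 0;
			}
			continue;
		}
		if (ins_nr == 0)
			start_slot = i;	/* a new run begins here */
		ins_nr++;
	}
	/* flush whatever run is still pending at the end */
	return ins_nr ? flush(start_slot, ins_nr) : 0;
}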
4341
4342 /*
4343 * If the no holes feature is enabled we need to make sure any hole between
4344 * the last extent and the i_size of our inode is explicitly marked in the
4345 * log. This is to make sure that doing something like:
4346 *
4347 * 1) create file with 128KB of data
4348 * 2) truncate file to 64KB
4349 * 3) truncate file to 256KB
4350 * 4) fsync file
4351 * 5) <crash/power failure>
4352 * 6) mount fs and trigger log replay
4353 *
4354 * will give us a file with a size of 256KB whose first 64KB of data match
4355 * what the file had in its first 64KB at step 1, and whose last 192KB
4356 * correspond to a hole. The presence of explicit holes in a log tree is
4357 * what guarantees that log replay will remove/adjust file extent items in
4358 * the fs/subvol tree.
4359 *
4360 * Here we do not need to care about holes between extents; that is already
4361 * done by copy_items(). We also only need to do this in the full sync path,
4362 * where we look up extents from the fs/subvol tree only. In the fast path
4363 * case, we look at the list of modified extent maps and if any represents
4364 * a hole, we insert a corresponding file extent item for it in the log tree.
4365 */
4366 static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
4367 struct btrfs_root *root,
4368 struct inode *inode,
4369 struct btrfs_path *path)
4370 {
4371 int ret;
4372 struct btrfs_key key;
4373 u64 hole_start;
4374 u64 hole_size;
4375 struct extent_buffer *leaf;
4376 struct btrfs_root *log = root->log_root;
4377 const u64 ino = btrfs_ino(inode);
4378 const u64 i_size = i_size_read(inode);
4379
4380 if (!btrfs_fs_incompat(root->fs_info, NO_HOLES))
4381 return 0;
4382
4383 key.objectid = ino;
4384 key.type = BTRFS_EXTENT_DATA_KEY;
4385 key.offset = (u64)-1;
4386
4387 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4388 ASSERT(ret != 0);
4389 if (ret < 0)
4390 return ret;
4391
4392 ASSERT(path->slots[0] > 0);
4393 path->slots[0]--;
4394 leaf = path->nodes[0];
4395 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4396
4397 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) {
4398 /* inode does not have any extents */
4399 hole_start = 0;
4400 hole_size = i_size;
4401 } else {
4402 struct btrfs_file_extent_item *extent;
4403 u64 len;
4404
4405 /*
4406 * If there's an extent beyond i_size, an explicit hole was
4407 * already inserted by copy_items().
4408 */
4409 if (key.offset >= i_size)
4410 return 0;
4411
4412 extent = btrfs_item_ptr(leaf, path->slots[0],
4413 struct btrfs_file_extent_item);
4414
4415 if (btrfs_file_extent_type(leaf, extent) ==
4416 BTRFS_FILE_EXTENT_INLINE) {
4417 len = btrfs_file_extent_inline_len(leaf,
4418 path->slots[0],
4419 extent);
4420 ASSERT(len == i_size);
4421 return 0;
4422 }
4423
4424 len = btrfs_file_extent_num_bytes(leaf, extent);
4425 /* Last extent goes beyond i_size, no need to log a hole. */
4426 if (key.offset + len > i_size)
4427 return 0;
4428 hole_start = key.offset + len;
4429 hole_size = i_size - hole_start;
4430 }
4431 btrfs_release_path(path);
4432
4433 /* Last extent ends at i_size. */
4434 if (hole_size == 0)
4435 return 0;
4436
4437 hole_size = ALIGN(hole_size, root->sectorsize);
4438 ret = btrfs_insert_file_extent(trans, log, ino, hole_start, 0, 0,
4439 hole_size, 0, hole_size, 0, 0, 0);
4440 return ret;
4441 }
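
/*
 * Editor's sketch, not part of the original source: stripped of the tree
 * search, the hole computation in btrfs_log_trailing_hole() above is a pure
 * function of the last extent's end offset and i_size. Hypothetical helper;
 * a real caller would still round the size up to the sector size as done
 * above:
 */
static u64 __maybe_unused trailing_hole(u64 last_extent_end, u64 i_size,
					u64 *hole_start)
{
	*hole_start = last_extent_end;
	if (last_extent_end >= i_size)
		return 0;	/* the last extent reaches or passes i_size */
	return i_size - last_extent_end;
}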
4442
4443 /*
4444 * When we are logging a new inode X, check if it doesn't have a reference that
4445 * matches the reference from some other inode Y created in a past transaction
4446 * and that was renamed in the current transaction. If we don't do this, then at
4447 * log replay time we can lose inode Y (and all its files if it's a directory):
4448 *
4449 * mkdir /mnt/x
4450 * echo "hello world" > /mnt/x/foobar
4451 * sync
4452 * mv /mnt/x /mnt/y
4453 * mkdir /mnt/x # or touch /mnt/x
4454 * xfs_io -c fsync /mnt/x
4455 * <power fail>
4456 * mount fs, trigger log replay
4457 *
4458 * After the log replay procedure, we would lose the first directory and all its
4459 * files (file foobar).
4460 * For the case where inode Y is not a directory we simply end up losing it:
4461 *
4462 * echo "123" > /mnt/foo
4463 * sync
4464 * mv /mnt/foo /mnt/bar
4465 * echo "abc" > /mnt/foo
4466 * xfs_io -c fsync /mnt/foo
4467 * <power fail>
4468 *
4469 * We also need this for cases where a snapshot entry is replaced by some other
4470 * entry (file or directory) otherwise we end up with an unreplayable log due to
4471 * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as
4472 * if it were a regular entry:
4473 *
4474 * mkdir /mnt/x
4475 * btrfs subvolume snapshot /mnt /mnt/x/snap
4476 * btrfs subvolume delete /mnt/x/snap
4477 * rmdir /mnt/x
4478 * mkdir /mnt/x
4479 * fsync /mnt/x or fsync some new file inside it
4480 * <power fail>
4481 *
4482 * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in
4483 * the same transaction.
4484 */
4485 static int btrfs_check_ref_name_override(struct extent_buffer *eb,
4486 const int slot,
4487 const struct btrfs_key *key,
4488 struct inode *inode,
4489 u64 *other_ino)
4490 {
4491 int ret;
4492 struct btrfs_path *search_path;
4493 char *name = NULL;
4494 u32 name_len = 0;
4495 u32 item_size = btrfs_item_size_nr(eb, slot);
4496 u32 cur_offset = 0;
4497 unsigned long ptr = btrfs_item_ptr_offset(eb, slot);
4498
4499 search_path = btrfs_alloc_path();
4500 if (!search_path)
4501 return -ENOMEM;
4502 search_path->search_commit_root = 1;
4503 search_path->skip_locking = 1;
4504
4505 while (cur_offset < item_size) {
4506 u64 parent;
4507 u32 this_name_len;
4508 u32 this_len;
4509 unsigned long name_ptr;
4510 struct btrfs_dir_item *di;
4511
4512 if (key->type == BTRFS_INODE_REF_KEY) {
4513 struct btrfs_inode_ref *iref;
4514
4515 iref = (struct btrfs_inode_ref *)(ptr + cur_offset);
4516 parent = key->offset;
4517 this_name_len = btrfs_inode_ref_name_len(eb, iref);
4518 name_ptr = (unsigned long)(iref + 1);
4519 this_len = sizeof(*iref) + this_name_len;
4520 } else {
4521 struct btrfs_inode_extref *extref;
4522
4523 extref = (struct btrfs_inode_extref *)(ptr +
4524 cur_offset);
4525 parent = btrfs_inode_extref_parent(eb, extref);
4526 this_name_len = btrfs_inode_extref_name_len(eb, extref);
4527 name_ptr = (unsigned long)&extref->name;
4528 this_len = sizeof(*extref) + this_name_len;
4529 }
4530
4531 if (this_name_len > name_len) {
4532 char *new_name;
4533
4534 new_name = krealloc(name, this_name_len, GFP_NOFS);
4535 if (!new_name) {
4536 ret = -ENOMEM;
4537 goto out;
4538 }
4539 name_len = this_name_len;
4540 name = new_name;
4541 }
4542
4543 read_extent_buffer(eb, name, name_ptr, this_name_len);
4544 di = btrfs_lookup_dir_item(NULL, BTRFS_I(inode)->root,
4545 search_path, parent,
4546 name, this_name_len, 0);
4547 if (di && !IS_ERR(di)) {
4548 struct btrfs_key di_key;
4549
4550 btrfs_dir_item_key_to_cpu(search_path->nodes[0],
4551 di, &di_key);
4552 if (di_key.type == BTRFS_INODE_ITEM_KEY) {
4553 ret = 1;
4554 *other_ino = di_key.objectid;
4555 } else {
4556 ret = -EAGAIN;
4557 }
4558 goto out;
4559 } else if (IS_ERR(di)) {
4560 ret = PTR_ERR(di);
4561 goto out;
4562 }
4563 btrfs_release_path(search_path);
4564
4565 cur_offset += this_len;
4566 }
4567 ret = 0;
4568 out:
4569 btrfs_free_path(search_path);
4570 kfree(name);
4571 return ret;
4572 }
4573
4574 /* log a single inode in the tree log.
4575 * At least one parent directory for this inode must exist in the tree
4576 * or be logged already.
4577 *
4578 * Any items from this inode changed by the current transaction are copied
4579 * to the log tree. An extra reference is taken on any extents in this
4580 * file, allowing us to avoid a whole pile of corner cases around logging
4581 * blocks that have been removed from the tree.
4582 *
4583 * See LOG_INODE_ALL and related defines for a description of what inode_only
4584 * does.
4585 *
4586 * This handles both files and directories.
4587 */
4588 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
4589 struct btrfs_root *root, struct inode *inode,
4590 int inode_only,
4591 const loff_t start,
4592 const loff_t end,
4593 struct btrfs_log_ctx *ctx)
4594 {
4595 struct btrfs_path *path;
4596 struct btrfs_path *dst_path;
4597 struct btrfs_key min_key;
4598 struct btrfs_key max_key;
4599 struct btrfs_root *log = root->log_root;
4600 struct extent_buffer *src = NULL;
4601 LIST_HEAD(logged_list);
4602 u64 last_extent = 0;
4603 int err = 0;
4604 int ret;
4605 int nritems;
4606 int ins_start_slot = 0;
4607 int ins_nr;
4608 bool fast_search = false;
4609 u64 ino = btrfs_ino(inode);
4610 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
4611 u64 logged_isize = 0;
4612 bool need_log_inode_item = true;
4613
4614 path = btrfs_alloc_path();
4615 if (!path)
4616 return -ENOMEM;
4617 dst_path = btrfs_alloc_path();
4618 if (!dst_path) {
4619 btrfs_free_path(path);
4620 return -ENOMEM;
4621 }
4622
4623 min_key.objectid = ino;
4624 min_key.type = BTRFS_INODE_ITEM_KEY;
4625 min_key.offset = 0;
4626
4627 max_key.objectid = ino;
4628
4630 /* today the code can only do partial logging of directories */
4631 if (S_ISDIR(inode->i_mode) ||
4632 (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4633 &BTRFS_I(inode)->runtime_flags) &&
4634 inode_only == LOG_INODE_EXISTS))
4635 max_key.type = BTRFS_XATTR_ITEM_KEY;
4636 else
4637 max_key.type = (u8)-1;
4638 max_key.offset = (u64)-1;
4639
4640 /*
4641 * Only run delayed items if we are a dir or a new file.
4642 * Otherwise commit the delayed inode only, which is needed in
4643 * order for the log replay code to mark inodes for link count
4644 * fixup (create temporary BTRFS_TREE_LOG_FIXUP_OBJECTID items).
4645 */
4646 if (S_ISDIR(inode->i_mode) ||
4647 BTRFS_I(inode)->generation > root->fs_info->last_trans_committed)
4648 ret = btrfs_commit_inode_delayed_items(trans, inode);
4649 else
4650 ret = btrfs_commit_inode_delayed_inode(inode);
4651
4652 if (ret) {
4653 btrfs_free_path(path);
4654 btrfs_free_path(dst_path);
4655 return ret;
4656 }
4657
4658 mutex_lock(&BTRFS_I(inode)->log_mutex);
4659
4660 /*
4661 	 * a brute force approach to making sure we get the most up-to-date
4662 * copies of everything.
4663 */
4664 if (S_ISDIR(inode->i_mode)) {
4665 int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;
4666
4667 if (inode_only == LOG_INODE_EXISTS)
4668 max_key_type = BTRFS_XATTR_ITEM_KEY;
4669 ret = drop_objectid_items(trans, log, path, ino, max_key_type);
4670 } else {
4671 if (inode_only == LOG_INODE_EXISTS) {
4672 /*
4673 * Make sure the new inode item we write to the log has
4674 * the same isize as the current one (if it exists).
4675 * This is necessary to prevent data loss after log
4676 * replay, and also to prevent doing a wrong expanding
4677 			 * truncate - e.g. create file, write 4KB into offset
4678 			 * 0, fsync, write 4KB into offset 4096, add hard link,
4679 			 * fsync some other file (to sync the log), power fail - if
4680 			 * we use the inode's current i_size, after log replay
4681 			 * we get an 8KB file, with the last 4KB extent as a hole
4682 			 * (zeroes), as if an expanding truncate happened,
4683 			 * instead of getting a file of only 4KB.
4684 */
4685 err = logged_inode_size(log, inode, path,
4686 &logged_isize);
4687 if (err)
4688 goto out_unlock;
4689 }
4690 if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4691 &BTRFS_I(inode)->runtime_flags)) {
4692 if (inode_only == LOG_INODE_EXISTS) {
4693 max_key.type = BTRFS_XATTR_ITEM_KEY;
4694 ret = drop_objectid_items(trans, log, path, ino,
4695 max_key.type);
4696 } else {
4697 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4698 &BTRFS_I(inode)->runtime_flags);
4699 clear_bit(BTRFS_INODE_COPY_EVERYTHING,
4700 &BTRFS_I(inode)->runtime_flags);
4701 				while (1) {
4702 ret = btrfs_truncate_inode_items(trans,
4703 log, inode, 0, 0);
4704 if (ret != -EAGAIN)
4705 break;
4706 }
4707 }
4708 } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
4709 &BTRFS_I(inode)->runtime_flags) ||
4710 inode_only == LOG_INODE_EXISTS) {
4711 if (inode_only == LOG_INODE_ALL)
4712 fast_search = true;
4713 max_key.type = BTRFS_XATTR_ITEM_KEY;
4714 ret = drop_objectid_items(trans, log, path, ino,
4715 max_key.type);
4716 } else {
4717 if (inode_only == LOG_INODE_ALL)
4718 fast_search = true;
4719 goto log_extents;
4720 }
4721
4722 }
4723 if (ret) {
4724 err = ret;
4725 goto out_unlock;
4726 }
4727
4728 while (1) {
4729 ins_nr = 0;
4730 ret = btrfs_search_forward(root, &min_key,
4731 path, trans->transid);
4732 if (ret < 0) {
4733 err = ret;
4734 goto out_unlock;
4735 }
4736 if (ret != 0)
4737 break;
4738 again:
4739 /* note, ins_nr might be > 0 here, cleanup outside the loop */
4740 if (min_key.objectid != ino)
4741 break;
4742 if (min_key.type > max_key.type)
4743 break;
4744
4745 if (min_key.type == BTRFS_INODE_ITEM_KEY)
4746 need_log_inode_item = false;
4747
4748 if ((min_key.type == BTRFS_INODE_REF_KEY ||
4749 min_key.type == BTRFS_INODE_EXTREF_KEY) &&
4750 BTRFS_I(inode)->generation == trans->transid) {
4751 u64 other_ino = 0;
4752
4753 ret = btrfs_check_ref_name_override(path->nodes[0],
4754 path->slots[0],
4755 &min_key, inode,
4756 &other_ino);
4757 if (ret < 0) {
4758 err = ret;
4759 goto out_unlock;
4760 } else if (ret > 0 && ctx &&
4761 other_ino != btrfs_ino(ctx->inode)) {
4762 struct btrfs_key inode_key;
4763 struct inode *other_inode;
4764
4765 if (ins_nr > 0) {
4766 ins_nr++;
4767 } else {
4768 ins_nr = 1;
4769 ins_start_slot = path->slots[0];
4770 }
4771 ret = copy_items(trans, inode, dst_path, path,
4772 &last_extent, ins_start_slot,
4773 ins_nr, inode_only,
4774 logged_isize);
4775 if (ret < 0) {
4776 err = ret;
4777 goto out_unlock;
4778 }
4779 ins_nr = 0;
4780 btrfs_release_path(path);
4781 inode_key.objectid = other_ino;
4782 inode_key.type = BTRFS_INODE_ITEM_KEY;
4783 inode_key.offset = 0;
4784 other_inode = btrfs_iget(root->fs_info->sb,
4785 &inode_key, root,
4786 NULL);
4787 /*
4788 * If the other inode that had a conflicting dir
4789 * entry was deleted in the current transaction,
4790 * we don't need to do more work nor fallback to
4791 * a transaction commit.
4792 */
4793 if (IS_ERR(other_inode) &&
4794 PTR_ERR(other_inode) == -ENOENT) {
4795 goto next_key;
4796 } else if (IS_ERR(other_inode)) {
4797 err = PTR_ERR(other_inode);
4798 goto out_unlock;
4799 }
4800 /*
4801 * We are safe logging the other inode without
4802 * acquiring its i_mutex as long as we log with
4803 * the LOG_INODE_EXISTS mode. We're safe against
4804 * concurrent renames of the other inode as well
4805 * because during a rename we pin the log and
4806 * update the log with the new name before we
4807 * unpin it.
4808 */
4809 err = btrfs_log_inode(trans, root, other_inode,
4810 LOG_INODE_EXISTS,
4811 0, LLONG_MAX, ctx);
4812 iput(other_inode);
4813 if (err)
4814 goto out_unlock;
4815 else
4816 goto next_key;
4817 }
4818 }
4819
4820 /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
4821 if (min_key.type == BTRFS_XATTR_ITEM_KEY) {
4822 if (ins_nr == 0)
4823 goto next_slot;
4824 ret = copy_items(trans, inode, dst_path, path,
4825 &last_extent, ins_start_slot,
4826 ins_nr, inode_only, logged_isize);
4827 if (ret < 0) {
4828 err = ret;
4829 goto out_unlock;
4830 }
4831 ins_nr = 0;
4832 if (ret) {
4833 btrfs_release_path(path);
4834 continue;
4835 }
4836 goto next_slot;
4837 }
4838
4839 src = path->nodes[0];
4840 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
4841 ins_nr++;
4842 goto next_slot;
4843 } else if (!ins_nr) {
4844 ins_start_slot = path->slots[0];
4845 ins_nr = 1;
4846 goto next_slot;
4847 }
4848
4849 ret = copy_items(trans, inode, dst_path, path, &last_extent,
4850 ins_start_slot, ins_nr, inode_only,
4851 logged_isize);
4852 if (ret < 0) {
4853 err = ret;
4854 goto out_unlock;
4855 }
4856 if (ret) {
4857 ins_nr = 0;
4858 btrfs_release_path(path);
4859 continue;
4860 }
4861 ins_nr = 1;
4862 ins_start_slot = path->slots[0];
4863 next_slot:
4864
4865 nritems = btrfs_header_nritems(path->nodes[0]);
4866 path->slots[0]++;
4867 if (path->slots[0] < nritems) {
4868 btrfs_item_key_to_cpu(path->nodes[0], &min_key,
4869 path->slots[0]);
4870 goto again;
4871 }
4872 if (ins_nr) {
4873 ret = copy_items(trans, inode, dst_path, path,
4874 &last_extent, ins_start_slot,
4875 ins_nr, inode_only, logged_isize);
4876 if (ret < 0) {
4877 err = ret;
4878 goto out_unlock;
4879 }
4880 ret = 0;
4881 ins_nr = 0;
4882 }
4883 btrfs_release_path(path);
4884 next_key:
4885 if (min_key.offset < (u64)-1) {
4886 min_key.offset++;
4887 } else if (min_key.type < max_key.type) {
4888 min_key.type++;
4889 min_key.offset = 0;
4890 } else {
4891 break;
4892 }
4893 }
4894 if (ins_nr) {
4895 ret = copy_items(trans, inode, dst_path, path, &last_extent,
4896 ins_start_slot, ins_nr, inode_only,
4897 logged_isize);
4898 if (ret < 0) {
4899 err = ret;
4900 goto out_unlock;
4901 }
4902 ret = 0;
4903 ins_nr = 0;
4904 }
4905
4906 btrfs_release_path(path);
4907 btrfs_release_path(dst_path);
4908 err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path);
4909 if (err)
4910 goto out_unlock;
4911 if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
4912 btrfs_release_path(path);
4913 btrfs_release_path(dst_path);
4914 err = btrfs_log_trailing_hole(trans, root, inode, path);
4915 if (err)
4916 goto out_unlock;
4917 }
4918 log_extents:
4919 btrfs_release_path(path);
4920 btrfs_release_path(dst_path);
4921 if (need_log_inode_item) {
4922 err = log_inode_item(trans, log, dst_path, inode);
4923 if (err)
4924 goto out_unlock;
4925 }
4926 if (fast_search) {
4927 ret = btrfs_log_changed_extents(trans, root, inode, dst_path,
4928 &logged_list, ctx, start, end);
4929 if (ret) {
4930 err = ret;
4931 goto out_unlock;
4932 }
4933 } else if (inode_only == LOG_INODE_ALL) {
4934 struct extent_map *em, *n;
4935
4936 write_lock(&em_tree->lock);
4937 /*
4938 * We can't just remove every em if we're called for a ranged
4939 * fsync - that is, one that doesn't cover the whole possible
4940 * file range (0 to LLONG_MAX). This is because we can have
4941 * em's that fall outside the range we're logging and therefore
4942 * their ordered operations haven't completed yet
4943 * (btrfs_finish_ordered_io() not invoked yet). This means we
4944 * didn't get their respective file extent item in the fs/subvol
4945 * tree yet, and need to let the next fast fsync (one which
4946 * consults the list of modified extent maps) find the em so
4947 * that it logs a matching file extent item and waits for the
4948 * respective ordered operation to complete (if it's still
4949 * running).
4950 *
4951 * Removing every em outside the range we're logging would make
4952 * the next fast fsync not log their matching file extent items,
4953 * therefore making us lose data after a log replay.
4954 */
4955 list_for_each_entry_safe(em, n, &em_tree->modified_extents,
4956 list) {
4957 const u64 mod_end = em->mod_start + em->mod_len - 1;
4958
4959 if (em->mod_start >= start && mod_end <= end)
4960 list_del_init(&em->list);
4961 }
4962 write_unlock(&em_tree->lock);
4963 }
4964
4965 if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
4966 ret = log_directory_changes(trans, root, inode, path, dst_path,
4967 ctx);
4968 if (ret) {
4969 err = ret;
4970 goto out_unlock;
4971 }
4972 }
4973
4974 spin_lock(&BTRFS_I(inode)->lock);
4975 BTRFS_I(inode)->logged_trans = trans->transid;
4976 BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->last_sub_trans;
4977 spin_unlock(&BTRFS_I(inode)->lock);
4978 out_unlock:
4979 if (unlikely(err))
4980 btrfs_put_logged_extents(&logged_list);
4981 else
4982 btrfs_submit_logged_extents(&logged_list, log);
4983 mutex_unlock(&BTRFS_I(inode)->log_mutex);
4984
4985 btrfs_free_path(path);
4986 btrfs_free_path(dst_path);
4987 return err;
4988 }
4989
4990 /*
4991 * Check if we must fallback to a transaction commit when logging an inode.
4992 * This must be called after logging the inode and is used only in the context
4993 * where fsyncing an inode requires logging some other inode - in which
4994 * case we can't lock the i_mutex of each other inode we need to log as that
4995 * can lead to deadlocks with concurrent fsync against other inodes (as we can
4996 * log inodes up or down in the hierarchy) or rename operations for example. So
4997 * we take the log_mutex of the inode after we have logged it and then check for
4998 * its last_unlink_trans value - this is safe because any task setting
4999 * last_unlink_trans must take the log_mutex and it must do this before it does
5000 * the actual unlink operation, so if we do this check before a concurrent task
5001 * sets last_unlink_trans it means we've logged a consistent version/state of
5002 * all the inode items, otherwise we are not sure and must do a transaction
5003 * commit (the concurrent task might have only updated last_unlink_trans before
5004 * we logged the inode or it might have also done the unlink).
5005 */
5006 static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans,
5007 struct inode *inode)
5008 {
5009 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
5010 bool ret = false;
5011
5012 mutex_lock(&BTRFS_I(inode)->log_mutex);
5013 if (BTRFS_I(inode)->last_unlink_trans > fs_info->last_trans_committed) {
5014 /*
5015 * Make sure any commits to the log are forced to be full
5016 * commits.
5017 */
5018 btrfs_set_log_full_commit(fs_info, trans);
5019 ret = true;
5020 }
5021 mutex_unlock(&BTRFS_I(inode)->log_mutex);
5022
5023 return ret;
5024 }
5025
5026 /*
5027 * follow the dentry parent pointers up the chain and see if any
5028 * of the directories in it require a full commit before they can
5029 * be logged. Returns zero if nothing special needs to be done or 1 if
5030 * a full commit is required.
5031 */
5032 static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
5033 struct inode *inode,
5034 struct dentry *parent,
5035 struct super_block *sb,
5036 u64 last_committed)
5037 {
5038 int ret = 0;
5039 struct dentry *old_parent = NULL;
5040 struct inode *orig_inode = inode;
5041
5042 /*
5043 	 * for a regular file, if its inode is already on disk, we don't
5044 * have to worry about the parents at all. This is because
5045 * we can use the last_unlink_trans field to record renames
5046 * and other fun in this file.
5047 */
5048 if (S_ISREG(inode->i_mode) &&
5049 BTRFS_I(inode)->generation <= last_committed &&
5050 BTRFS_I(inode)->last_unlink_trans <= last_committed)
5051 goto out;
5052
5053 if (!S_ISDIR(inode->i_mode)) {
5054 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
5055 goto out;
5056 inode = d_inode(parent);
5057 }
5058
5059 while (1) {
5060 /*
5061 * If we are logging a directory then we start with our inode,
5062 * not our parent's inode, so we need to skip setting the
5063 * logged_trans so that further down in the log code we don't
5064 * think this inode has already been logged.
5065 */
5066 if (inode != orig_inode)
5067 BTRFS_I(inode)->logged_trans = trans->transid;
5068 smp_mb();
5069
5070 if (btrfs_must_commit_transaction(trans, inode)) {
5071 ret = 1;
5072 break;
5073 }
5074
5075 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
5076 break;
5077
5078 if (IS_ROOT(parent)) {
5079 inode = d_inode(parent);
5080 if (btrfs_must_commit_transaction(trans, inode))
5081 ret = 1;
5082 break;
5083 }
5084
5085 parent = dget_parent(parent);
5086 dput(old_parent);
5087 old_parent = parent;
5088 inode = d_inode(parent);
5089
5090 }
5091 dput(old_parent);
5092 out:
5093 return ret;
5094 }
5095
5096 struct btrfs_dir_list {
5097 u64 ino;
5098 struct list_head list;
5099 };
5100
5101 /*
5102 * Log the inodes of the new dentries of a directory. See log_dir_items() for
5103 * details about why it is needed.
5104 * This is a recursive operation - if an existing dentry corresponds to a
5105 * directory, that directory's new entries are logged too (same behaviour as
5106 * ext3/4, xfs, f2fs, reiserfs, nilfs2). Note that when logging the inodes
5107 * the dentries point to we do not lock their i_mutex, otherwise lockdep
5108 * complains about the following circular lock dependency / possible deadlock:
5109 *
5110 * CPU0 CPU1
5111 * ---- ----
5112 * lock(&type->i_mutex_dir_key#3/2);
5113 * lock(sb_internal#2);
5114 * lock(&type->i_mutex_dir_key#3/2);
5115 * lock(&sb->s_type->i_mutex_key#14);
5116 *
5117 * Where sb_internal is the lock (a counter that works as a lock) acquired by
5118 * sb_start_intwrite() in btrfs_start_transaction().
5119 * Not locking i_mutex of the inodes is still safe because:
5120 *
5121 * 1) For regular files we log with a mode of LOG_INODE_EXISTS. It's possible
5122 * that while logging the inode new references (names) are added or removed
5123 * from the inode, leaving the logged inode item with a link count that does
5124 * not match the number of logged inode reference items. This is fine because
5125 * at log replay time we compute the real number of links and correct the
5126 * link count in the inode item (see replay_one_buffer() and
5127 * link_to_fixup_dir());
5128 *
5129 * 2) For directories we log with a mode of LOG_INODE_ALL. It's possible that
5130 * while logging the inode's items new items with keys BTRFS_DIR_ITEM_KEY and
5131 * BTRFS_DIR_INDEX_KEY are added to fs/subvol tree and the logged inode item
5132 * has a size that doesn't match the sum of the lengths of all the logged
5133 * names. This does not result in a problem because if a dir_item key is
5134 * logged but its matching dir_index key is not logged, at log replay time we
5135 * don't use it to replay the respective name (see replay_one_name()). On the
5136 * other hand if only the dir_index key ends up being logged, the respective
5137 * name is added to the fs/subvol tree with both the dir_item and dir_index
5138 * keys created (see replay_one_name()).
5139 * The directory's inode item with a wrong i_size is not a problem as well,
5140 * since we don't use it at log replay time to set the i_size in the inode
5141 * item of the fs/subvol tree (see overwrite_item()).
5142 */
5143 static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
5144 struct btrfs_root *root,
5145 struct inode *start_inode,
5146 struct btrfs_log_ctx *ctx)
5147 {
5148 struct btrfs_root *log = root->log_root;
5149 struct btrfs_path *path;
5150 LIST_HEAD(dir_list);
5151 struct btrfs_dir_list *dir_elem;
5152 int ret = 0;
5153
5154 path = btrfs_alloc_path();
5155 if (!path)
5156 return -ENOMEM;
5157
5158 dir_elem = kmalloc(sizeof(*dir_elem), GFP_NOFS);
5159 if (!dir_elem) {
5160 btrfs_free_path(path);
5161 return -ENOMEM;
5162 }
5163 dir_elem->ino = btrfs_ino(start_inode);
5164 list_add_tail(&dir_elem->list, &dir_list);
5165
5166 while (!list_empty(&dir_list)) {
5167 struct extent_buffer *leaf;
5168 struct btrfs_key min_key;
5169 int nritems;
5170 int i;
5171
5172 dir_elem = list_first_entry(&dir_list, struct btrfs_dir_list,
5173 list);
5174 if (ret)
5175 goto next_dir_inode;
5176
5177 min_key.objectid = dir_elem->ino;
5178 min_key.type = BTRFS_DIR_ITEM_KEY;
5179 min_key.offset = 0;
5180 again:
5181 btrfs_release_path(path);
5182 ret = btrfs_search_forward(log, &min_key, path, trans->transid);
5183 if (ret < 0) {
5184 goto next_dir_inode;
5185 } else if (ret > 0) {
5186 ret = 0;
5187 goto next_dir_inode;
5188 }
5189
5190 process_leaf:
5191 leaf = path->nodes[0];
5192 nritems = btrfs_header_nritems(leaf);
5193 for (i = path->slots[0]; i < nritems; i++) {
5194 struct btrfs_dir_item *di;
5195 struct btrfs_key di_key;
5196 struct inode *di_inode;
5197 struct btrfs_dir_list *new_dir_elem;
5198 int log_mode = LOG_INODE_EXISTS;
5199 int type;
5200
5201 btrfs_item_key_to_cpu(leaf, &min_key, i);
5202 if (min_key.objectid != dir_elem->ino ||
5203 min_key.type != BTRFS_DIR_ITEM_KEY)
5204 goto next_dir_inode;
5205
5206 di = btrfs_item_ptr(leaf, i, struct btrfs_dir_item);
5207 type = btrfs_dir_type(leaf, di);
5208 if (btrfs_dir_transid(leaf, di) < trans->transid &&
5209 type != BTRFS_FT_DIR)
5210 continue;
5211 btrfs_dir_item_key_to_cpu(leaf, di, &di_key);
5212 if (di_key.type == BTRFS_ROOT_ITEM_KEY)
5213 continue;
5214
5215 di_inode = btrfs_iget(root->fs_info->sb, &di_key,
5216 root, NULL);
5217 if (IS_ERR(di_inode)) {
5218 ret = PTR_ERR(di_inode);
5219 goto next_dir_inode;
5220 }
5221
5222 if (btrfs_inode_in_log(di_inode, trans->transid)) {
5223 iput(di_inode);
5224 continue;
5225 }
5226
5227 ctx->log_new_dentries = false;
5228 if (type == BTRFS_FT_DIR || type == BTRFS_FT_SYMLINK)
5229 log_mode = LOG_INODE_ALL;
5230 btrfs_release_path(path);
5231 ret = btrfs_log_inode(trans, root, di_inode,
5232 log_mode, 0, LLONG_MAX, ctx);
5233 if (!ret &&
5234 btrfs_must_commit_transaction(trans, di_inode))
5235 ret = 1;
5236 iput(di_inode);
5237 if (ret)
5238 goto next_dir_inode;
5239 if (ctx->log_new_dentries) {
5240 new_dir_elem = kmalloc(sizeof(*new_dir_elem),
5241 GFP_NOFS);
5242 if (!new_dir_elem) {
5243 ret = -ENOMEM;
5244 goto next_dir_inode;
5245 }
5246 new_dir_elem->ino = di_key.objectid;
5247 list_add_tail(&new_dir_elem->list, &dir_list);
5248 }
5249 break;
5250 }
5251 if (i == nritems) {
5252 ret = btrfs_next_leaf(log, path);
5253 if (ret < 0) {
5254 goto next_dir_inode;
5255 } else if (ret > 0) {
5256 ret = 0;
5257 goto next_dir_inode;
5258 }
5259 goto process_leaf;
5260 }
5261 if (min_key.offset < (u64)-1) {
5262 min_key.offset++;
5263 goto again;
5264 }
5265 next_dir_inode:
5266 list_del(&dir_elem->list);
5267 kfree(dir_elem);
5268 }
5269
5270 btrfs_free_path(path);
5271 return ret;
5272 }
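
/*
 * Editor's sketch, not part of the original source: log_new_dir_dentries()
 * above avoids recursion by keeping a FIFO of directory inode numbers on a
 * list_head, which keeps kernel stack usage bounded no matter how deep the
 * directory tree is. The queue discipline in isolation (element type and
 * names hypothetical; the real function also appends child directories to
 * the queue from inside the visit step):
 */
struct dir_queue_elem {
	u64 ino;
	struct list_head list;
};

static int __maybe_unused walk_dirs_example(u64 start_ino,
					    int (*visit)(u64 ino))
{
	LIST_HEAD(queue);
	struct dir_queue_elem *elem;
	int ret = 0;

	elem = kmalloc(sizeof(*elem), GFP_NOFS);
	if (!elem)
		return -ENOMEM;
	elem->ino = start_ino;
	list_add_tail(&elem->list, &queue);

	while (!list_empty(&queue)) {
		elem = list_first_entry(&queue, struct dir_queue_elem, list);
		if (!ret)
			ret = visit(elem->ino);
		/* on error keep draining the queue so every element is freed */
		list_del(&elem->list);
		kfree(elem);
	}
	return ret;
}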
5273
5274 static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
5275 struct inode *inode,
5276 struct btrfs_log_ctx *ctx)
5277 {
5278 int ret;
5279 struct btrfs_path *path;
5280 struct btrfs_key key;
5281 struct btrfs_root *root = BTRFS_I(inode)->root;
5282 const u64 ino = btrfs_ino(inode);
5283
5284 path = btrfs_alloc_path();
5285 if (!path)
5286 return -ENOMEM;
5287 path->skip_locking = 1;
5288 path->search_commit_root = 1;
5289
5290 key.objectid = ino;
5291 key.type = BTRFS_INODE_REF_KEY;
5292 key.offset = 0;
5293 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5294 if (ret < 0)
5295 goto out;
5296
5297 while (true) {
5298 struct extent_buffer *leaf = path->nodes[0];
5299 int slot = path->slots[0];
5300 u32 cur_offset = 0;
5301 u32 item_size;
5302 unsigned long ptr;
5303
5304 if (slot >= btrfs_header_nritems(leaf)) {
5305 ret = btrfs_next_leaf(root, path);
5306 if (ret < 0)
5307 goto out;
5308 else if (ret > 0)
5309 break;
5310 continue;
5311 }
5312
5313 btrfs_item_key_to_cpu(leaf, &key, slot);
5314 /* BTRFS_INODE_EXTREF_KEY is BTRFS_INODE_REF_KEY + 1 */
5315 if (key.objectid != ino || key.type > BTRFS_INODE_EXTREF_KEY)
5316 break;
5317
5318 item_size = btrfs_item_size_nr(leaf, slot);
5319 ptr = btrfs_item_ptr_offset(leaf, slot);
5320 while (cur_offset < item_size) {
5321 struct btrfs_key inode_key;
5322 struct inode *dir_inode;
5323
5324 inode_key.type = BTRFS_INODE_ITEM_KEY;
5325 inode_key.offset = 0;
5326
5327 if (key.type == BTRFS_INODE_EXTREF_KEY) {
5328 struct btrfs_inode_extref *extref;
5329
5330 extref = (struct btrfs_inode_extref *)
5331 (ptr + cur_offset);
5332 inode_key.objectid = btrfs_inode_extref_parent(
5333 leaf, extref);
5334 cur_offset += sizeof(*extref);
5335 cur_offset += btrfs_inode_extref_name_len(leaf,
5336 extref);
5337 } else {
5338 inode_key.objectid = key.offset;
5339 cur_offset = item_size;
5340 }
5341
5342 dir_inode = btrfs_iget(root->fs_info->sb, &inode_key,
5343 root, NULL);
5344 /* If parent inode was deleted, skip it. */
5345 if (IS_ERR(dir_inode))
5346 continue;
5347
5348 if (ctx)
5349 ctx->log_new_dentries = false;
5350 ret = btrfs_log_inode(trans, root, dir_inode,
5351 LOG_INODE_ALL, 0, LLONG_MAX, ctx);
5352 if (!ret &&
5353 btrfs_must_commit_transaction(trans, dir_inode))
5354 ret = 1;
5355 if (!ret && ctx && ctx->log_new_dentries)
5356 ret = log_new_dir_dentries(trans, root,
5357 dir_inode, ctx);
5358 iput(dir_inode);
5359 if (ret)
5360 goto out;
5361 }
5362 path->slots[0]++;
5363 }
5364 ret = 0;
5365 out:
5366 btrfs_free_path(path);
5367 return ret;
5368 }
5369
5370 /*
5371 * helper function around btrfs_log_inode to make sure newly created
5372 * parent directories also end up in the log. A minimal inode-and-backref-only
5373 * logging pass is done for any parent directories that are older than
5374 * the last committed transaction.
5375 */
5376 static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
5377 struct btrfs_root *root, struct inode *inode,
5378 struct dentry *parent,
5379 const loff_t start,
5380 const loff_t end,
5381 int exists_only,
5382 struct btrfs_log_ctx *ctx)
5383 {
5384 int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL;
5385 struct super_block *sb;
5386 struct dentry *old_parent = NULL;
5387 int ret = 0;
5388 u64 last_committed = root->fs_info->last_trans_committed;
5389 bool log_dentries = false;
5390 struct inode *orig_inode = inode;
5391
5392 sb = inode->i_sb;
5393
5394 if (btrfs_test_opt(root->fs_info, NOTREELOG)) {
5395 ret = 1;
5396 goto end_no_trans;
5397 }
5398
5399 /*
5400 	 * If the previous transaction commit didn't complete, we must
5401 	 * do a full commit ourselves.
5402 */
5403 if (root->fs_info->last_trans_log_full_commit >
5404 root->fs_info->last_trans_committed) {
5405 ret = 1;
5406 goto end_no_trans;
5407 }
5408
5409 if (root != BTRFS_I(inode)->root ||
5410 btrfs_root_refs(&root->root_item) == 0) {
5411 ret = 1;
5412 goto end_no_trans;
5413 }
5414
5415 ret = check_parent_dirs_for_sync(trans, inode, parent,
5416 sb, last_committed);
5417 if (ret)
5418 goto end_no_trans;
5419
5420 if (btrfs_inode_in_log(inode, trans->transid)) {
5421 ret = BTRFS_NO_LOG_SYNC;
5422 goto end_no_trans;
5423 }
5424
5425 ret = start_log_trans(trans, root, ctx);
5426 if (ret)
5427 goto end_no_trans;
5428
5429 ret = btrfs_log_inode(trans, root, inode, inode_only, start, end, ctx);
5430 if (ret)
5431 goto end_trans;
5432
5433 /*
5434 	 * for a regular file, if its inode is already on disk, we don't
5435 * have to worry about the parents at all. This is because
5436 * we can use the last_unlink_trans field to record renames
5437 * and other fun in this file.
5438 */
5439 if (S_ISREG(inode->i_mode) &&
5440 BTRFS_I(inode)->generation <= last_committed &&
5441 BTRFS_I(inode)->last_unlink_trans <= last_committed) {
5442 ret = 0;
5443 goto end_trans;
5444 }
5445
5446 if (S_ISDIR(inode->i_mode) && ctx && ctx->log_new_dentries)
5447 log_dentries = true;
5448
5449 /*
5450 * On unlink we must make sure all our current and old parent directory
5451 * inodes are fully logged. This is to prevent leaving dangling
5452 * directory index entries in directories that were our parents but are
5453 * not anymore. Not doing this results in old parent directory being
5454 * impossible to delete after log replay (rmdir will always fail with
5455 * error -ENOTEMPTY).
5456 *
5457 * Example 1:
5458 *
5459 * mkdir testdir
5460 * touch testdir/foo
5461 * ln testdir/foo testdir/bar
5462 * sync
5463 * unlink testdir/bar
5464 * xfs_io -c fsync testdir/foo
5465 * <power failure>
5466 * mount fs, triggers log replay
5467 *
5468 * If we don't log the parent directory (testdir), after log replay the
5469 * directory still has an entry pointing to the file inode using the bar
5470 * name, but a matching BTRFS_INODE_[REF|EXTREF]_KEY does not exist and
5471 * the file inode has a link count of 1.
5472 *
5473 * Example 2:
5474 *
5475 * mkdir testdir
5476 * touch foo
5477 * ln foo testdir/foo2
5478 * ln foo testdir/foo3
5479 * sync
5480 * unlink testdir/foo3
5481 * xfs_io -c fsync foo
5482 * <power failure>
5483 * mount fs, triggers log replay
5484 *
5485 * Similar as the first example, after log replay the parent directory
5486 * testdir still has an entry pointing to the inode file with name foo3
5487 * but the file inode does not have a matching BTRFS_INODE_REF_KEY item
5488 * and has a link count of 2.
5489 */
5490 if (BTRFS_I(inode)->last_unlink_trans > last_committed) {
5491 ret = btrfs_log_all_parents(trans, orig_inode, ctx);
5492 if (ret)
5493 goto end_trans;
5494 }
5495
5496 while (1) {
5497 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
5498 break;
5499
5500 inode = d_inode(parent);
5501 if (root != BTRFS_I(inode)->root)
5502 break;
5503
5504 if (BTRFS_I(inode)->generation > last_committed) {
5505 ret = btrfs_log_inode(trans, root, inode,
5506 LOG_INODE_EXISTS,
5507 0, LLONG_MAX, ctx);
5508 if (ret)
5509 goto end_trans;
5510 }
5511 if (IS_ROOT(parent))
5512 break;
5513
5514 parent = dget_parent(parent);
5515 dput(old_parent);
5516 old_parent = parent;
5517 }
5518 if (log_dentries)
5519 ret = log_new_dir_dentries(trans, root, orig_inode, ctx);
5520 else
5521 ret = 0;
5522 end_trans:
5523 dput(old_parent);
5524 if (ret < 0) {
5525 btrfs_set_log_full_commit(root->fs_info, trans);
5526 ret = 1;
5527 }
5528
5529 if (ret)
5530 btrfs_remove_log_ctx(root, ctx);
5531 btrfs_end_log_trans(root);
5532 end_no_trans:
5533 return ret;
5534 }
5535
5536 /*
5537 * it is not safe to log a dentry if the chunk root has added new
5538 * chunks. This returns 0 if the dentry was logged, and 1 otherwise.
5539 * If this returns 1, you must commit the transaction to safely get your
5540 * data on disk.
5541 */
5542 int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
5543 struct btrfs_root *root, struct dentry *dentry,
5544 const loff_t start,
5545 const loff_t end,
5546 struct btrfs_log_ctx *ctx)
5547 {
5548 struct dentry *parent = dget_parent(dentry);
5549 int ret;
5550
5551 ret = btrfs_log_inode_parent(trans, root, d_inode(dentry), parent,
5552 start, end, 0, ctx);
5553 dput(parent);
5554
5555 return ret;
5556 }
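
/*
 * Editor's sketch, not part of the original source: how a caller such as the
 * fsync path is expected to consume btrfs_log_dentry_safe()'s return
 * convention documented above (0 = dentry logged, caller syncs the log;
 * 1 = a full transaction commit is required). The helper name is
 * hypothetical and error handling is reduced to the essentials:
 */
static int __maybe_unused fsync_via_log_example(struct btrfs_trans_handle *trans,
						struct btrfs_root *root,
						struct dentry *dentry,
						loff_t start, loff_t end,
						struct btrfs_log_ctx *ctx)
{
	int ret;

	ret = btrfs_log_dentry_safe(trans, root, dentry, start, end, ctx);
	if (ret == 1) {
		/* logging was not safe; commit to get the data on disk */
		return btrfs_commit_transaction(trans, root);
	}
	/* 0 on success; negative errors are left to the caller */
	return ret;
}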
5557
5558 /*
5559 * should be called during mount to replay any log trees
5560 * from the FS
5561 */
5562 int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
5563 {
5564 int ret;
5565 struct btrfs_path *path;
5566 struct btrfs_trans_handle *trans;
5567 struct btrfs_key key;
5568 struct btrfs_key found_key;
5569 struct btrfs_key tmp_key;
5570 struct btrfs_root *log;
5571 struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
5572 struct walk_control wc = {
5573 .process_func = process_one_buffer,
5574 .stage = 0,
5575 };
5576
5577 path = btrfs_alloc_path();
5578 if (!path)
5579 return -ENOMEM;
5580
5581 fs_info->log_root_recovering = 1;
5582
5583 trans = btrfs_start_transaction(fs_info->tree_root, 0);
5584 if (IS_ERR(trans)) {
5585 ret = PTR_ERR(trans);
5586 goto error;
5587 }
5588
5589 wc.trans = trans;
5590 wc.pin = 1;
5591
5592 ret = walk_log_tree(trans, log_root_tree, &wc);
5593 if (ret) {
5594 		btrfs_handle_fs_error(fs_info, ret,
5595 			"Failed to pin buffers while recovering log root tree.");
5596 goto error;
5597 }
5598
5599 again:
5600 key.objectid = BTRFS_TREE_LOG_OBJECTID;
5601 key.offset = (u64)-1;
5602 key.type = BTRFS_ROOT_ITEM_KEY;
5603
5604 while (1) {
5605 ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);
5606
5607 if (ret < 0) {
5608 btrfs_handle_fs_error(fs_info, ret,
5609 "Couldn't find tree log root.");
5610 goto error;
5611 }
5612 if (ret > 0) {
5613 if (path->slots[0] == 0)
5614 break;
5615 path->slots[0]--;
5616 }
5617 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
5618 path->slots[0]);
5619 btrfs_release_path(path);
5620 if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
5621 break;
5622
5623 log = btrfs_read_fs_root(log_root_tree, &found_key);
5624 if (IS_ERR(log)) {
5625 ret = PTR_ERR(log);
5626 btrfs_handle_fs_error(fs_info, ret,
5627 "Couldn't read tree log root.");
5628 goto error;
5629 }
5630
5631 tmp_key.objectid = found_key.offset;
5632 tmp_key.type = BTRFS_ROOT_ITEM_KEY;
5633 tmp_key.offset = (u64)-1;
5634
5635 wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
5636 if (IS_ERR(wc.replay_dest)) {
5637 ret = PTR_ERR(wc.replay_dest);
5638 free_extent_buffer(log->node);
5639 free_extent_buffer(log->commit_root);
5640 kfree(log);
5641 			btrfs_handle_fs_error(fs_info, ret,
5642 				"Couldn't read target root for tree log recovery.");
5643 goto error;
5644 }
5645
5646 wc.replay_dest->log_root = log;
5647 btrfs_record_root_in_trans(trans, wc.replay_dest);
5648 ret = walk_log_tree(trans, log, &wc);
5649
5650 if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
5651 ret = fixup_inode_link_counts(trans, wc.replay_dest,
5652 path);
5653 }
5654
5655 key.offset = found_key.offset - 1;
5656 wc.replay_dest->log_root = NULL;
5657 free_extent_buffer(log->node);
5658 free_extent_buffer(log->commit_root);
5659 kfree(log);
5660
5661 if (ret)
5662 goto error;
5663
5664 if (found_key.offset == 0)
5665 break;
5666 }
5667 btrfs_release_path(path);
5668
5669 /* step one is to pin it all, step two is to replay just inodes */
5670 if (wc.pin) {
5671 wc.pin = 0;
5672 wc.process_func = replay_one_buffer;
5673 wc.stage = LOG_WALK_REPLAY_INODES;
5674 goto again;
5675 }
5676 /* step three is to replay everything */
5677 if (wc.stage < LOG_WALK_REPLAY_ALL) {
5678 wc.stage++;
5679 goto again;
5680 }
5681
5682 btrfs_free_path(path);
5683
5684 /* step 4: commit the transaction, which also unpins the blocks */
5685 ret = btrfs_commit_transaction(trans, fs_info->tree_root);
5686 if (ret)
5687 return ret;
5688
5689 free_extent_buffer(log_root_tree->node);
5690 log_root_tree->log_root = NULL;
5691 fs_info->log_root_recovering = 0;
5692 kfree(log_root_tree);
5693
5694 return 0;
5695 error:
5696 if (wc.trans)
5697 btrfs_end_transaction(wc.trans, fs_info->tree_root);
5698 btrfs_free_path(path);
5699 return ret;
5700 }
5701
5702 /*
5703 * there are some corner cases where we want to force a full
5704 * commit instead of allowing a directory to be logged.
5705 *
5706 * They revolve around files that were unlinked from the directory, and
5707 * this function updates the parent directory so that a full commit is
5708 * properly done if it is fsync'd later after the unlinks are done.
5709 *
5710 * Must be called before the unlink operations (updates to the subvolume tree,
5711 * inodes, etc) are done.
5712 */
5713 void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
5714 struct inode *dir, struct inode *inode,
5715 int for_rename)
5716 {
5717 /*
5718 * when we're logging a file, if it hasn't been renamed
5719 * or unlinked, and its inode is fully committed on disk,
5720 * we don't have to worry about walking up the directory chain
5721 * to log its parents.
5722 *
5723 * So, we use the last_unlink_trans field to put this transid
5724 * into the file. When the file is logged we check it and
5725 * don't log the parents if the file is fully on disk.
5726 */
5727 mutex_lock(&BTRFS_I(inode)->log_mutex);
5728 BTRFS_I(inode)->last_unlink_trans = trans->transid;
5729 mutex_unlock(&BTRFS_I(inode)->log_mutex);
5730
5731 /*
5732 * if this directory was already logged any new
5733 * names for this file/dir will get recorded
5734 */
5735 smp_mb();
5736 if (BTRFS_I(dir)->logged_trans == trans->transid)
5737 return;
5738
5739 /*
5740 * if the inode we're about to unlink was logged,
5741 * the log will be properly updated for any new names
5742 */
5743 if (BTRFS_I(inode)->logged_trans == trans->transid)
5744 return;
5745
5746 /*
5747 * when renaming files across directories, if the directory
5748 	 * we're unlinking from gets fsync'd later on, there's
5749 * no way to find the destination directory later and fsync it
5750 * properly. So, we have to be conservative and force commits
5751 * so the new name gets discovered.
5752 */
5753 if (for_rename)
5754 goto record;
5755
5756 /* we can safely do the unlink without any special recording */
5757 return;
5758
5759 record:
5760 mutex_lock(&BTRFS_I(dir)->log_mutex);
5761 BTRFS_I(dir)->last_unlink_trans = trans->transid;
5762 mutex_unlock(&BTRFS_I(dir)->log_mutex);
5763 }
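
/*
 * Editor's sketch, not part of the original source: per the "must be called
 * before the unlink operations" rule above, a hypothetical unlink path would
 * order the calls like this (do_unlink stands in for the actual subvolume
 * tree updates):
 */
static int __maybe_unused unlink_ordering_example(struct btrfs_trans_handle *trans,
						  struct inode *dir,
						  struct inode *inode,
						  int (*do_unlink)(struct inode *,
								   struct inode *))
{
	/* record first, so a later fsync of dir forces what it must */
	btrfs_record_unlink_dir(trans, dir, inode, 0 /* not a rename */);
	return do_unlink(dir, inode);
}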
5764
5765 /*
5766 * Make sure that if someone attempts to fsync the parent directory of a deleted
5767 * snapshot, it ends up triggering a transaction commit. This is to guarantee
5768 * that after replaying the log tree of the parent directory's root we will not
5769 * see the snapshot anymore and at log replay time we will not see any log tree
5770 * corresponding to the deleted snapshot's root, which could lead to replaying
5771 * it after replaying the log tree of the parent directory (which would replay
5772 * the snapshot delete operation).
5773 *
5774 * Must be called before the actual snapshot destroy operation (updates to the
5775 * parent root and tree of tree roots trees, etc) are done.
5776 */
5777 void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
5778 struct inode *dir)
5779 {
5780 mutex_lock(&BTRFS_I(dir)->log_mutex);
5781 BTRFS_I(dir)->last_unlink_trans = trans->transid;
5782 mutex_unlock(&BTRFS_I(dir)->log_mutex);
5783 }
5784
5785 /*
5786 * Call this after adding a new name for a file and it will properly
5787 * update the log to reflect the new name.
5788 *
5789 * It will return zero if all goes well, and it will return 1 if a
5790 * full transaction commit is required.
5791 */
5792 int btrfs_log_new_name(struct btrfs_trans_handle *trans,
5793 struct inode *inode, struct inode *old_dir,
5794 struct dentry *parent)
5795 {
5796 	struct btrfs_root *root = BTRFS_I(inode)->root;
5797
5798 /*
5799 * this will force the logging code to walk the dentry chain
5800 * up for the file
5801 */
5802 if (S_ISREG(inode->i_mode))
5803 BTRFS_I(inode)->last_unlink_trans = trans->transid;
5804
5805 /*
5806 * if this inode hasn't been logged and directory we're renaming it
5807 * from hasn't been logged, we don't need to log it
5808 */
5809 if (BTRFS_I(inode)->logged_trans <=
5810 root->fs_info->last_trans_committed &&
5811 (!old_dir || BTRFS_I(old_dir)->logged_trans <=
5812 root->fs_info->last_trans_committed))
5813 return 0;
5814
5815 return btrfs_log_inode_parent(trans, root, inode, parent, 0,
5816 LLONG_MAX, 1, NULL);
5817 }
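
/*
 * Editor's sketch, not part of the original source: a rename path would call
 * btrfs_log_new_name() once the new name is in place and honour the 0/1
 * return convention documented above (helper name hypothetical):
 */
static int __maybe_unused log_new_name_example(struct btrfs_trans_handle *trans,
					       struct inode *inode,
					       struct inode *old_dir,
					       struct dentry *parent)
{
	int ret = btrfs_log_new_name(trans, inode, old_dir, parent);

	if (ret == 1) {
		/* the new name couldn't be logged; a full commit is needed */
		return btrfs_commit_transaction(trans, BTRFS_I(inode)->root);
	}
	return ret;
}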
5818