2 * Copyright (C) 2007 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
20 #include <linux/sched.h>
21 #include <linux/writeback.h>
22 #include <linux/pagemap.h>
25 #include "transaction.h"
27 #include "ref-cache.h"
/* Count of live btrfs_transaction objects; sanity-checked in put_transaction(). */
29 static int total_trans
= 0;
/* Slab caches for transaction handles/transactions; defined in another file. */
30 extern struct kmem_cache
*btrfs_trans_handle_cachep
;
31 extern struct kmem_cache
*btrfs_transaction_cachep
;
/* Radix-tree tag used to mark fs roots touched by the running transaction
 * (set in record_root_in_trans(), cleared in add_dirty_roots()). */
33 #define BTRFS_ROOT_TRANS_TAG 0
/*
 * NOTE(review): members of struct dirty_root (the struct's opening line is
 * not visible in this chunk) -- bookkeeping for a root whose old copy must
 * be written back or deleted after the transaction commits.
 */
/* linkage onto a dirty/dead roots list (see list_add()/list_del_init() users) */
36 struct list_head list
;
/* private snapshot copy of the root (allocated in record_root_in_trans()) */
37 struct btrfs_root
*root
;
/* the live root this dirty entry was created from */
38 struct btrfs_root
*latest_root
;
/* leaf ref cache tied to this root's last committed generation */
39 struct btrfs_leaf_ref_tree ref_tree
;
/*
 * Drop one reference on a transaction.  On the final reference, unlink it
 * from the fs-wide trans_list, poison the memory, and return it to the slab.
 * NOTE(review): appears to rely on the caller holding trans_mutex (the list
 * is shared fs_info state) -- confirm at call sites.
 */
42 static noinline
void put_transaction(struct btrfs_transaction
*transaction
)
44 WARN_ON(transaction
->use_count
== 0);
45 transaction
->use_count
--;
46 if (transaction
->use_count
== 0) {
/* last reference: sanity-check the global live-transaction counter */
47 WARN_ON(total_trans
== 0);
49 list_del_init(&transaction
->list
);
/* poison before freeing to help catch use-after-free */
50 memset(transaction
, 0, sizeof(*transaction
));
51 kmem_cache_free(btrfs_transaction_cachep
, transaction
);
/*
 * Join the currently running transaction or, when none is running, allocate
 * and initialize a fresh btrfs_transaction: bump the fs generation, reset
 * per-transaction allocation hints, set up wait queues and the dirty-pages
 * extent tree, and publish it as running_transaction under new_trans_lock.
 * NOTE(review): the branch deciding new-vs-existing transaction is not
 * visible in this chunk; the tail lines bump writer/join counts for the
 * join path.
 */
55 static noinline
int join_transaction(struct btrfs_root
*root
)
57 struct btrfs_transaction
*cur_trans
;
58 cur_trans
= root
->fs_info
->running_transaction
;
/* no running transaction: allocate and initialize a new one */
60 cur_trans
= kmem_cache_alloc(btrfs_transaction_cachep
,
64 root
->fs_info
->generation
++;
/* reset per-transaction allocator hints */
65 root
->fs_info
->last_alloc
= 0;
66 root
->fs_info
->last_data_alloc
= 0;
67 cur_trans
->num_writers
= 1;
68 cur_trans
->num_joined
= 0;
/* transid is the just-incremented fs generation */
69 cur_trans
->transid
= root
->fs_info
->generation
;
70 init_waitqueue_head(&cur_trans
->writer_wait
);
71 init_waitqueue_head(&cur_trans
->commit_wait
);
72 cur_trans
->in_commit
= 0;
73 cur_trans
->blocked
= 0;
74 cur_trans
->use_count
= 1;
75 cur_trans
->commit_done
= 0;
76 cur_trans
->start_time
= get_seconds();
77 INIT_LIST_HEAD(&cur_trans
->pending_snapshots
);
78 list_add_tail(&cur_trans
->list
, &root
->fs_info
->trans_list
);
/* track dirty btree pages for this transaction's writeback */
79 extent_io_tree_init(&cur_trans
->dirty_pages
,
80 root
->fs_info
->btree_inode
->i_mapping
,
/* publish the new transaction under new_trans_lock */
82 spin_lock(&root
->fs_info
->new_trans_lock
);
83 root
->fs_info
->running_transaction
= cur_trans
;
84 spin_unlock(&root
->fs_info
->new_trans_lock
);
/* join path: one more writer on the existing transaction */
86 cur_trans
->num_writers
++;
87 cur_trans
->num_joined
++;
/*
 * First time a COW-enabled (ref_cows) subvolume root is modified within the
 * running transaction: tag it in fs_roots_radix, allocate a dirty_root
 * holding a private copy of the root frozen at commit_root, and stamp the
 * root's last_trans so this work is done only once per transaction.
 */
93 static noinline
int record_root_in_trans(struct btrfs_root
*root
)
95 struct dirty_root
*dirty
;
96 u64 running_trans_id
= root
->fs_info
->running_transaction
->transid
;
/* only roots that COW and haven't been recorded this transaction */
97 if (root
->ref_cows
&& root
->last_trans
< running_trans_id
) {
98 WARN_ON(root
== root
->fs_info
->extent_root
);
/* still-referenced root: mark it for writeback at commit time */
99 if (root
->root_item
.refs
!= 0) {
100 radix_tree_tag_set(&root
->fs_info
->fs_roots_radix
,
101 (unsigned long)root
->root_key
.objectid
,
102 BTRFS_ROOT_TRANS_TAG
);
104 dirty
= kmalloc(sizeof(*dirty
), GFP_NOFS
);
106 dirty
->root
= kmalloc(sizeof(*dirty
->root
), GFP_NOFS
);
107 BUG_ON(!dirty
->root
);
109 dirty
->latest_root
= root
;
110 INIT_LIST_HEAD(&dirty
->list
);
111 btrfs_leaf_ref_tree_init(&dirty
->ref_tree
);
112 dirty
->ref_tree
.generation
= running_trans_id
;
/* freeze the current tree top as this transaction's commit root */
114 root
->commit_root
= btrfs_root_node(root
);
115 root
->ref_tree
= &dirty
->ref_tree
;
/* the dirty copy is a snapshot of the root pointing at commit_root */
117 memcpy(dirty
->root
, root
, sizeof(*root
));
/* locks were copied by the memcpy -- reinitialize them */
118 spin_lock_init(&dirty
->root
->node_lock
);
119 mutex_init(&dirty
->root
->objectid_mutex
);
120 dirty
->root
->node
= root
->commit_root
;
121 dirty
->root
->commit_root
= NULL
;
/* remember we've recorded this root for the current transaction */
125 root
->last_trans
= running_trans_id
;
/*
 * Common worker for starting/joining a transaction: allocate a handle, and
 * under trans_mutex wait out a blocked (committing) transaction unless
 * 'join' is set, then join/create the running transaction, record this root
 * in it, and fill in the handle.  Returns the new handle.
 * NOTE(review): the kmem_cache_alloc() result is not NULL-checked in the
 * visible lines -- confirm against the full source.
 */
130 struct btrfs_trans_handle
*start_transaction(struct btrfs_root
*root
,
131 int num_blocks
, int join
)
133 struct btrfs_trans_handle
*h
=
134 kmem_cache_alloc(btrfs_trans_handle_cachep
, GFP_NOFS
);
135 struct btrfs_transaction
*cur_trans
;
138 mutex_lock(&root
->fs_info
->trans_mutex
);
139 cur_trans
= root
->fs_info
->running_transaction
;
/* non-joiners must wait while the current transaction is committing */
140 if (cur_trans
&& cur_trans
->blocked
&& !join
) {
/* hold a ref so the transaction can't vanish while we sleep */
142 cur_trans
->use_count
++;
144 prepare_to_wait(&root
->fs_info
->transaction_wait
, &wait
,
145 TASK_UNINTERRUPTIBLE
);
146 if (cur_trans
->blocked
) {
/* drop the mutex around the schedule, retake it afterwards */
147 mutex_unlock(&root
->fs_info
->trans_mutex
);
149 mutex_lock(&root
->fs_info
->trans_mutex
);
150 finish_wait(&root
->fs_info
->transaction_wait
,
153 finish_wait(&root
->fs_info
->transaction_wait
,
158 put_transaction(cur_trans
);
160 ret
= join_transaction(root
);
163 record_root_in_trans(root
);
/* populate the handle from the (possibly new) running transaction */
164 h
->transid
= root
->fs_info
->running_transaction
->transid
;
165 h
->transaction
= root
->fs_info
->running_transaction
;
166 h
->blocks_reserved
= num_blocks
;
168 h
->block_group
= NULL
;
169 h
->alloc_exclude_nr
= 0;
170 h
->alloc_exclude_start
= 0;
/* the handle holds a reference on the transaction */
171 root
->fs_info
->running_transaction
->use_count
++;
172 mutex_unlock(&root
->fs_info
->trans_mutex
);
/*
 * Public entry: start a transaction, waiting if a commit is in progress
 * (join == 0 in start_transaction()).
 */
176 struct btrfs_trans_handle
*btrfs_start_transaction(struct btrfs_root
*root
,
179 return start_transaction(root
, num_blocks
, 0);
/*
 * Public entry: join the running transaction even while it is blocked for
 * commit (join == 1 in start_transaction()).
 */
181 struct btrfs_trans_handle
*btrfs_join_transaction(struct btrfs_root
*root
,
184 return start_transaction(root
, num_blocks
, 1);
/*
 * Sleep until 'commit' has finished committing (commit_done set), using the
 * transaction's commit_wait queue and dropping trans_mutex around each
 * schedule so the committer can make progress.
 */
187 static noinline
int wait_for_commit(struct btrfs_root
*root
,
188 struct btrfs_transaction
*commit
)
191 mutex_lock(&root
->fs_info
->trans_mutex
);
192 while(!commit
->commit_done
) {
193 prepare_to_wait(&commit
->commit_wait
, &wait
,
194 TASK_UNINTERRUPTIBLE
);
/* re-check after prepare_to_wait to avoid a missed wakeup */
195 if (commit
->commit_done
)
197 mutex_unlock(&root
->fs_info
->trans_mutex
);
199 mutex_lock(&root
->fs_info
->trans_mutex
);
201 mutex_unlock(&root
->fs_info
->trans_mutex
);
202 finish_wait(&commit
->commit_wait
, &wait
);
/*
 * Common worker for ending a transaction handle: drop this writer from the
 * transaction, wake anyone waiting for writers to drain, optionally throttle
 * against an in-progress commit, then release the handle's reference and
 * free the (poisoned) handle.
 */
206 static int __btrfs_end_transaction(struct btrfs_trans_handle
*trans
,
207 struct btrfs_root
*root
, int throttle
)
209 struct btrfs_transaction
*cur_trans
;
211 mutex_lock(&root
->fs_info
->trans_mutex
);
212 cur_trans
= root
->fs_info
->running_transaction
;
213 WARN_ON(cur_trans
!= trans
->transaction
);
214 WARN_ON(cur_trans
->num_writers
< 1);
215 cur_trans
->num_writers
--;
/* commit waits for num_writers to drain -- notify it */
217 if (waitqueue_active(&cur_trans
->writer_wait
))
218 wake_up(&cur_trans
->writer_wait
);
/* NOTE(review): the leading 0 makes this throttle branch dead code --
 * deliberately disabled?  Confirm before re-enabling. */
220 if (0 && cur_trans
->in_commit
&& throttle
) {
222 mutex_unlock(&root
->fs_info
->trans_mutex
);
223 prepare_to_wait(&root
->fs_info
->transaction_throttle
, &wait
,
224 TASK_UNINTERRUPTIBLE
);
226 finish_wait(&root
->fs_info
->transaction_throttle
, &wait
);
227 mutex_lock(&root
->fs_info
->trans_mutex
);
/* drop the reference taken by start_transaction() */
230 put_transaction(cur_trans
);
231 mutex_unlock(&root
->fs_info
->trans_mutex
);
/* poison the handle before returning it to the slab */
232 memset(trans
, 0, sizeof(*trans
));
233 kmem_cache_free(btrfs_trans_handle_cachep
, trans
);
/* End a transaction handle without commit throttling (throttle == 0). */
237 int btrfs_end_transaction(struct btrfs_trans_handle
*trans
,
238 struct btrfs_root
*root
)
240 return __btrfs_end_transaction(trans
, root
, 0);
/* End a transaction handle, requesting commit throttling (throttle == 1). */
243 int btrfs_end_transaction_throttle(struct btrfs_trans_handle
*trans
,
244 struct btrfs_root
*root
)
246 return __btrfs_end_transaction(trans
, root
, 1);
/*
 * Write out all btree pages dirtied by this transaction and wait for the
 * I/O: walk the transaction's dirty_pages extent tree, clear the dirty bits,
 * lock each page and either wait out in-flight writeback or issue
 * write_one_page(), then filemap_fdatawait() the whole btree mapping.
 * With no transaction, falls back to a full write-and-wait of the btree
 * inode.
 */
250 int btrfs_write_and_wait_transaction(struct btrfs_trans_handle
*trans
,
251 struct btrfs_root
*root
)
256 struct extent_io_tree
*dirty_pages
;
258 struct inode
*btree_inode
= root
->fs_info
->btree_inode
;
/* no transaction context: flush everything */
263 if (!trans
|| !trans
->transaction
) {
264 return filemap_write_and_wait(btree_inode
->i_mapping
);
266 dirty_pages
= &trans
->transaction
->dirty_pages
;
268 ret
= find_first_extent_bit(dirty_pages
, 0, &start
, &end
,
272 clear_extent_dirty(dirty_pages
, start
, end
, GFP_NOFS
);
/* walk the dirty range page by page */
273 while(start
<= end
) {
274 index
= start
>> PAGE_CACHE_SHIFT
;
275 start
= (u64
)(index
+ 1) << PAGE_CACHE_SHIFT
;
276 page
= find_lock_page(btree_inode
->i_mapping
, index
);
/* already under writeback: just wait for it to finish */
279 if (PageWriteback(page
)) {
281 wait_on_page_writeback(page
);
284 page_cache_release(page
);
288 err
= write_one_page(page
, 0);
291 page_cache_release(page
);
/* wait for all outstanding btree writeback to complete */
294 err
= filemap_fdatawait(btree_inode
->i_mapping
);
/*
 * Flush one COW-only (non-subvolume) root: write its dirty block groups and
 * update its root item in the tree root, repeating until the root node's
 * bytenr stops changing (writing block groups can COW the root again).
 */
300 static int update_cowonly_root(struct btrfs_trans_handle
*trans
,
301 struct btrfs_root
*root
)
305 struct btrfs_root
*tree_root
= root
->fs_info
->tree_root
;
307 btrfs_write_dirty_block_groups(trans
, root
);
309 old_root_bytenr
= btrfs_root_bytenr(&root
->root_item
);
/* stable: the recorded bytenr matches the live root node */
310 if (old_root_bytenr
== root
->node
->start
)
312 btrfs_set_root_bytenr(&root
->root_item
,
314 btrfs_set_root_level(&root
->root_item
,
315 btrfs_header_level(root
->node
));
316 ret
= btrfs_update_root(trans
, tree_root
,
320 btrfs_write_dirty_block_groups(trans
, root
);
/*
 * Drain fs_info->dirty_cowonly_roots, flushing each root via
 * update_cowonly_root() until the list is empty.
 */
325 int btrfs_commit_tree_roots(struct btrfs_trans_handle
*trans
,
326 struct btrfs_root
*root
)
328 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
329 struct list_head
*next
;
331 while(!list_empty(&fs_info
->dirty_cowonly_roots
)) {
332 next
= fs_info
->dirty_cowonly_roots
.next
;
334 root
= list_entry(next
, struct btrfs_root
, dirty_list
);
335 update_cowonly_root(trans
, root
);
/*
 * Queue a root for later deletion: wrap it in a dirty_root, remember which
 * live root it came from, detach its leaf-ref tree, and add it to dead_list.
 * NOTE(review): the kmalloc() result is not NULL-checked in the visible
 * lines -- confirm against the full source.
 */
340 int btrfs_add_dead_root(struct btrfs_root
*root
,
341 struct btrfs_root
*latest
,
342 struct list_head
*dead_list
)
344 struct dirty_root
*dirty
;
346 dirty
= kmalloc(sizeof(*dirty
), GFP_NOFS
);
349 btrfs_leaf_ref_tree_init(&dirty
->ref_tree
);
351 dirty
->latest_root
= latest
;
/* the dying root no longer owns a leaf-ref tree */
352 root
->ref_tree
= NULL
;
353 list_add(&dirty
->list
, dead_list
);
/*
 * At commit time, sweep the fs roots tagged BTRFS_ROOT_TRANS_TAG in the
 * radix tree.  For each: clear the tag; if the root was never re-COWed
 * (commit_root == node) just drop the frozen copy; otherwise write the
 * updated root item, reset its drop-progress, re-insert the root under a
 * new key offset (the fs generation), drop one reference on the old
 * snapshot copy, and queue the dirty_root on 'list' for later cleanup.
 */
357 static noinline
int add_dirty_roots(struct btrfs_trans_handle
*trans
,
358 struct radix_tree_root
*radix
,
359 struct list_head
*list
)
361 struct dirty_root
*dirty
;
362 struct btrfs_root
*gang
[8];
363 struct btrfs_root
*root
;
/* fetch up to 8 tagged roots per pass */
370 ret
= radix_tree_gang_lookup_tag(radix
, (void **)gang
, 0,
372 BTRFS_ROOT_TRANS_TAG
);
375 for (i
= 0; i
< ret
; i
++) {
377 radix_tree_tag_clear(radix
,
378 (unsigned long)root
->root_key
.objectid
,
379 BTRFS_ROOT_TRANS_TAG
);
381 BUG_ON(!root
->ref_tree
);
/* recover the dirty_root that owns this root's ref_tree */
382 dirty
= container_of(root
->ref_tree
, struct dirty_root
,
/* root was tagged but never modified: discard the frozen copy */
385 if (root
->commit_root
== root
->node
) {
386 WARN_ON(root
->node
->start
!=
387 btrfs_root_bytenr(&root
->root_item
));
389 BUG_ON(!btrfs_leaf_ref_tree_empty(
391 free_extent_buffer(root
->commit_root
);
392 root
->commit_root
= NULL
;
393 root
->ref_tree
= NULL
;
398 /* make sure to update the root on disk
399 * so we get any updates to the block used
402 err
= btrfs_update_root(trans
,
403 root
->fs_info
->tree_root
,
/* fresh root gets a clean snapshot-deletion progress record */
409 memset(&root
->root_item
.drop_progress
, 0,
410 sizeof(struct btrfs_disk_key
));
411 root
->root_item
.drop_level
= 0;
412 root
->commit_root
= NULL
;
413 root
->ref_tree
= NULL
;
/* new key offset is the current fs generation */
414 root
->root_key
.offset
= root
->fs_info
->generation
;
415 btrfs_set_root_bytenr(&root
->root_item
,
417 btrfs_set_root_level(&root
->root_item
,
418 btrfs_header_level(root
->node
));
419 err
= btrfs_insert_root(trans
, root
->fs_info
->tree_root
,
/* the old snapshot copy loses one reference */
425 refs
= btrfs_root_refs(&dirty
->root
->root_item
);
426 btrfs_set_root_refs(&dirty
->root
->root_item
, refs
- 1);
427 err
= btrfs_update_root(trans
, root
->fs_info
->tree_root
,
428 &dirty
->root
->root_key
,
429 &dirty
->root
->root_item
);
/* queue for drop_dirty_roots() once it is fully unreferenced */
433 list_add(&dirty
->list
, list
);
436 free_extent_buffer(dirty
->root
->node
);
/*
 * Defragment a root's btree leaves: loop starting a one-block transaction,
 * calling btrfs_defrag_leaves(), ending the transaction and balancing dirty
 * pages, until the fs is closing or defrag stops returning -EAGAIN.
 * Bails out early if a defrag is already running on this root.
 */
445 int btrfs_defrag_root(struct btrfs_root
*root
, int cacheonly
)
447 struct btrfs_fs_info
*info
= root
->fs_info
;
449 struct btrfs_trans_handle
*trans
;
/* only one defrag per root at a time */
453 if (root
->defrag_running
)
455 trans
= btrfs_start_transaction(root
, 1);
457 root
->defrag_running
= 1;
458 ret
= btrfs_defrag_leaves(trans
, root
, cacheonly
);
/* capture blocks used before the handle is freed */
459 nr
= trans
->blocks_used
;
460 btrfs_end_transaction(trans
, root
);
461 btrfs_btree_balance_dirty(info
->tree_root
, nr
);
464 trans
= btrfs_start_transaction(root
, 1);
/* done when unmounting or defrag no longer asks to continue */
465 if (root
->fs_info
->closing
|| ret
!= -EAGAIN
)
468 root
->defrag_running
= 0;
470 btrfs_end_transaction(trans
, root
);
/*
 * Delete each dead/dirty root queued on 'list': repeatedly run
 * btrfs_drop_snapshot() in its own throttled transaction (dropping
 * drop_mutex between passes so others can make progress) until it stops
 * returning -EAGAIN, fold the freed space back into the latest root's
 * usage accounting, delete the root item, and free the buffers and the
 * dirty_root itself.
 */
474 static noinline
int drop_dirty_roots(struct btrfs_root
*tree_root
,
475 struct list_head
*list
)
477 struct dirty_root
*dirty
;
478 struct btrfs_trans_handle
*trans
;
485 while(!list_empty(list
)) {
486 struct btrfs_root
*root
;
488 dirty
= list_entry(list
->next
, struct dirty_root
, list
);
489 list_del_init(&dirty
->list
);
/* bytes held by the snapshot before we start dropping it */
491 num_bytes
= btrfs_root_used(&dirty
->root
->root_item
);
492 root
= dirty
->latest_root
;
493 atomic_inc(&root
->fs_info
->throttles
);
495 mutex_lock(&root
->fs_info
->drop_mutex
);
/* one transaction per drop pass */
497 trans
= btrfs_start_transaction(tree_root
, 1);
498 ret
= btrfs_drop_snapshot(trans
, dirty
->root
);
/* -EAGAIN means more passes are needed; anything else is final */
499 if (ret
!= -EAGAIN
) {
/* persist partial drop progress so it can resume later */
503 err
= btrfs_update_root(trans
,
505 &dirty
->root
->root_key
,
506 &dirty
->root
->root_item
);
509 nr
= trans
->blocks_used
;
510 ret
= btrfs_end_transaction_throttle(trans
, tree_root
);
/* release drop_mutex while balancing dirty pages */
513 mutex_unlock(&root
->fs_info
->drop_mutex
);
514 btrfs_btree_balance_dirty(tree_root
, nr
);
516 mutex_lock(&root
->fs_info
->drop_mutex
);
519 atomic_dec(&root
->fs_info
->throttles
);
/* give the space released by the drop back to the latest root */
521 mutex_lock(&root
->fs_info
->alloc_mutex
);
522 num_bytes
-= btrfs_root_used(&dirty
->root
->root_item
);
523 bytes_used
= btrfs_root_used(&root
->root_item
);
525 record_root_in_trans(root
);
526 btrfs_set_root_used(&root
->root_item
,
527 bytes_used
- num_bytes
);
529 mutex_unlock(&root
->fs_info
->alloc_mutex
);
/* the snapshot is fully gone: remove its root item */
531 ret
= btrfs_del_root(trans
, tree_root
, &dirty
->root
->root_key
);
536 mutex_unlock(&root
->fs_info
->drop_mutex
);
538 nr
= trans
->blocks_used
;
539 ret
= btrfs_end_transaction(trans
, tree_root
);
542 btrfs_remove_leaf_refs(dirty
->root
);
544 free_extent_buffer(dirty
->root
->node
);
548 btrfs_btree_balance_dirty(tree_root
, nr
);
/*
 * Materialize one queued snapshot: allocate a new root item, grab a free
 * objectid, COW-copy the source root's top node under the new objectid,
 * insert the new root item into the tree root, then add the directory
 * entry and inode ref for the snapshot under the fs root directory and
 * invalidate any stale dcache entry for the name.
 */
554 static noinline
int create_pending_snapshot(struct btrfs_trans_handle
*trans
,
555 struct btrfs_fs_info
*fs_info
,
556 struct btrfs_pending_snapshot
*pending
)
558 struct btrfs_key key
;
559 struct btrfs_root_item
*new_root_item
;
560 struct btrfs_root
*tree_root
= fs_info
->tree_root
;
561 struct btrfs_root
*root
= pending
->root
;
562 struct extent_buffer
*tmp
;
563 struct extent_buffer
*old
;
568 new_root_item
= kmalloc(sizeof(*new_root_item
), GFP_NOFS
);
569 if (!new_root_item
) {
573 ret
= btrfs_find_free_objectid(trans
, tree_root
, 0, &objectid
);
/* the snapshot's root item starts as a copy of the source's */
577 memcpy(new_root_item
, &root
->root_item
, sizeof(*new_root_item
));
579 key
.objectid
= objectid
;
581 btrfs_set_key_type(&key
, BTRFS_ROOT_ITEM_KEY
);
/* COW the source root's top node, then copy it for the snapshot */
583 old
= btrfs_lock_root_node(root
);
584 btrfs_cow_block(trans
, root
, old
, NULL
, 0, &old
);
586 btrfs_copy_root(trans
, root
, old
, &tmp
, objectid
);
587 btrfs_tree_unlock(old
);
588 free_extent_buffer(old
);
/* point the new root item at the copied node */
590 btrfs_set_root_bytenr(new_root_item
, tmp
->start
);
591 btrfs_set_root_level(new_root_item
, btrfs_header_level(tmp
));
592 ret
= btrfs_insert_root(trans
, root
->fs_info
->tree_root
, &key
,
594 btrfs_tree_unlock(tmp
);
595 free_extent_buffer(tmp
);
600 * insert the directory item
602 key
.offset
= (u64
)-1;
603 namelen
= strlen(pending
->name
);
604 ret
= btrfs_insert_dir_item(trans
, root
->fs_info
->tree_root
,
605 pending
->name
, namelen
,
606 root
->fs_info
->sb
->s_root
->d_inode
->i_ino
,
607 &key
, BTRFS_FT_DIR
, 0);
612 ret
= btrfs_insert_inode_ref(trans
, root
->fs_info
->tree_root
,
613 pending
->name
, strlen(pending
->name
), objectid
,
614 root
->fs_info
->sb
->s_root
->d_inode
->i_ino
, 0);
616 /* Invalidate existing dcache entry for new snapshot. */
617 btrfs_invalidate_dcache_root(root
, pending
->name
, namelen
);
620 kfree(new_root_item
);
/*
 * Drain the transaction's pending_snapshots list, creating each snapshot
 * via create_pending_snapshot() and freeing the request's name and entry.
 */
624 static noinline
int create_pending_snapshots(struct btrfs_trans_handle
*trans
,
625 struct btrfs_fs_info
*fs_info
)
627 struct btrfs_pending_snapshot
*pending
;
628 struct list_head
*head
= &trans
->transaction
->pending_snapshots
;
631 while(!list_empty(head
)) {
632 pending
= list_entry(head
->next
,
633 struct btrfs_pending_snapshot
, list
);
634 ret
= create_pending_snapshot(trans
, fs_info
, pending
);
/* the request is consumed regardless of the snapshot result */
636 list_del(&pending
->list
);
637 kfree(pending
->name
);
/*
 * Commit the transaction behind 'trans'.  If a commit is already underway
 * for it, just wait for that commit.  Otherwise: wait for the previous
 * transaction to finish, block new non-join writers and wait for current
 * writers to drain, create pending snapshots, sweep dirty fs roots, flush
 * the COW-only tree roots, detach the running transaction, stage the new
 * tree/chunk root pointers into the super copy, write out all dirty btree
 * pages, write the super block, release pinned extents, then mark the
 * transaction done, wake waiters, and clean up (dropping dead roots when
 * the fs is closing).
 */
643 int btrfs_commit_transaction(struct btrfs_trans_handle
*trans
,
644 struct btrfs_root
*root
)
646 unsigned long joined
= 0;
647 unsigned long timeout
= 1;
648 struct btrfs_transaction
*cur_trans
;
649 struct btrfs_transaction
*prev_trans
= NULL
;
650 struct btrfs_root
*chunk_root
= root
->fs_info
->chunk_root
;
651 struct list_head dirty_fs_roots
;
652 struct extent_io_tree
*pinned_copy
;
656 INIT_LIST_HEAD(&dirty_fs_roots
);
658 mutex_lock(&root
->fs_info
->trans_mutex
);
/* someone else is already committing: piggyback on their commit */
659 if (trans
->transaction
->in_commit
) {
660 cur_trans
= trans
->transaction
;
661 trans
->transaction
->use_count
++;
662 mutex_unlock(&root
->fs_info
->trans_mutex
);
663 btrfs_end_transaction(trans
, root
);
665 ret
= wait_for_commit(root
, cur_trans
);
668 mutex_lock(&root
->fs_info
->trans_mutex
);
669 put_transaction(cur_trans
);
670 mutex_unlock(&root
->fs_info
->trans_mutex
);
/* copy of the pinned extents, released after the super is written */
675 pinned_copy
= kmalloc(sizeof(*pinned_copy
), GFP_NOFS
);
679 extent_io_tree_init(pinned_copy
,
680 root
->fs_info
->btree_inode
->i_mapping
, GFP_NOFS
);
/* we own the commit; block new non-join writers */
682 trans
->transaction
->in_commit
= 1;
683 trans
->transaction
->blocked
= 1;
684 cur_trans
= trans
->transaction
;
/* make sure the previous transaction on trans_list has committed */
685 if (cur_trans
->list
.prev
!= &root
->fs_info
->trans_list
) {
686 prev_trans
= list_entry(cur_trans
->list
.prev
,
687 struct btrfs_transaction
, list
);
688 if (!prev_trans
->commit_done
) {
689 prev_trans
->use_count
++;
690 mutex_unlock(&root
->fs_info
->trans_mutex
);
692 wait_for_commit(root
, prev_trans
);
694 mutex_lock(&root
->fs_info
->trans_mutex
);
695 put_transaction(prev_trans
);
/* wait until we are the only writer and no one new has joined */
700 joined
= cur_trans
->num_joined
;
701 WARN_ON(cur_trans
!= trans
->transaction
);
702 prepare_to_wait(&cur_trans
->writer_wait
, &wait
,
703 TASK_UNINTERRUPTIBLE
);
705 if (cur_trans
->num_writers
> 1)
706 timeout
= MAX_SCHEDULE_TIMEOUT
;
710 mutex_unlock(&root
->fs_info
->trans_mutex
);
712 schedule_timeout(timeout
);
714 mutex_lock(&root
->fs_info
->trans_mutex
);
715 finish_wait(&cur_trans
->writer_wait
, &wait
);
716 } while (cur_trans
->num_writers
> 1 ||
717 (cur_trans
->num_joined
!= joined
));
719 ret
= create_pending_snapshots(trans
, root
->fs_info
);
722 WARN_ON(cur_trans
!= trans
->transaction
);
/* sweep fs roots tagged during this transaction */
724 ret
= add_dirty_roots(trans
, &root
->fs_info
->fs_roots_radix
,
728 spin_lock(&root
->fs_info
->ref_cache_lock
);
729 root
->fs_info
->running_ref_cache_size
= 0;
730 spin_unlock(&root
->fs_info
->ref_cache_lock
);
732 ret
= btrfs_commit_tree_roots(trans
, root
);
/* detach the transaction so new work starts a fresh one */
735 cur_trans
= root
->fs_info
->running_transaction
;
736 spin_lock(&root
->fs_info
->new_trans_lock
);
737 root
->fs_info
->running_transaction
= NULL
;
738 spin_unlock(&root
->fs_info
->new_trans_lock
);
/* stage the committed tree/chunk root pointers in the super copy */
739 btrfs_set_super_generation(&root
->fs_info
->super_copy
,
741 btrfs_set_super_root(&root
->fs_info
->super_copy
,
742 root
->fs_info
->tree_root
->node
->start
);
743 btrfs_set_super_root_level(&root
->fs_info
->super_copy
,
744 btrfs_header_level(root
->fs_info
->tree_root
->node
));
746 btrfs_set_super_chunk_root(&root
->fs_info
->super_copy
,
747 chunk_root
->node
->start
);
748 btrfs_set_super_chunk_root_level(&root
->fs_info
->super_copy
,
749 btrfs_header_level(chunk_root
->node
));
750 memcpy(&root
->fs_info
->super_for_commit
, &root
->fs_info
->super_copy
,
751 sizeof(root
->fs_info
->super_copy
));
753 btrfs_copy_pinned(root
, pinned_copy
);
/* unblock waiters now that the critical section is staged */
755 trans
->transaction
->blocked
= 0;
756 wake_up(&root
->fs_info
->transaction_throttle
);
757 wake_up(&root
->fs_info
->transaction_wait
);
/* heavy I/O happens outside trans_mutex */
759 mutex_unlock(&root
->fs_info
->trans_mutex
);
760 ret
= btrfs_write_and_wait_transaction(trans
, root
);
762 write_ctree_super(trans
, root
);
/* the super is on disk: pinned extents can now be released */
764 btrfs_finish_extent_commit(trans
, root
, pinned_copy
);
765 mutex_lock(&root
->fs_info
->trans_mutex
);
769 cur_trans
->commit_done
= 1;
770 root
->fs_info
->last_trans_committed
= cur_trans
->transid
;
771 wake_up(&cur_trans
->commit_wait
);
/* drop both the handle's reference and the transaction's own */
772 put_transaction(cur_trans
);
773 put_transaction(cur_trans
);
775 if (root
->fs_info
->closing
)
776 list_splice_init(&root
->fs_info
->dead_roots
, &dirty_fs_roots
);
778 list_splice_init(&dirty_fs_roots
, &root
->fs_info
->dead_roots
);
780 mutex_unlock(&root
->fs_info
->trans_mutex
);
781 kmem_cache_free(btrfs_trans_handle_cachep
, trans
);
/* unmounting: clean up the dead roots synchronously */
783 if (root
->fs_info
->closing
) {
784 drop_dirty_roots(root
->fs_info
->tree_root
, &dirty_fs_roots
);
/*
 * Reap dead roots: splice fs_info->dead_roots onto a private list under
 * trans_mutex, then (lock dropped) delete them via drop_dirty_roots().
 */
789 int btrfs_clean_old_snapshots(struct btrfs_root
*root
)
791 struct list_head dirty_roots
;
792 INIT_LIST_HEAD(&dirty_roots
);
794 mutex_lock(&root
->fs_info
->trans_mutex
);
795 list_splice_init(&root
->fs_info
->dead_roots
, &dirty_roots
);
796 mutex_unlock(&root
->fs_info
->trans_mutex
);
798 if (!list_empty(&dirty_roots
)) {
799 drop_dirty_roots(root
, &dirty_roots
);