2 * Copyright (C) 2007 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
/* count of live btrfs_transaction structs; sanity-checked in put_transaction */
static int total_trans = 0;

/* slab caches defined elsewhere (see super/init code) */
extern struct kmem_cache *btrfs_trans_handle_cachep;
extern struct kmem_cache *btrfs_transaction_cachep;

/* workqueue driving the periodic transaction-commit cleaner */
static struct workqueue_struct *trans_wq;

/* radix tree tags on fs_roots_radix: root touched this transaction / needs defrag */
#define BTRFS_ROOT_TRANS_TAG 0
#define BTRFS_ROOT_DEFRAG_TAG 1
36 static noinline
void put_transaction(struct btrfs_transaction
*transaction
)
38 WARN_ON(transaction
->use_count
== 0);
39 transaction
->use_count
--;
40 if (transaction
->use_count
== 0) {
41 WARN_ON(total_trans
== 0);
43 list_del_init(&transaction
->list
);
44 memset(transaction
, 0, sizeof(*transaction
));
45 kmem_cache_free(btrfs_transaction_cachep
, transaction
);
49 static noinline
int join_transaction(struct btrfs_root
*root
)
51 struct btrfs_transaction
*cur_trans
;
52 cur_trans
= root
->fs_info
->running_transaction
;
54 cur_trans
= kmem_cache_alloc(btrfs_transaction_cachep
,
58 root
->fs_info
->generation
++;
59 root
->fs_info
->last_alloc
= 0;
60 root
->fs_info
->last_data_alloc
= 0;
61 cur_trans
->num_writers
= 1;
62 cur_trans
->num_joined
= 0;
63 cur_trans
->transid
= root
->fs_info
->generation
;
64 init_waitqueue_head(&cur_trans
->writer_wait
);
65 init_waitqueue_head(&cur_trans
->commit_wait
);
66 cur_trans
->in_commit
= 0;
67 cur_trans
->use_count
= 1;
68 cur_trans
->commit_done
= 0;
69 cur_trans
->start_time
= get_seconds();
70 INIT_LIST_HEAD(&cur_trans
->pending_snapshots
);
71 list_add_tail(&cur_trans
->list
, &root
->fs_info
->trans_list
);
72 btrfs_ordered_inode_tree_init(&cur_trans
->ordered_inode_tree
);
73 extent_io_tree_init(&cur_trans
->dirty_pages
,
74 root
->fs_info
->btree_inode
->i_mapping
,
76 spin_lock(&root
->fs_info
->new_trans_lock
);
77 root
->fs_info
->running_transaction
= cur_trans
;
78 spin_unlock(&root
->fs_info
->new_trans_lock
);
80 cur_trans
->num_writers
++;
81 cur_trans
->num_joined
++;
87 static noinline
int record_root_in_trans(struct btrfs_root
*root
)
89 u64 running_trans_id
= root
->fs_info
->running_transaction
->transid
;
90 if (root
->ref_cows
&& root
->last_trans
< running_trans_id
) {
91 WARN_ON(root
== root
->fs_info
->extent_root
);
92 if (root
->root_item
.refs
!= 0) {
93 radix_tree_tag_set(&root
->fs_info
->fs_roots_radix
,
94 (unsigned long)root
->root_key
.objectid
,
95 BTRFS_ROOT_TRANS_TAG
);
96 radix_tree_tag_set(&root
->fs_info
->fs_roots_radix
,
97 (unsigned long)root
->root_key
.objectid
,
98 BTRFS_ROOT_DEFRAG_TAG
);
99 root
->commit_root
= root
->node
;
100 extent_buffer_get(root
->node
);
104 root
->last_trans
= running_trans_id
;
109 struct btrfs_trans_handle
*btrfs_start_transaction(struct btrfs_root
*root
,
112 struct btrfs_trans_handle
*h
=
113 kmem_cache_alloc(btrfs_trans_handle_cachep
, GFP_NOFS
);
116 mutex_lock(&root
->fs_info
->trans_mutex
);
117 ret
= join_transaction(root
);
120 record_root_in_trans(root
);
121 h
->transid
= root
->fs_info
->running_transaction
->transid
;
122 h
->transaction
= root
->fs_info
->running_transaction
;
123 h
->blocks_reserved
= num_blocks
;
125 h
->block_group
= NULL
;
126 h
->alloc_exclude_nr
= 0;
127 h
->alloc_exclude_start
= 0;
128 root
->fs_info
->running_transaction
->use_count
++;
129 mutex_unlock(&root
->fs_info
->trans_mutex
);
133 int btrfs_end_transaction(struct btrfs_trans_handle
*trans
,
134 struct btrfs_root
*root
)
136 struct btrfs_transaction
*cur_trans
;
138 mutex_lock(&root
->fs_info
->trans_mutex
);
139 cur_trans
= root
->fs_info
->running_transaction
;
140 WARN_ON(cur_trans
!= trans
->transaction
);
141 WARN_ON(cur_trans
->num_writers
< 1);
142 cur_trans
->num_writers
--;
143 if (waitqueue_active(&cur_trans
->writer_wait
))
144 wake_up(&cur_trans
->writer_wait
);
145 put_transaction(cur_trans
);
146 mutex_unlock(&root
->fs_info
->trans_mutex
);
147 memset(trans
, 0, sizeof(*trans
));
148 kmem_cache_free(btrfs_trans_handle_cachep
, trans
);
153 int btrfs_write_and_wait_transaction(struct btrfs_trans_handle
*trans
,
154 struct btrfs_root
*root
)
159 struct extent_io_tree
*dirty_pages
;
161 struct inode
*btree_inode
= root
->fs_info
->btree_inode
;
166 if (!trans
|| !trans
->transaction
) {
167 return filemap_write_and_wait(btree_inode
->i_mapping
);
169 dirty_pages
= &trans
->transaction
->dirty_pages
;
171 ret
= find_first_extent_bit(dirty_pages
, 0, &start
, &end
,
175 clear_extent_dirty(dirty_pages
, start
, end
, GFP_NOFS
);
176 while(start
<= end
) {
177 index
= start
>> PAGE_CACHE_SHIFT
;
178 start
= (u64
)(index
+ 1) << PAGE_CACHE_SHIFT
;
179 page
= find_lock_page(btree_inode
->i_mapping
, index
);
182 if (PageWriteback(page
)) {
184 wait_on_page_writeback(page
);
187 page_cache_release(page
);
191 err
= write_one_page(page
, 0);
194 page_cache_release(page
);
197 err
= filemap_fdatawait(btree_inode
->i_mapping
);
203 static int update_cowonly_root(struct btrfs_trans_handle
*trans
,
204 struct btrfs_root
*root
)
208 struct btrfs_root
*tree_root
= root
->fs_info
->tree_root
;
210 btrfs_write_dirty_block_groups(trans
, root
);
212 old_root_bytenr
= btrfs_root_bytenr(&root
->root_item
);
213 if (old_root_bytenr
== root
->node
->start
)
215 btrfs_set_root_bytenr(&root
->root_item
,
217 btrfs_set_root_level(&root
->root_item
,
218 btrfs_header_level(root
->node
));
219 ret
= btrfs_update_root(trans
, tree_root
,
223 btrfs_write_dirty_block_groups(trans
, root
);
228 int btrfs_commit_tree_roots(struct btrfs_trans_handle
*trans
,
229 struct btrfs_root
*root
)
231 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
232 struct list_head
*next
;
234 while(!list_empty(&fs_info
->dirty_cowonly_roots
)) {
235 next
= fs_info
->dirty_cowonly_roots
.next
;
237 root
= list_entry(next
, struct btrfs_root
, dirty_list
);
238 update_cowonly_root(trans
, root
);
243 static noinline
int wait_for_commit(struct btrfs_root
*root
,
244 struct btrfs_transaction
*commit
)
247 mutex_lock(&root
->fs_info
->trans_mutex
);
248 while(!commit
->commit_done
) {
249 prepare_to_wait(&commit
->commit_wait
, &wait
,
250 TASK_UNINTERRUPTIBLE
);
251 if (commit
->commit_done
)
253 mutex_unlock(&root
->fs_info
->trans_mutex
);
255 mutex_lock(&root
->fs_info
->trans_mutex
);
257 mutex_unlock(&root
->fs_info
->trans_mutex
);
258 finish_wait(&commit
->commit_wait
, &wait
);
263 struct list_head list
;
264 struct btrfs_root
*root
;
265 struct btrfs_root
*latest_root
;
268 int btrfs_add_dead_root(struct btrfs_root
*root
,
269 struct btrfs_root
*latest
,
270 struct list_head
*dead_list
)
272 struct dirty_root
*dirty
;
274 dirty
= kmalloc(sizeof(*dirty
), GFP_NOFS
);
278 dirty
->latest_root
= latest
;
279 list_add(&dirty
->list
, dead_list
);
283 static noinline
int add_dirty_roots(struct btrfs_trans_handle
*trans
,
284 struct radix_tree_root
*radix
,
285 struct list_head
*list
)
287 struct dirty_root
*dirty
;
288 struct btrfs_root
*gang
[8];
289 struct btrfs_root
*root
;
296 ret
= radix_tree_gang_lookup_tag(radix
, (void **)gang
, 0,
298 BTRFS_ROOT_TRANS_TAG
);
301 for (i
= 0; i
< ret
; i
++) {
303 radix_tree_tag_clear(radix
,
304 (unsigned long)root
->root_key
.objectid
,
305 BTRFS_ROOT_TRANS_TAG
);
306 if (root
->commit_root
== root
->node
) {
307 WARN_ON(root
->node
->start
!=
308 btrfs_root_bytenr(&root
->root_item
));
309 free_extent_buffer(root
->commit_root
);
310 root
->commit_root
= NULL
;
312 /* make sure to update the root on disk
313 * so we get any updates to the block used
316 err
= btrfs_update_root(trans
,
317 root
->fs_info
->tree_root
,
322 dirty
= kmalloc(sizeof(*dirty
), GFP_NOFS
);
324 dirty
->root
= kmalloc(sizeof(*dirty
->root
), GFP_NOFS
);
325 BUG_ON(!dirty
->root
);
327 memset(&root
->root_item
.drop_progress
, 0,
328 sizeof(struct btrfs_disk_key
));
329 root
->root_item
.drop_level
= 0;
331 memcpy(dirty
->root
, root
, sizeof(*root
));
332 dirty
->root
->node
= root
->commit_root
;
333 dirty
->latest_root
= root
;
334 root
->commit_root
= NULL
;
336 root
->root_key
.offset
= root
->fs_info
->generation
;
337 btrfs_set_root_bytenr(&root
->root_item
,
339 btrfs_set_root_level(&root
->root_item
,
340 btrfs_header_level(root
->node
));
341 err
= btrfs_insert_root(trans
, root
->fs_info
->tree_root
,
347 refs
= btrfs_root_refs(&dirty
->root
->root_item
);
348 btrfs_set_root_refs(&dirty
->root
->root_item
, refs
- 1);
349 err
= btrfs_update_root(trans
, root
->fs_info
->tree_root
,
350 &dirty
->root
->root_key
,
351 &dirty
->root
->root_item
);
355 list_add(&dirty
->list
, list
);
366 int btrfs_defrag_root(struct btrfs_root
*root
, int cacheonly
)
368 struct btrfs_fs_info
*info
= root
->fs_info
;
370 struct btrfs_trans_handle
*trans
;
373 if (root
->defrag_running
)
375 trans
= btrfs_start_transaction(root
, 1);
377 root
->defrag_running
= 1;
378 ret
= btrfs_defrag_leaves(trans
, root
, cacheonly
);
379 nr
= trans
->blocks_used
;
380 btrfs_end_transaction(trans
, root
);
381 mutex_unlock(&info
->fs_mutex
);
382 btrfs_btree_balance_dirty(info
->tree_root
, nr
);
385 mutex_lock(&info
->fs_mutex
);
386 trans
= btrfs_start_transaction(root
, 1);
390 root
->defrag_running
= 0;
391 radix_tree_tag_clear(&info
->fs_roots_radix
,
392 (unsigned long)root
->root_key
.objectid
,
393 BTRFS_ROOT_DEFRAG_TAG
);
394 btrfs_end_transaction(trans
, root
);
398 int btrfs_defrag_dirty_roots(struct btrfs_fs_info
*info
)
400 struct btrfs_root
*gang
[1];
401 struct btrfs_root
*root
;
408 ret
= radix_tree_gang_lookup_tag(&info
->fs_roots_radix
,
411 BTRFS_ROOT_DEFRAG_TAG
);
414 for (i
= 0; i
< ret
; i
++) {
416 last
= root
->root_key
.objectid
+ 1;
417 btrfs_defrag_root(root
, 1);
420 btrfs_defrag_root(info
->extent_root
, 1);
424 static noinline
int drop_dirty_roots(struct btrfs_root
*tree_root
,
425 struct list_head
*list
)
427 struct dirty_root
*dirty
;
428 struct btrfs_trans_handle
*trans
;
435 while(!list_empty(list
)) {
436 struct btrfs_root
*root
;
438 mutex_lock(&tree_root
->fs_info
->fs_mutex
);
439 dirty
= list_entry(list
->next
, struct dirty_root
, list
);
440 list_del_init(&dirty
->list
);
442 num_bytes
= btrfs_root_used(&dirty
->root
->root_item
);
443 root
= dirty
->latest_root
;
444 root
->fs_info
->throttles
++;
447 trans
= btrfs_start_transaction(tree_root
, 1);
448 ret
= btrfs_drop_snapshot(trans
, dirty
->root
);
449 if (ret
!= -EAGAIN
) {
453 err
= btrfs_update_root(trans
,
455 &dirty
->root
->root_key
,
456 &dirty
->root
->root_item
);
459 nr
= trans
->blocks_used
;
460 ret
= btrfs_end_transaction(trans
, tree_root
);
462 mutex_unlock(&tree_root
->fs_info
->fs_mutex
);
463 btrfs_btree_balance_dirty(tree_root
, nr
);
465 mutex_lock(&tree_root
->fs_info
->fs_mutex
);
468 root
->fs_info
->throttles
--;
470 num_bytes
-= btrfs_root_used(&dirty
->root
->root_item
);
471 bytes_used
= btrfs_root_used(&root
->root_item
);
473 record_root_in_trans(root
);
474 btrfs_set_root_used(&root
->root_item
,
475 bytes_used
- num_bytes
);
477 ret
= btrfs_del_root(trans
, tree_root
, &dirty
->root
->root_key
);
482 nr
= trans
->blocks_used
;
483 ret
= btrfs_end_transaction(trans
, tree_root
);
486 free_extent_buffer(dirty
->root
->node
);
489 mutex_unlock(&tree_root
->fs_info
->fs_mutex
);
491 btrfs_btree_balance_dirty(tree_root
, nr
);
497 int btrfs_write_ordered_inodes(struct btrfs_trans_handle
*trans
,
498 struct btrfs_root
*root
)
500 struct btrfs_transaction
*cur_trans
= trans
->transaction
;
502 u64 root_objectid
= 0;
506 root
->fs_info
->throttles
++;
508 ret
= btrfs_find_first_ordered_inode(
509 &cur_trans
->ordered_inode_tree
,
510 &root_objectid
, &objectid
, &inode
);
514 mutex_unlock(&root
->fs_info
->trans_mutex
);
515 mutex_unlock(&root
->fs_info
->fs_mutex
);
517 if (S_ISREG(inode
->i_mode
)) {
518 atomic_inc(&BTRFS_I(inode
)->ordered_writeback
);
519 filemap_fdatawrite(inode
->i_mapping
);
520 atomic_dec(&BTRFS_I(inode
)->ordered_writeback
);
524 mutex_lock(&root
->fs_info
->fs_mutex
);
525 mutex_lock(&root
->fs_info
->trans_mutex
);
530 ret
= btrfs_find_del_first_ordered_inode(
531 &cur_trans
->ordered_inode_tree
,
532 &root_objectid
, &objectid
, &inode
);
535 mutex_unlock(&root
->fs_info
->trans_mutex
);
536 mutex_unlock(&root
->fs_info
->fs_mutex
);
538 if (S_ISREG(inode
->i_mode
)) {
539 atomic_inc(&BTRFS_I(inode
)->ordered_writeback
);
540 filemap_write_and_wait(inode
->i_mapping
);
541 atomic_dec(&BTRFS_I(inode
)->ordered_writeback
);
543 atomic_dec(&inode
->i_count
);
546 mutex_lock(&root
->fs_info
->fs_mutex
);
547 mutex_lock(&root
->fs_info
->trans_mutex
);
549 root
->fs_info
->throttles
--;
553 static noinline
int create_pending_snapshot(struct btrfs_trans_handle
*trans
,
554 struct btrfs_fs_info
*fs_info
,
555 struct btrfs_pending_snapshot
*pending
)
557 struct btrfs_key key
;
558 struct btrfs_root_item
*new_root_item
;
559 struct btrfs_root
*tree_root
= fs_info
->tree_root
;
560 struct btrfs_root
*root
= pending
->root
;
561 struct extent_buffer
*tmp
;
565 new_root_item
= kmalloc(sizeof(*new_root_item
), GFP_NOFS
);
566 if (!new_root_item
) {
570 ret
= btrfs_find_free_objectid(trans
, tree_root
, 0, &objectid
);
574 memcpy(new_root_item
, &root
->root_item
, sizeof(*new_root_item
));
576 key
.objectid
= objectid
;
578 btrfs_set_key_type(&key
, BTRFS_ROOT_ITEM_KEY
);
580 extent_buffer_get(root
->node
);
581 btrfs_cow_block(trans
, root
, root
->node
, NULL
, 0, &tmp
);
582 free_extent_buffer(tmp
);
584 btrfs_copy_root(trans
, root
, root
->node
, &tmp
, objectid
);
586 btrfs_set_root_bytenr(new_root_item
, tmp
->start
);
587 btrfs_set_root_level(new_root_item
, btrfs_header_level(tmp
));
588 ret
= btrfs_insert_root(trans
, root
->fs_info
->tree_root
, &key
,
590 free_extent_buffer(tmp
);
595 * insert the directory item
597 key
.offset
= (u64
)-1;
598 ret
= btrfs_insert_dir_item(trans
, root
->fs_info
->tree_root
,
599 pending
->name
, strlen(pending
->name
),
600 root
->fs_info
->sb
->s_root
->d_inode
->i_ino
,
606 ret
= btrfs_insert_inode_ref(trans
, root
->fs_info
->tree_root
,
607 pending
->name
, strlen(pending
->name
), objectid
,
608 root
->fs_info
->sb
->s_root
->d_inode
->i_ino
);
610 kfree(new_root_item
);
614 static noinline
int create_pending_snapshots(struct btrfs_trans_handle
*trans
,
615 struct btrfs_fs_info
*fs_info
)
617 struct btrfs_pending_snapshot
*pending
;
618 struct list_head
*head
= &trans
->transaction
->pending_snapshots
;
621 while(!list_empty(head
)) {
622 pending
= list_entry(head
->next
,
623 struct btrfs_pending_snapshot
, list
);
624 ret
= create_pending_snapshot(trans
, fs_info
, pending
);
626 list_del(&pending
->list
);
627 kfree(pending
->name
);
633 int btrfs_commit_transaction(struct btrfs_trans_handle
*trans
,
634 struct btrfs_root
*root
)
636 unsigned long joined
= 0;
637 unsigned long timeout
= 1;
638 struct btrfs_transaction
*cur_trans
;
639 struct btrfs_transaction
*prev_trans
= NULL
;
640 struct btrfs_root
*chunk_root
= root
->fs_info
->chunk_root
;
641 struct list_head dirty_fs_roots
;
642 struct extent_io_tree
*pinned_copy
;
646 INIT_LIST_HEAD(&dirty_fs_roots
);
648 mutex_lock(&root
->fs_info
->trans_mutex
);
649 if (trans
->transaction
->in_commit
) {
650 cur_trans
= trans
->transaction
;
651 trans
->transaction
->use_count
++;
652 mutex_unlock(&root
->fs_info
->trans_mutex
);
653 btrfs_end_transaction(trans
, root
);
655 mutex_unlock(&root
->fs_info
->fs_mutex
);
656 ret
= wait_for_commit(root
, cur_trans
);
659 mutex_lock(&root
->fs_info
->trans_mutex
);
660 put_transaction(cur_trans
);
661 mutex_unlock(&root
->fs_info
->trans_mutex
);
663 mutex_lock(&root
->fs_info
->fs_mutex
);
667 pinned_copy
= kmalloc(sizeof(*pinned_copy
), GFP_NOFS
);
671 extent_io_tree_init(pinned_copy
,
672 root
->fs_info
->btree_inode
->i_mapping
, GFP_NOFS
);
674 trans
->transaction
->in_commit
= 1;
675 cur_trans
= trans
->transaction
;
676 if (cur_trans
->list
.prev
!= &root
->fs_info
->trans_list
) {
677 prev_trans
= list_entry(cur_trans
->list
.prev
,
678 struct btrfs_transaction
, list
);
679 if (!prev_trans
->commit_done
) {
680 prev_trans
->use_count
++;
681 mutex_unlock(&root
->fs_info
->fs_mutex
);
682 mutex_unlock(&root
->fs_info
->trans_mutex
);
684 wait_for_commit(root
, prev_trans
);
686 mutex_lock(&root
->fs_info
->fs_mutex
);
687 mutex_lock(&root
->fs_info
->trans_mutex
);
688 put_transaction(prev_trans
);
693 joined
= cur_trans
->num_joined
;
694 WARN_ON(cur_trans
!= trans
->transaction
);
695 prepare_to_wait(&cur_trans
->writer_wait
, &wait
,
696 TASK_UNINTERRUPTIBLE
);
698 if (cur_trans
->num_writers
> 1)
699 timeout
= MAX_SCHEDULE_TIMEOUT
;
703 mutex_unlock(&root
->fs_info
->fs_mutex
);
704 mutex_unlock(&root
->fs_info
->trans_mutex
);
706 schedule_timeout(timeout
);
708 mutex_lock(&root
->fs_info
->fs_mutex
);
709 mutex_lock(&root
->fs_info
->trans_mutex
);
710 finish_wait(&cur_trans
->writer_wait
, &wait
);
711 ret
= btrfs_write_ordered_inodes(trans
, root
);
713 } while (cur_trans
->num_writers
> 1 ||
714 (cur_trans
->num_joined
!= joined
));
716 ret
= create_pending_snapshots(trans
, root
->fs_info
);
719 WARN_ON(cur_trans
!= trans
->transaction
);
721 ret
= add_dirty_roots(trans
, &root
->fs_info
->fs_roots_radix
,
725 ret
= btrfs_commit_tree_roots(trans
, root
);
728 cur_trans
= root
->fs_info
->running_transaction
;
729 spin_lock(&root
->fs_info
->new_trans_lock
);
730 root
->fs_info
->running_transaction
= NULL
;
731 spin_unlock(&root
->fs_info
->new_trans_lock
);
732 btrfs_set_super_generation(&root
->fs_info
->super_copy
,
734 btrfs_set_super_root(&root
->fs_info
->super_copy
,
735 root
->fs_info
->tree_root
->node
->start
);
736 btrfs_set_super_root_level(&root
->fs_info
->super_copy
,
737 btrfs_header_level(root
->fs_info
->tree_root
->node
));
739 btrfs_set_super_chunk_root(&root
->fs_info
->super_copy
,
740 chunk_root
->node
->start
);
741 btrfs_set_super_chunk_root_level(&root
->fs_info
->super_copy
,
742 btrfs_header_level(chunk_root
->node
));
743 memcpy(&root
->fs_info
->super_for_commit
, &root
->fs_info
->super_copy
,
744 sizeof(root
->fs_info
->super_copy
));
746 btrfs_copy_pinned(root
, pinned_copy
);
748 mutex_unlock(&root
->fs_info
->trans_mutex
);
749 mutex_unlock(&root
->fs_info
->fs_mutex
);
750 ret
= btrfs_write_and_wait_transaction(trans
, root
);
752 write_ctree_super(trans
, root
);
754 mutex_lock(&root
->fs_info
->fs_mutex
);
755 btrfs_finish_extent_commit(trans
, root
, pinned_copy
);
756 mutex_lock(&root
->fs_info
->trans_mutex
);
760 cur_trans
->commit_done
= 1;
761 root
->fs_info
->last_trans_committed
= cur_trans
->transid
;
762 wake_up(&cur_trans
->commit_wait
);
763 put_transaction(cur_trans
);
764 put_transaction(cur_trans
);
766 if (root
->fs_info
->closing
)
767 list_splice_init(&root
->fs_info
->dead_roots
, &dirty_fs_roots
);
769 list_splice_init(&dirty_fs_roots
, &root
->fs_info
->dead_roots
);
771 mutex_unlock(&root
->fs_info
->trans_mutex
);
772 kmem_cache_free(btrfs_trans_handle_cachep
, trans
);
774 if (root
->fs_info
->closing
) {
775 mutex_unlock(&root
->fs_info
->fs_mutex
);
776 drop_dirty_roots(root
->fs_info
->tree_root
, &dirty_fs_roots
);
777 mutex_lock(&root
->fs_info
->fs_mutex
);
782 int btrfs_clean_old_snapshots(struct btrfs_root
*root
)
784 struct list_head dirty_roots
;
785 INIT_LIST_HEAD(&dirty_roots
);
787 mutex_lock(&root
->fs_info
->trans_mutex
);
788 list_splice_init(&root
->fs_info
->dead_roots
, &dirty_roots
);
789 mutex_unlock(&root
->fs_info
->trans_mutex
);
791 if (!list_empty(&dirty_roots
)) {
792 drop_dirty_roots(root
, &dirty_roots
);
796 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
797 void btrfs_transaction_cleaner(void *p
)
799 void btrfs_transaction_cleaner(struct work_struct
*work
)
802 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
803 struct btrfs_fs_info
*fs_info
= p
;
805 struct btrfs_fs_info
*fs_info
= container_of(work
,
806 struct btrfs_fs_info
,
810 struct btrfs_root
*root
= fs_info
->tree_root
;
811 struct btrfs_transaction
*cur
;
812 struct btrfs_trans_handle
*trans
;
814 unsigned long delay
= HZ
* 30;
817 mutex_lock(&root
->fs_info
->fs_mutex
);
818 if (root
->fs_info
->closing
)
821 mutex_lock(&root
->fs_info
->trans_mutex
);
822 cur
= root
->fs_info
->running_transaction
;
824 mutex_unlock(&root
->fs_info
->trans_mutex
);
828 if (now
< cur
->start_time
|| now
- cur
->start_time
< 30) {
829 mutex_unlock(&root
->fs_info
->trans_mutex
);
833 mutex_unlock(&root
->fs_info
->trans_mutex
);
834 btrfs_defrag_dirty_roots(root
->fs_info
);
835 trans
= btrfs_start_transaction(root
, 1);
836 ret
= btrfs_commit_transaction(trans
, root
);
838 mutex_unlock(&root
->fs_info
->fs_mutex
);
839 btrfs_clean_old_snapshots(root
);
840 btrfs_transaction_queue_work(root
, delay
);
843 void btrfs_transaction_queue_work(struct btrfs_root
*root
, int delay
)
845 if (!root
->fs_info
->closing
)
846 queue_delayed_work(trans_wq
, &root
->fs_info
->trans_work
, delay
);
849 void btrfs_transaction_flush_work(struct btrfs_root
*root
)
851 cancel_delayed_work(&root
->fs_info
->trans_work
);
852 flush_workqueue(trans_wq
);
855 void __init
btrfs_init_transaction_sys(void)
857 trans_wq
= create_workqueue("btrfs-transaction");
860 void btrfs_exit_transaction_sys(void)
862 destroy_workqueue(trans_wq
);