Btrfs: Further reduce the concurrency penalty of defrag and drop_snapshot
fs/btrfs/transaction.c
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/sched.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"

static int total_trans = 0;
extern struct kmem_cache *btrfs_trans_handle_cachep;
extern struct kmem_cache *btrfs_transaction_cachep;

static struct workqueue_struct *trans_wq;

#define BTRFS_ROOT_TRANS_TAG 0
#define BTRFS_ROOT_DEFRAG_TAG 1

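/*
 * Drop one reference on a transaction and free it once the last
 * reference is gone.
 */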
static void put_transaction(struct btrfs_transaction *transaction)
{
        WARN_ON(transaction->use_count == 0);
        transaction->use_count--;
        if (transaction->use_count == 0) {
                WARN_ON(total_trans == 0);
                total_trans--;
                list_del_init(&transaction->list);
                memset(transaction, 0, sizeof(*transaction));
                kmem_cache_free(btrfs_transaction_cachep, transaction);
        }
}

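/*
 * Join the currently running transaction, starting a new one if none is
 * active.  The caller must hold fs_info->trans_mutex.
 */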
static int join_transaction(struct btrfs_root *root)
{
        struct btrfs_transaction *cur_trans;
        cur_trans = root->fs_info->running_transaction;
        if (!cur_trans) {
                cur_trans = kmem_cache_alloc(btrfs_transaction_cachep,
                                             GFP_NOFS);
                total_trans++;
                BUG_ON(!cur_trans);
                root->fs_info->generation++;
                root->fs_info->running_transaction = cur_trans;
                cur_trans->num_writers = 0;
                cur_trans->transid = root->fs_info->generation;
                init_waitqueue_head(&cur_trans->writer_wait);
                init_waitqueue_head(&cur_trans->commit_wait);
                cur_trans->in_commit = 0;
                cur_trans->use_count = 1;
                cur_trans->commit_done = 0;
                cur_trans->start_time = get_seconds();
                list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
                init_bit_radix(&cur_trans->dirty_pages);
        }
        cur_trans->num_writers++;
        return 0;
}

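/*
 * The first time a copy-on-write root is modified in a transaction, tag it
 * in the fs_roots radix tree for commit and defrag and pin its current
 * node as the commit root.  Called with fs_info->trans_mutex held.
 */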
static int record_root_in_trans(struct btrfs_root *root)
{
        u64 running_trans_id = root->fs_info->running_transaction->transid;
        if (root->ref_cows && root->last_trans < running_trans_id) {
                WARN_ON(root == root->fs_info->extent_root);
                if (root->root_item.refs != 0) {
                        radix_tree_tag_set(&root->fs_info->fs_roots_radix,
                                   (unsigned long)root->root_key.objectid,
                                   BTRFS_ROOT_TRANS_TAG);
                        radix_tree_tag_set(&root->fs_info->fs_roots_radix,
                                   (unsigned long)root->root_key.objectid,
                                   BTRFS_ROOT_DEFRAG_TAG);
                        root->commit_root = root->node;
                        get_bh(root->node);
                } else {
                        WARN_ON(1);
                }
                root->last_trans = running_trans_id;
        }
        return 0;
}

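/*
 * Allocate a transaction handle, join (or create) the running transaction
 * and record this root in it.
 */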
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
                                                   int num_blocks)
{
        struct btrfs_trans_handle *h =
                kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
        int ret;

        mutex_lock(&root->fs_info->trans_mutex);
        ret = join_transaction(root);
        BUG_ON(ret);

        record_root_in_trans(root);
        h->transid = root->fs_info->running_transaction->transid;
        h->transaction = root->fs_info->running_transaction;
        h->blocks_reserved = num_blocks;
        h->blocks_used = 0;
        h->block_group = NULL;
        h->alloc_exclude_nr = 0;
        h->alloc_exclude_start = 0;
        root->fs_info->running_transaction->use_count++;
        mutex_unlock(&root->fs_info->trans_mutex);
        return h;
}

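/*
 * Release a transaction handle: drop the writer count, wake anyone
 * waiting on this transaction and free the handle.
 */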
int btrfs_end_transaction(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root)
{
        struct btrfs_transaction *cur_trans;

        mutex_lock(&root->fs_info->trans_mutex);
        cur_trans = root->fs_info->running_transaction;
        WARN_ON(cur_trans != trans->transaction);
        WARN_ON(cur_trans->num_writers < 1);
        cur_trans->num_writers--;
        if (waitqueue_active(&cur_trans->writer_wait))
                wake_up(&cur_trans->writer_wait);
        put_transaction(cur_trans);
        mutex_unlock(&root->fs_info->trans_mutex);
        memset(trans, 0, sizeof(*trans));
        kmem_cache_free(btrfs_trans_handle_cachep, trans);
        return 0;
}

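/*
 * Write out every btree page dirtied by this transaction and wait for the
 * IO to finish.  Without a transaction, simply flush the whole btree inode.
 */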
int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root)
{
        unsigned long gang[16];
        int ret;
        int i;
        int err;
        int werr = 0;
        struct page *page;
        struct radix_tree_root *dirty_pages;
        struct inode *btree_inode = root->fs_info->btree_inode;

        if (!trans || !trans->transaction) {
                return filemap_write_and_wait(btree_inode->i_mapping);
        }
        dirty_pages = &trans->transaction->dirty_pages;
        while(1) {
                ret = find_first_radix_bit(dirty_pages, gang,
                                           0, ARRAY_SIZE(gang));
                if (!ret)
                        break;
                for (i = 0; i < ret; i++) {
                        /* FIXME EIO */
                        clear_radix_bit(dirty_pages, gang[i]);
                        page = find_lock_page(btree_inode->i_mapping,
                                              gang[i]);
                        if (!page)
                                continue;
                        if (PageWriteback(page)) {
                                if (PageDirty(page))
                                        wait_on_page_writeback(page);
                                else {
                                        unlock_page(page);
                                        page_cache_release(page);
                                        continue;
                                }
                        }
                        err = write_one_page(page, 0);
                        if (err)
                                werr = err;
                        page_cache_release(page);
                }
        }
        err = filemap_fdatawait(btree_inode->i_mapping);
        if (err)
                werr = err;
        return werr;
}

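/*
 * Write back the extent root's block groups and update the tree and extent
 * root items, looping until the extent root stops moving as a result of
 * its own updates.
 */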
int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans,
                            struct btrfs_root *root)
{
        int ret;
        u64 old_extent_block;
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_root *extent_root = fs_info->extent_root;
        btrfs_write_dirty_block_groups(trans, extent_root);
        while(1) {
                old_extent_block = btrfs_root_blocknr(&extent_root->root_item);
                if (old_extent_block == bh_blocknr(extent_root->node))
                        break;
                btrfs_set_root_blocknr(&extent_root->root_item,
                                       bh_blocknr(extent_root->node));
                ret = btrfs_update_root(trans, tree_root,
                                        &extent_root->root_key,
                                        &extent_root->root_item);
                BUG_ON(ret);
                btrfs_write_dirty_block_groups(trans, extent_root);
        }
        return 0;
}

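/*
 * Sleep until the given transaction has finished committing, dropping
 * trans_mutex while we wait.
 */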
static int wait_for_commit(struct btrfs_root *root,
                           struct btrfs_transaction *commit)
{
        DEFINE_WAIT(wait);
        mutex_lock(&root->fs_info->trans_mutex);
        while(!commit->commit_done) {
                prepare_to_wait(&commit->commit_wait, &wait,
                                TASK_UNINTERRUPTIBLE);
                if (commit->commit_done)
                        break;
                mutex_unlock(&root->fs_info->trans_mutex);
                schedule();
                mutex_lock(&root->fs_info->trans_mutex);
        }
        mutex_unlock(&root->fs_info->trans_mutex);
        finish_wait(&commit->commit_wait, &wait);
        return 0;
}

struct dirty_root {
        struct list_head list;
        struct btrfs_root *root;
};

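/*
 * Queue a root on the dead list so it can be dropped later.
 */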
int btrfs_add_dead_root(struct btrfs_root *root, struct list_head *dead_list)
{
        struct dirty_root *dirty;

        dirty = kmalloc(sizeof(*dirty), GFP_NOFS);
        if (!dirty)
                return -ENOMEM;
        dirty->root = root;
        list_add(&dirty->list, dead_list);
        return 0;
}

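/*
 * Walk the roots tagged in this transaction: insert the updated root under
 * a new key, drop one reference on the old copy, and collect old copies
 * whose last reference is gone on @list so drop_dirty_roots can reclaim
 * their blocks after the commit.
 */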
static int add_dirty_roots(struct btrfs_trans_handle *trans,
                           struct radix_tree_root *radix,
                           struct list_head *list)
{
        struct dirty_root *dirty;
        struct btrfs_root *gang[8];
        struct btrfs_root *root;
        int i;
        int ret;
        int err = 0;
        u32 refs;
        while(1) {
                ret = radix_tree_gang_lookup_tag(radix, (void **)gang, 0,
                                                 ARRAY_SIZE(gang),
                                                 BTRFS_ROOT_TRANS_TAG);
                if (ret == 0)
                        break;
                for (i = 0; i < ret; i++) {
                        root = gang[i];
                        radix_tree_tag_clear(radix,
                                   (unsigned long)root->root_key.objectid,
                                   BTRFS_ROOT_TRANS_TAG);
                        if (root->commit_root == root->node) {
                                WARN_ON(bh_blocknr(root->node) !=
                                        btrfs_root_blocknr(&root->root_item));
                                brelse(root->commit_root);
                                root->commit_root = NULL;
                                continue;
                        }
                        dirty = kmalloc(sizeof(*dirty), GFP_NOFS);
                        BUG_ON(!dirty);
                        dirty->root = kmalloc(sizeof(*dirty->root), GFP_NOFS);
                        BUG_ON(!dirty->root);

                        memset(&root->root_item.drop_progress, 0,
                               sizeof(struct btrfs_disk_key));
                        root->root_item.drop_level = 0;

                        memcpy(dirty->root, root, sizeof(*root));
                        dirty->root->node = root->commit_root;
                        root->commit_root = NULL;

                        root->root_key.offset = root->fs_info->generation;
                        btrfs_set_root_blocknr(&root->root_item,
                                               bh_blocknr(root->node));
                        err = btrfs_insert_root(trans, root->fs_info->tree_root,
                                                &root->root_key,
                                                &root->root_item);
                        if (err)
                                break;

                        refs = btrfs_root_refs(&dirty->root->root_item);
                        btrfs_set_root_refs(&dirty->root->root_item, refs - 1);
                        err = btrfs_update_root(trans, root->fs_info->tree_root,
                                                &dirty->root->root_key,
                                                &dirty->root->root_item);

                        BUG_ON(err);
                        if (refs == 1) {
                                list_add(&dirty->list, list);
                        } else {
                                WARN_ON(1);
                                kfree(dirty->root);
                                kfree(dirty);
                        }
                }
        }
        return err;
}

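/*
 * Run the leaf defragger over every root tagged for defrag, ending and
 * restarting the transaction around each -EAGAIN pass so other writers
 * are not held off for the whole defrag.
 */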
int btrfs_defrag_dirty_roots(struct btrfs_fs_info *info)
{
        struct btrfs_root *gang[1];
        struct btrfs_root *root;
        struct btrfs_root *tree_root = info->tree_root;
        struct btrfs_trans_handle *trans;
        int i;
        int ret;
        int err = 0;
        u64 last = 0;

        trans = btrfs_start_transaction(tree_root, 1);
        while(1) {
                ret = radix_tree_gang_lookup_tag(&info->fs_roots_radix,
                                                 (void **)gang, last,
                                                 ARRAY_SIZE(gang),
                                                 BTRFS_ROOT_DEFRAG_TAG);
                if (ret == 0)
                        break;
                for (i = 0; i < ret; i++) {
                        root = gang[i];
                        last = root->root_key.objectid + 1;
                        radix_tree_tag_clear(&info->fs_roots_radix,
                                   (unsigned long)root->root_key.objectid,
                                   BTRFS_ROOT_DEFRAG_TAG);
                        if (root->defrag_running)
                                continue;

                        while (1) {
                                mutex_lock(&root->fs_info->trans_mutex);
                                record_root_in_trans(root);
                                mutex_unlock(&root->fs_info->trans_mutex);

                                root->defrag_running = 1;
                                err = btrfs_defrag_leaves(trans, root, 1);
                                btrfs_end_transaction(trans, tree_root);
                                mutex_unlock(&info->fs_mutex);

                                btrfs_btree_balance_dirty(root);
                                cond_resched();

                                mutex_lock(&info->fs_mutex);
                                trans = btrfs_start_transaction(tree_root, 1);
                                if (err != -EAGAIN)
                                        break;
                        }
                        root->defrag_running = 0;
                        radix_tree_tag_clear(&info->fs_roots_radix,
                                   (unsigned long)root->root_key.objectid,
                                   BTRFS_ROOT_DEFRAG_TAG);
                }
        }
        btrfs_end_transaction(trans, tree_root);
        return err;
}

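/*
 * Free the blocks of every snapshot root on @list.  btrfs_drop_snapshot is
 * called in small steps, each in its own transaction, until it stops
 * returning -EAGAIN; the root item is then deleted from the tree root.
 */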
static int drop_dirty_roots(struct btrfs_root *tree_root,
                            struct list_head *list)
{
        struct dirty_root *dirty;
        struct btrfs_trans_handle *trans;
        int ret = 0;
        int err;

        while(!list_empty(list)) {
                mutex_lock(&tree_root->fs_info->fs_mutex);
                dirty = list_entry(list->next, struct dirty_root, list);
                list_del_init(&dirty->list);

                while(1) {
                        trans = btrfs_start_transaction(tree_root, 1);
                        ret = btrfs_drop_snapshot(trans, dirty->root);
                        if (ret != -EAGAIN) {
                                break;
                        }
                        err = btrfs_update_root(trans,
                                                tree_root,
                                                &dirty->root->root_key,
                                                &dirty->root->root_item);
                        if (err)
                                ret = err;
                        ret = btrfs_end_transaction(trans, tree_root);
                        BUG_ON(ret);
                        mutex_unlock(&tree_root->fs_info->fs_mutex);

                        btrfs_btree_balance_dirty(tree_root);
                        schedule();

                        mutex_lock(&tree_root->fs_info->fs_mutex);
                }
                BUG_ON(ret);
                ret = btrfs_del_root(trans, tree_root, &dirty->root->root_key);
                if (ret)
                        break;
                ret = btrfs_end_transaction(trans, tree_root);
                BUG_ON(ret);

                kfree(dirty->root);
                kfree(dirty);
                mutex_unlock(&tree_root->fs_info->fs_mutex);
                btrfs_btree_balance_dirty(tree_root);
                schedule();
        }
        return ret;
}

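/*
 * Commit the running transaction: wait for other writers to finish, write
 * the dirty tree roots and btree pages, then update the super block.  If
 * another commit is already in progress we simply wait for it instead.
 */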
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root)
{
        int ret = 0;
        struct btrfs_transaction *cur_trans;
        struct btrfs_transaction *prev_trans = NULL;
        struct list_head dirty_fs_roots;
        struct radix_tree_root pinned_copy;
        DEFINE_WAIT(wait);

        init_bit_radix(&pinned_copy);
        INIT_LIST_HEAD(&dirty_fs_roots);

        mutex_lock(&root->fs_info->trans_mutex);
        if (trans->transaction->in_commit) {
                cur_trans = trans->transaction;
                trans->transaction->use_count++;
                mutex_unlock(&root->fs_info->trans_mutex);
                btrfs_end_transaction(trans, root);

                mutex_unlock(&root->fs_info->fs_mutex);
                ret = wait_for_commit(root, cur_trans);
                BUG_ON(ret);
                put_transaction(cur_trans);
                mutex_lock(&root->fs_info->fs_mutex);
                return 0;
        }
        trans->transaction->in_commit = 1;
        cur_trans = trans->transaction;
        if (cur_trans->list.prev != &root->fs_info->trans_list) {
                prev_trans = list_entry(cur_trans->list.prev,
                                        struct btrfs_transaction, list);
                if (!prev_trans->commit_done) {
                        prev_trans->use_count++;
                        mutex_unlock(&root->fs_info->fs_mutex);
                        mutex_unlock(&root->fs_info->trans_mutex);

                        wait_for_commit(root, prev_trans);
                        put_transaction(prev_trans);

                        mutex_lock(&root->fs_info->fs_mutex);
                        mutex_lock(&root->fs_info->trans_mutex);
                }
        }
        while (trans->transaction->num_writers > 1) {
                WARN_ON(cur_trans != trans->transaction);
                prepare_to_wait(&trans->transaction->writer_wait, &wait,
                                TASK_UNINTERRUPTIBLE);
                if (trans->transaction->num_writers <= 1)
                        break;
                mutex_unlock(&root->fs_info->fs_mutex);
                mutex_unlock(&root->fs_info->trans_mutex);
                schedule();
                mutex_lock(&root->fs_info->fs_mutex);
                mutex_lock(&root->fs_info->trans_mutex);
                finish_wait(&trans->transaction->writer_wait, &wait);
        }
        finish_wait(&trans->transaction->writer_wait, &wait);
        WARN_ON(cur_trans != trans->transaction);
        ret = add_dirty_roots(trans, &root->fs_info->fs_roots_radix,
                              &dirty_fs_roots);
        BUG_ON(ret);

        ret = btrfs_commit_tree_roots(trans, root);
        BUG_ON(ret);

        cur_trans = root->fs_info->running_transaction;
        root->fs_info->running_transaction = NULL;
        btrfs_set_super_generation(&root->fs_info->super_copy,
                                   cur_trans->transid);
        btrfs_set_super_root(&root->fs_info->super_copy,
                             bh_blocknr(root->fs_info->tree_root->node));
        memcpy(root->fs_info->disk_super, &root->fs_info->super_copy,
               sizeof(root->fs_info->super_copy));

        btrfs_copy_pinned(root, &pinned_copy);

        mutex_unlock(&root->fs_info->trans_mutex);
        mutex_unlock(&root->fs_info->fs_mutex);
        ret = btrfs_write_and_wait_transaction(trans, root);
        BUG_ON(ret);
        write_ctree_super(trans, root);
        mutex_lock(&root->fs_info->fs_mutex);
        btrfs_finish_extent_commit(trans, root, &pinned_copy);
        mutex_lock(&root->fs_info->trans_mutex);
        cur_trans->commit_done = 1;
        wake_up(&cur_trans->commit_wait);
        put_transaction(cur_trans);
        put_transaction(cur_trans);
        if (root->fs_info->closing)
                list_splice_init(&root->fs_info->dead_roots, &dirty_fs_roots);
        else
                list_splice_init(&dirty_fs_roots, &root->fs_info->dead_roots);
        mutex_unlock(&root->fs_info->trans_mutex);
        kmem_cache_free(btrfs_trans_handle_cachep, trans);

        if (root->fs_info->closing) {
                mutex_unlock(&root->fs_info->fs_mutex);
                drop_dirty_roots(root->fs_info->tree_root, &dirty_fs_roots);
                mutex_lock(&root->fs_info->fs_mutex);
        }
        return ret;
}

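/*
 * Periodic work: once the running transaction is old enough, defrag the
 * dirty roots, commit, and drop any dead roots that have accumulated.
 */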
void btrfs_transaction_cleaner(struct work_struct *work)
{
        struct btrfs_fs_info *fs_info = container_of(work,
                                                     struct btrfs_fs_info,
                                                     trans_work.work);

        struct btrfs_root *root = fs_info->tree_root;
        struct btrfs_transaction *cur;
        struct btrfs_trans_handle *trans;
        struct list_head dirty_roots;
        unsigned long now;
        unsigned long delay = HZ * 30;
        int ret;

        INIT_LIST_HEAD(&dirty_roots);
        mutex_lock(&root->fs_info->fs_mutex);
        mutex_lock(&root->fs_info->trans_mutex);
        cur = root->fs_info->running_transaction;
        if (!cur) {
                mutex_unlock(&root->fs_info->trans_mutex);
                goto out;
        }
        now = get_seconds();
        if (now < cur->start_time || now - cur->start_time < 30) {
                mutex_unlock(&root->fs_info->trans_mutex);
                delay = HZ * 5;
                goto out;
        }
        mutex_unlock(&root->fs_info->trans_mutex);
        btrfs_defrag_dirty_roots(root->fs_info);
        trans = btrfs_start_transaction(root, 1);
        ret = btrfs_commit_transaction(trans, root);
out:
        mutex_unlock(&root->fs_info->fs_mutex);

        mutex_lock(&root->fs_info->trans_mutex);
        list_splice_init(&root->fs_info->dead_roots, &dirty_roots);
        mutex_unlock(&root->fs_info->trans_mutex);

        if (!list_empty(&dirty_roots)) {
                drop_dirty_roots(root, &dirty_roots);
        }
        btrfs_transaction_queue_work(root, delay);
}

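/* Schedule the transaction cleaner to run again after @delay jiffies. */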
void btrfs_transaction_queue_work(struct btrfs_root *root, int delay)
{
        queue_delayed_work(trans_wq, &root->fs_info->trans_work, delay);
}

void btrfs_transaction_flush_work(struct btrfs_root *root)
{
        cancel_rearming_delayed_workqueue(trans_wq, &root->fs_info->trans_work);
        flush_workqueue(trans_wq);
}

void __init btrfs_init_transaction_sys(void)
{
        trans_wq = create_workqueue("btrfs");
}

void __exit btrfs_exit_transaction_sys(void)
{
        destroy_workqueue(trans_wq);
}