Btrfs: fix error value being changed by mistake
[deliverable/linux.git] / fs/btrfs/ordered-data.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"

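/*
 * return the first byte past the end of 'entry', clamping to (u64)-1
 * when file_offset + len would overflow
 */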
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
        if (entry->file_offset + entry->len < entry->file_offset)
                return (u64)-1;
        return entry->file_offset + entry->len;
}

/* returns NULL if the insertion worked, or the existing node that
 * overlaps the given offset in the tree
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
                                   struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct btrfs_ordered_extent *entry;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

                if (file_offset < entry->file_offset)
                        p = &(*p)->rb_left;
                else if (file_offset >= entry_end(entry))
                        p = &(*p)->rb_right;
                else
                        return parent;
        }

        rb_link_node(node, parent, p);
        rb_insert_color(node, root);
        return NULL;
}

static void ordered_data_tree_panic(struct inode *inode, int errno,
                                    u64 offset)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        btrfs_panic(fs_info, errno, "Inconsistency in ordered tree at offset "
                    "%llu\n", (unsigned long long)offset);
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
                                     struct rb_node **prev_ret)
{
        struct rb_node *n = root->rb_node;
        struct rb_node *prev = NULL;
        struct rb_node *test;
        struct btrfs_ordered_extent *entry;
        struct btrfs_ordered_extent *prev_entry = NULL;

        while (n) {
                entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
                prev = n;
                prev_entry = entry;

                if (file_offset < entry->file_offset)
                        n = n->rb_left;
                else if (file_offset >= entry_end(entry))
                        n = n->rb_right;
                else
                        return n;
        }
        if (!prev_ret)
                return NULL;

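        /*
         * the node we stopped on ends before file_offset; walk forward to
         * the first entry that ends beyond it
         */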
        while (prev && file_offset >= entry_end(prev_entry)) {
                test = rb_next(prev);
                if (!test)
                        break;
                prev_entry = rb_entry(test, struct btrfs_ordered_extent,
                                      rb_node);
                if (file_offset < entry_end(prev_entry))
                        break;

                prev = test;
        }
        if (prev)
                prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
                                      rb_node);
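        /*
         * then walk backward until we stand on the closest entry that
         * ends at or before file_offset
         */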
        while (prev && file_offset < entry_end(prev_entry)) {
                test = rb_prev(prev);
                if (!test)
                        break;
                prev_entry = rb_entry(test, struct btrfs_ordered_extent,
                                      rb_node);
                prev = test;
        }
        *prev_ret = prev;
        return NULL;
}

/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
        if (file_offset < entry->file_offset ||
            entry->file_offset + entry->len <= file_offset)
                return 0;
        return 1;
}

static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
                          u64 len)
{
        if (file_offset + len <= entry->file_offset ||
            entry->file_offset + entry->len <= file_offset)
                return 0;
        return 1;
}

/*
 * find the first ordered struct that covers this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
                                          u64 file_offset)
{
        struct rb_root *root = &tree->tree;
        struct rb_node *prev = NULL;
        struct rb_node *ret;
        struct btrfs_ordered_extent *entry;

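        /*
         * tree->last caches the node from the previous search; it is
         * cleared whenever an entry is removed from the tree
         */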
        if (tree->last) {
                entry = rb_entry(tree->last, struct btrfs_ordered_extent,
                                 rb_node);
                if (offset_in_entry(entry, file_offset))
                        return tree->last;
        }
        ret = __tree_search(root, file_offset, &prev);
        if (!ret)
                ret = prev;
        if (ret)
                tree->last = ret;
        return ret;
}

/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
                                      u64 start, u64 len, u64 disk_len,
                                      int type, int dio, int compress_type)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry;

        tree = &BTRFS_I(inode)->ordered_tree;
        entry = kzalloc(sizeof(*entry), GFP_NOFS);
        if (!entry)
                return -ENOMEM;

        entry->file_offset = file_offset;
        entry->start = start;
        entry->len = len;
        entry->disk_len = disk_len;
        entry->bytes_left = len;
        entry->inode = igrab(inode);
        entry->compress_type = compress_type;
        if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
                set_bit(type, &entry->flags);

        if (dio)
                set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);

        /* one ref for the tree */
        atomic_set(&entry->refs, 1);
        init_waitqueue_head(&entry->wait);
        INIT_LIST_HEAD(&entry->list);
        INIT_LIST_HEAD(&entry->root_extent_list);

        trace_btrfs_ordered_extent_add(inode, entry);

        spin_lock_irq(&tree->lock);
        node = tree_insert(&tree->tree, file_offset,
                           &entry->rb_node);
        if (node)
                ordered_data_tree_panic(inode, -EEXIST, file_offset);
        spin_unlock_irq(&tree->lock);

        spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
        list_add_tail(&entry->root_extent_list,
                      &BTRFS_I(inode)->root->fs_info->ordered_extents);
        spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);

        return 0;
}

int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
                             u64 start, u64 len, u64 disk_len, int type)
{
        return __btrfs_add_ordered_extent(inode, file_offset, start, len,
                                          disk_len, type, 0,
                                          BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
                                 u64 start, u64 len, u64 disk_len, int type)
{
        return __btrfs_add_ordered_extent(inode, file_offset, start, len,
                                          disk_len, type, 1,
                                          BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
                                      u64 start, u64 len, u64 disk_len,
                                      int type, int compress_type)
{
        return __btrfs_add_ordered_extent(inode, file_offset, start, len,
                                          disk_len, type, 0,
                                          compress_type);
}
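
/*
 * Usage sketch (illustrative only, not part of this file): a write-path
 * caller that has just reserved a disk extent would typically do
 *
 *	ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
 *				       ram_size, ins.offset, 0);
 *	if (ret)
 *		goto out_reserve;
 *
 * and the endio code then calls btrfs_dec_test_ordered_pending() as the
 * IO for that range completes.  The names 'ins' and 'ram_size' are
 * placeholders for whatever the caller allocated.
 */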

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across them.
 */
void btrfs_add_ordered_sum(struct inode *inode,
                           struct btrfs_ordered_extent *entry,
                           struct btrfs_ordered_sum *sum)
{
        struct btrfs_ordered_inode_tree *tree;

        tree = &BTRFS_I(inode)->ordered_tree;
        spin_lock_irq(&tree->lock);
        list_add_tail(&sum->list, &entry->list);
        spin_unlock_irq(&tree->lock);
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO may span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete.  This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct inode *inode,
                                   struct btrfs_ordered_extent **cached,
                                   u64 *file_offset, u64 io_size, int uptodate)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;
        int ret;
        unsigned long flags;
        u64 dec_end;
        u64 dec_start;
        u64 to_dec;

        tree = &BTRFS_I(inode)->ordered_tree;
        spin_lock_irqsave(&tree->lock, flags);
        node = tree_search(tree, *file_offset);
        if (!node) {
                ret = 1;
                goto out;
        }

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
        if (!offset_in_entry(entry, *file_offset)) {
                ret = 1;
                goto out;
        }

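        /* account only for the part of the io range inside this entry */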
        dec_start = max(*file_offset, entry->file_offset);
        dec_end = min(*file_offset + io_size, entry->file_offset +
                      entry->len);
        *file_offset = dec_end;
        if (dec_start > dec_end) {
                printk(KERN_CRIT "bad ordering dec_start %llu end %llu\n",
                       (unsigned long long)dec_start,
                       (unsigned long long)dec_end);
        }
        to_dec = dec_end - dec_start;
        if (to_dec > entry->bytes_left) {
                printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
                       (unsigned long long)entry->bytes_left,
                       (unsigned long long)to_dec);
        }
        entry->bytes_left -= to_dec;
        if (!uptodate)
                set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

        if (entry->bytes_left == 0)
                ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
        else
                ret = 1;
out:
        if (!ret && cached && entry) {
                *cached = entry;
                atomic_inc(&entry->refs);
        }
        spin_unlock_irqrestore(&tree->lock, flags);
        return ret == 0;
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
                                   struct btrfs_ordered_extent **cached,
                                   u64 file_offset, u64 io_size, int uptodate)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;
        unsigned long flags;
        int ret;

        tree = &BTRFS_I(inode)->ordered_tree;
        spin_lock_irqsave(&tree->lock, flags);
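        /* a cached entry from the caller lets us skip the tree search */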
        if (cached && *cached) {
                entry = *cached;
                goto have_entry;
        }

        node = tree_search(tree, file_offset);
        if (!node) {
                ret = 1;
                goto out;
        }

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
        if (!offset_in_entry(entry, file_offset)) {
                ret = 1;
                goto out;
        }

        if (io_size > entry->bytes_left) {
                printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
                       (unsigned long long)entry->bytes_left,
                       (unsigned long long)io_size);
        }
        entry->bytes_left -= io_size;
        if (!uptodate)
                set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

        if (entry->bytes_left == 0)
                ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
        else
                ret = 1;
out:
        if (!ret && cached && entry) {
                *cached = entry;
                atomic_inc(&entry->refs);
        }
        spin_unlock_irqrestore(&tree->lock, flags);
        return ret == 0;
}

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
        struct list_head *cur;
        struct btrfs_ordered_sum *sum;

        trace_btrfs_ordered_extent_put(entry->inode, entry);

        if (atomic_dec_and_test(&entry->refs)) {
                if (entry->inode)
                        btrfs_add_delayed_iput(entry->inode);
                while (!list_empty(&entry->list)) {
                        cur = entry->list.next;
                        sum = list_entry(cur, struct btrfs_ordered_sum, list);
                        list_del(&sum->list);
                        kfree(sum);
                }
                kfree(entry);
        }
}

/*
 * remove an ordered extent from the tree.  No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct inode *inode,
                                 struct btrfs_ordered_extent *entry)
{
        struct btrfs_ordered_inode_tree *tree;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct rb_node *node;

        tree = &BTRFS_I(inode)->ordered_tree;
        spin_lock_irq(&tree->lock);
        node = &entry->rb_node;
        rb_erase(node, &tree->tree);
        tree->last = NULL;
        set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
        spin_unlock_irq(&tree->lock);

        spin_lock(&root->fs_info->ordered_extent_lock);
        list_del_init(&entry->root_extent_list);

        trace_btrfs_ordered_extent_remove(inode, entry);

        /*
         * we have no more ordered extents for this inode and
         * no dirty pages.  We can safely remove it from the
         * list of ordered extents
         */
        if (RB_EMPTY_ROOT(&tree->tree) &&
            !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
                list_del_init(&BTRFS_I(inode)->ordered_operations);
        }
        spin_unlock(&root->fs_info->ordered_extent_lock);
        wake_up(&entry->wait);
}

/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
void btrfs_wait_ordered_extents(struct btrfs_root *root,
                                int nocow_only, int delay_iput)
{
        struct list_head splice;
        struct list_head *cur;
        struct btrfs_ordered_extent *ordered;
        struct inode *inode;

        INIT_LIST_HEAD(&splice);

        spin_lock(&root->fs_info->ordered_extent_lock);
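        /* move the whole list onto a private head so the lock can be
         * dropped while each entry is processed
         */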
        list_splice_init(&root->fs_info->ordered_extents, &splice);
        while (!list_empty(&splice)) {
                cur = splice.next;
                ordered = list_entry(cur, struct btrfs_ordered_extent,
                                     root_extent_list);
                if (nocow_only &&
                    !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags) &&
                    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
                        list_move(&ordered->root_extent_list,
                                  &root->fs_info->ordered_extents);
                        cond_resched_lock(&root->fs_info->ordered_extent_lock);
                        continue;
                }

                list_del_init(&ordered->root_extent_list);
                atomic_inc(&ordered->refs);

                /*
                 * the inode may be getting freed (in sys_unlink path).
                 */
                inode = igrab(ordered->inode);

                spin_unlock(&root->fs_info->ordered_extent_lock);

                if (inode) {
                        btrfs_start_ordered_extent(inode, ordered, 1);
                        btrfs_put_ordered_extent(ordered);
                        if (delay_iput)
                                btrfs_add_delayed_iput(inode);
                        else
                                iput(inode);
                } else {
                        btrfs_put_ordered_extent(ordered);
                }

                spin_lock(&root->fs_info->ordered_extent_lock);
        }
        spin_unlock(&root->fs_info->ordered_extent_lock);
}

/*
 * this is used during transaction commit to write all the inodes
 * added to the ordered operation list.  These files must be fully on
 * disk before the transaction commits.
 *
 * we have two modes here, one is to just start the IO via filemap_flush
 * and the other is to wait for all the io.  When we wait, we have an
 * extra check to make sure the ordered operation list really is empty
 * before we return
 */
void btrfs_run_ordered_operations(struct btrfs_root *root, int wait)
{
        struct btrfs_inode *btrfs_inode;
        struct inode *inode;
        struct list_head splice;

        INIT_LIST_HEAD(&splice);

        mutex_lock(&root->fs_info->ordered_operations_mutex);
        spin_lock(&root->fs_info->ordered_extent_lock);
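        /*
         * inodes can be re-added to the list while the lock is dropped
         * below, so in wait mode we may splice and walk it more than once
         */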
again:
        list_splice_init(&root->fs_info->ordered_operations, &splice);

        while (!list_empty(&splice)) {
                btrfs_inode = list_entry(splice.next, struct btrfs_inode,
                                         ordered_operations);

                inode = &btrfs_inode->vfs_inode;

                list_del_init(&btrfs_inode->ordered_operations);

                /*
                 * the inode may be getting freed (in sys_unlink path).
                 */
                inode = igrab(inode);

                if (!wait && inode) {
                        list_add_tail(&BTRFS_I(inode)->ordered_operations,
                                      &root->fs_info->ordered_operations);
                }
                spin_unlock(&root->fs_info->ordered_extent_lock);

                if (inode) {
                        if (wait)
                                btrfs_wait_ordered_range(inode, 0, (u64)-1);
                        else
                                filemap_flush(inode->i_mapping);
                        btrfs_add_delayed_iput(inode);
                }

                cond_resched();
                spin_lock(&root->fs_info->ordered_extent_lock);
        }
        if (wait && !list_empty(&root->fs_info->ordered_operations))
                goto again;

        spin_unlock(&root->fs_info->ordered_extent_lock);
        mutex_unlock(&root->fs_info->ordered_operations_mutex);
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is set, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
                                struct btrfs_ordered_extent *entry,
                                int wait)
{
        u64 start = entry->file_offset;
        u64 end = start + entry->len - 1;

        trace_btrfs_ordered_extent_start(inode, entry);

        /*
         * pages in the range can be dirty, clean or writeback.  We
         * start IO on any dirty ones so the wait doesn't stall waiting
         * for pdflush to find them
         */
        if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
                filemap_fdatawrite_range(inode->i_mapping, start, end);
        if (wait) {
                wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
                                                 &entry->flags));
        }
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
        u64 end;
        u64 orig_end;
        struct btrfs_ordered_extent *ordered;
        int found;

        if (start + len < start) {
                orig_end = INT_LIMIT(loff_t);
        } else {
                orig_end = start + len - 1;
                if (orig_end > INT_LIMIT(loff_t))
                        orig_end = INT_LIMIT(loff_t);
        }

        /* start IO across the range first to instantiate any delalloc
         * extents
         */
        filemap_fdatawrite_range(inode->i_mapping, start, orig_end);

        /*
         * So with compression we will find and lock a dirty page and clear the
         * first one as dirty, setup an async extent, and immediately return
         * with the entire range locked but with nobody actually marked with
         * writeback.  So we can't just filemap_write_and_wait_range() and
         * expect it to work since it will just kick off a thread to do the
         * actual work.  So we need to call filemap_fdatawrite_range _again_
         * since it will wait on the page lock, which won't be unlocked until
         * after the pages have been marked as writeback and so we're good to go
         * from there.  We have to do this otherwise we'll miss the ordered
         * extents and that results in badness.  Please Josef, do not think you
         * know better and pull this out at some point in the future, it is
         * right and you are wrong.
         */
        if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
                     &BTRFS_I(inode)->runtime_flags))
                filemap_fdatawrite_range(inode->i_mapping, start, orig_end);

        filemap_fdatawait_range(inode->i_mapping, start, orig_end);

        end = orig_end;
        found = 0;
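        /* walk backward from the end of the range, waiting on each ordered
         * extent that overlaps it
         */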
        while (1) {
                ordered = btrfs_lookup_first_ordered_extent(inode, end);
                if (!ordered)
                        break;
                if (ordered->file_offset > orig_end) {
                        btrfs_put_ordered_extent(ordered);
                        break;
                }
                if (ordered->file_offset + ordered->len < start) {
                        btrfs_put_ordered_extent(ordered);
                        break;
                }
                found++;
                btrfs_start_ordered_extent(inode, ordered, 1);
                end = ordered->file_offset;
                btrfs_put_ordered_extent(ordered);
                if (end == 0 || end == start)
                        break;
                end--;
        }
}

/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
                                                         u64 file_offset)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;

        tree = &BTRFS_I(inode)->ordered_tree;
        spin_lock_irq(&tree->lock);
        node = tree_search(tree, file_offset);
        if (!node)
                goto out;

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
        if (!offset_in_entry(entry, file_offset))
                entry = NULL;
        if (entry)
                atomic_inc(&entry->refs);
out:
        spin_unlock_irq(&tree->lock);
        return entry;
}
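
/*
 * Note for callers (illustrative sketch): the reference taken above must
 * be dropped with btrfs_put_ordered_extent(), e.g.
 *
 *	ordered = btrfs_lookup_ordered_extent(inode, pos);
 *	if (ordered) {
 *		btrfs_start_ordered_extent(inode, ordered, 1);
 *		btrfs_put_ordered_extent(ordered);
 *	}
 */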

/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
                                                        u64 file_offset,
                                                        u64 len)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;

        tree = &BTRFS_I(inode)->ordered_tree;
        spin_lock_irq(&tree->lock);
        node = tree_search(tree, file_offset);
        if (!node) {
                node = tree_search(tree, file_offset + len);
                if (!node)
                        goto out;
        }

        while (1) {
                entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
                if (range_overlaps(entry, file_offset, len))
                        break;

                if (entry->file_offset >= file_offset + len) {
                        entry = NULL;
                        break;
                }
                entry = NULL;
                node = rb_next(node);
                if (!node)
                        break;
        }
out:
        if (entry)
                atomic_inc(&entry->refs);
        spin_unlock_irq(&tree->lock);
        return entry;
}

/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;

        tree = &BTRFS_I(inode)->ordered_tree;
        spin_lock_irq(&tree->lock);
        node = tree_search(tree, file_offset);
        if (!node)
                goto out;

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
        atomic_inc(&entry->refs);
out:
        spin_unlock_irq(&tree->lock);
        return entry;
}

/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
 */
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
                                struct btrfs_ordered_extent *ordered)
{
        struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
        u64 disk_i_size;
        u64 new_i_size;
        u64 i_size_test;
        u64 i_size = i_size_read(inode);
        struct rb_node *node;
        struct rb_node *prev = NULL;
        struct btrfs_ordered_extent *test;
        int ret = 1;

        if (ordered)
                offset = entry_end(ordered);
        else
                offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);

        spin_lock_irq(&tree->lock);
        disk_i_size = BTRFS_I(inode)->disk_i_size;

        /* truncate file */
        if (disk_i_size > i_size) {
                BTRFS_I(inode)->disk_i_size = i_size;
                ret = 0;
                goto out;
        }

        /*
         * if the disk i_size is already at the inode->i_size, or
         * this ordered extent is inside the disk i_size, we're done
         */
        if (disk_i_size == i_size || offset <= disk_i_size) {
                goto out;
        }

        /*
         * walk backward from this ordered extent to disk_i_size.
         * if we find an ordered extent then we can't update disk i_size
         * yet
         */
        if (ordered) {
                node = rb_prev(&ordered->rb_node);
        } else {
                prev = tree_search(tree, offset);
                /*
                 * we insert file extents without involving ordered struct,
                 * so there should be no ordered struct covering this offset
                 */
                if (prev) {
                        test = rb_entry(prev, struct btrfs_ordered_extent,
                                        rb_node);
                        BUG_ON(offset_in_entry(test, offset));
                }
                node = prev;
        }
        for (; node; node = rb_prev(node)) {
                test = rb_entry(node, struct btrfs_ordered_extent, rb_node);

                /* We treat this entry as if it doesn't exist */
                if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
                        continue;
                if (test->file_offset + test->len <= disk_i_size)
                        break;
                if (test->file_offset >= i_size)
                        break;
                if (test->file_offset >= disk_i_size)
                        goto out;
        }
        new_i_size = min_t(u64, offset, i_size);

        /*
         * at this point, we know we can safely update i_size to at least
         * the offset from this ordered extent.  But, we need to
         * walk forward and see if ios from higher up in the file have
         * finished.
         */
        if (ordered) {
                node = rb_next(&ordered->rb_node);
        } else {
                if (prev)
                        node = rb_next(prev);
                else
                        node = rb_first(&tree->tree);
        }

        /*
         * We are looking for an area between our current extent and the next
         * ordered extent to update the i_size to.  There are 3 cases here
         *
         * 1) We don't actually have anything and we can update to i_size.
         * 2) We have stuff but they already did their i_size update so again we
         * can just update to i_size.
         * 3) We have an outstanding ordered extent so the most we can update
         * our disk_i_size to is the start of the next offset.
         */
        i_size_test = i_size;
        for (; node; node = rb_next(node)) {
                test = rb_entry(node, struct btrfs_ordered_extent, rb_node);

                if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
                        continue;
                if (test->file_offset > offset) {
                        i_size_test = test->file_offset;
                        break;
                }
        }

        /*
         * i_size_test is the end of a region after this ordered
         * extent where there are no ordered extents, we can safely set
         * disk_i_size to this.
         */
        if (i_size_test > offset)
                new_i_size = min_t(u64, i_size_test, i_size);
        BTRFS_I(inode)->disk_i_size = new_i_size;
        ret = 0;
out:
        /*
         * We need to do this because we can't remove ordered extents until
         * after the i_disk_size has been updated and then the inode has been
         * updated to reflect the change, so we need to tell anybody who finds
         * this ordered extent that we've already done all the real work, we
         * just haven't completed all the other work.
         */
        if (ordered)
                set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags);
        spin_unlock_irq(&tree->lock);
        return ret;
}

/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
                           u32 *sum)
{
        struct btrfs_ordered_sum *ordered_sum;
        struct btrfs_sector_sum *sector_sums;
        struct btrfs_ordered_extent *ordered;
        struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
        unsigned long num_sectors;
        unsigned long i;
        u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
        int ret = 1;

        ordered = btrfs_lookup_ordered_extent(inode, offset);
        if (!ordered)
                return 1;

        spin_lock_irq(&tree->lock);
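        /*
         * each btrfs_ordered_sum covers a contiguous run of sectors starting
         * at ->bytenr; scan each one that could contain disk_bytenr
         */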
        list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
                if (disk_bytenr >= ordered_sum->bytenr) {
                        num_sectors = ordered_sum->len / sectorsize;
                        sector_sums = ordered_sum->sums;
                        for (i = 0; i < num_sectors; i++) {
                                if (sector_sums[i].bytenr == disk_bytenr) {
                                        *sum = sector_sums[i].sum;
                                        ret = 0;
                                        goto out;
                                }
                        }
                }
        }
out:
        spin_unlock_irq(&tree->lock);
        btrfs_put_ordered_extent(ordered);
        return ret;
}

/*
 * add a given inode to the list of inodes that must be fully on
 * disk before a transaction commit finishes.
 *
 * This basically gives us the ext3 style data=ordered mode, and it is mostly
 * used to make sure renamed files are fully on disk.
 *
 * It is a noop if the inode is already fully on disk.
 *
 * If trans is not null, we'll do a friendly check for a transaction that
 * is already flushing things and force the IO down ourselves.
 */
void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root, struct inode *inode)
{
        u64 last_mod;

        last_mod = max(BTRFS_I(inode)->generation, BTRFS_I(inode)->last_trans);

        /*
         * if this file hasn't been changed since the last transaction
         * commit, we can safely return without doing anything
         */
        if (last_mod < root->fs_info->last_trans_committed)
                return;

        /*
         * the transaction is already committing.  Just start the IO and
         * don't bother with all of this list nonsense
         */
        if (trans && root->fs_info->running_transaction->blocked) {
                btrfs_wait_ordered_range(inode, 0, (u64)-1);
                return;
        }

        spin_lock(&root->fs_info->ordered_extent_lock);
        if (list_empty(&BTRFS_I(inode)->ordered_operations)) {
                list_add_tail(&BTRFS_I(inode)->ordered_operations,
                              &root->fs_info->ordered_operations);
        }
        spin_unlock(&root->fs_info->ordered_extent_lock);
}