Btrfs: update hole handling v2
[deliverable/linux.git] fs/btrfs/inode.c
1 /*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
17 */
18
19 #include <linux/kernel.h>
20 #include <linux/bio.h>
21 #include <linux/buffer_head.h>
22 #include <linux/file.h>
23 #include <linux/fs.h>
24 #include <linux/pagemap.h>
25 #include <linux/highmem.h>
26 #include <linux/time.h>
27 #include <linux/init.h>
28 #include <linux/string.h>
29 #include <linux/smp_lock.h>
30 #include <linux/backing-dev.h>
31 #include <linux/mpage.h>
32 #include <linux/swap.h>
33 #include <linux/writeback.h>
34 #include <linux/statfs.h>
35 #include <linux/compat.h>
36 #include <linux/bit_spinlock.h>
37 #include <linux/version.h>
38 #include <linux/xattr.h>
39 #include <linux/posix_acl.h>
40 #include "ctree.h"
41 #include "disk-io.h"
42 #include "transaction.h"
43 #include "btrfs_inode.h"
44 #include "ioctl.h"
45 #include "print-tree.h"
46 #include "volumes.h"
47 #include "ordered-data.h"
48 #include "xattr.h"
49 #include "compat.h"
50 #include "tree-log.h"
51 #include "ref-cache.h"
52 #include "compression.h"
53
54 struct btrfs_iget_args {
55 u64 ino;
56 struct btrfs_root *root;
57 };
58
59 static struct inode_operations btrfs_dir_inode_operations;
60 static struct inode_operations btrfs_symlink_inode_operations;
61 static struct inode_operations btrfs_dir_ro_inode_operations;
62 static struct inode_operations btrfs_special_inode_operations;
63 static struct inode_operations btrfs_file_inode_operations;
64 static struct address_space_operations btrfs_aops;
65 static struct address_space_operations btrfs_symlink_aops;
66 static struct file_operations btrfs_dir_file_operations;
67 static struct extent_io_ops btrfs_extent_io_ops;
68
69 static struct kmem_cache *btrfs_inode_cachep;
70 struct kmem_cache *btrfs_trans_handle_cachep;
71 struct kmem_cache *btrfs_transaction_cachep;
72 struct kmem_cache *btrfs_bit_radix_cachep;
73 struct kmem_cache *btrfs_path_cachep;
74
75 #define S_SHIFT 12
76 static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
77 [S_IFREG >> S_SHIFT] = BTRFS_FT_REG_FILE,
78 [S_IFDIR >> S_SHIFT] = BTRFS_FT_DIR,
79 [S_IFCHR >> S_SHIFT] = BTRFS_FT_CHRDEV,
80 [S_IFBLK >> S_SHIFT] = BTRFS_FT_BLKDEV,
81 [S_IFIFO >> S_SHIFT] = BTRFS_FT_FIFO,
82 [S_IFSOCK >> S_SHIFT] = BTRFS_FT_SOCK,
83 [S_IFLNK >> S_SHIFT] = BTRFS_FT_SYMLINK,
84 };
85
86 static void btrfs_truncate(struct inode *inode);
87 static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
88
89 /*
90 * a very lame attempt at stopping writes when the FS is 85% full. There
91 * are countless ways this is incorrect, but it is better than nothing.
92 */
93 int btrfs_check_free_space(struct btrfs_root *root, u64 num_required,
94 int for_del)
95 {
96 u64 total;
97 u64 used;
98 u64 thresh;
99 unsigned long flags;
100 int ret = 0;
101
102 spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
103 total = btrfs_super_total_bytes(&root->fs_info->super_copy);
104 used = btrfs_super_bytes_used(&root->fs_info->super_copy);
105 if (for_del)
106 thresh = total * 90;
107 else
108 thresh = total * 85;
109
110 do_div(thresh, 100);
111
112 if (used + root->fs_info->delalloc_bytes + num_required > thresh)
113 ret = -ENOSPC;
114 spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
115 return ret;
116 }
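
The threshold math above reduces to this userspace sketch (hypothetical
names; plain 64-bit division stands in for the kernel's do_div(), which
exists because 32-bit targets lack a native 64-bit divide):

#include <stdint.h>

/* Return -ENOSPC when the new allocation would push usage past 85%
 * of the FS (90% for deletes, which need room to make progress). */
static int check_free_space_model(uint64_t total, uint64_t used,
				  uint64_t delalloc, uint64_t num_required,
				  int for_del)
{
	uint64_t thresh = total * (for_del ? 90 : 85);

	thresh /= 100;			/* do_div(thresh, 100) in the kernel */
	if (used + delalloc + num_required > thresh)
		return -28;		/* -ENOSPC */
	return 0;
}
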
117
118 /*
119 * this does all the hard work for inserting an inline extent into
120 * the btree. The caller should have done a btrfs_drop_extents so that
121 * no overlapping inline items exist in the btree
122 */
123 static int noinline insert_inline_extent(struct btrfs_trans_handle *trans,
124 struct btrfs_root *root, struct inode *inode,
125 u64 start, size_t size, size_t compressed_size,
126 struct page **compressed_pages)
127 {
128 struct btrfs_key key;
129 struct btrfs_path *path;
130 struct extent_buffer *leaf;
131 struct page *page = NULL;
132 char *kaddr;
133 unsigned long ptr;
134 struct btrfs_file_extent_item *ei;
135 int err = 0;
136 int ret;
137 size_t cur_size = size;
138 size_t datasize;
139 unsigned long offset;
140 int use_compress = 0;
141
142 if (compressed_size && compressed_pages) {
143 use_compress = 1;
144 cur_size = compressed_size;
145 }
146
147 path = btrfs_alloc_path();
148 if (!path)
149 return -ENOMEM;
150 btrfs_set_trans_block_group(trans, inode);
151
152 key.objectid = inode->i_ino;
153 key.offset = start;
154 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
155
156 datasize = btrfs_file_extent_calc_inline_size(cur_size);
157
158 inode_add_bytes(inode, size);
159 ret = btrfs_insert_empty_item(trans, root, path, &key,
160 datasize);
161
162 if (ret) {
163 err = ret;
164 printk("got bad ret %d\n", ret);
165 goto fail;
166 }
167 leaf = path->nodes[0];
168 ei = btrfs_item_ptr(leaf, path->slots[0],
169 struct btrfs_file_extent_item);
170 btrfs_set_file_extent_generation(leaf, ei, trans->transid);
171 btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
172 btrfs_set_file_extent_encryption(leaf, ei, 0);
173 btrfs_set_file_extent_other_encoding(leaf, ei, 0);
174 btrfs_set_file_extent_ram_bytes(leaf, ei, size);
175 ptr = btrfs_file_extent_inline_start(ei);
176
177 if (use_compress) {
178 struct page *cpage;
179 int i = 0;
180 while(compressed_size > 0) {
181 cpage = compressed_pages[i];
182 cur_size = min(compressed_size,
183 PAGE_CACHE_SIZE);
184
185 kaddr = kmap(cpage);
186 write_extent_buffer(leaf, kaddr, ptr, cur_size);
187 kunmap(cpage);
188
189 i++;
190 ptr += cur_size;
191 compressed_size -= cur_size;
192 }
193 btrfs_set_file_extent_compression(leaf, ei,
194 BTRFS_COMPRESS_ZLIB);
195 } else {
196 page = find_get_page(inode->i_mapping,
197 start >> PAGE_CACHE_SHIFT);
198 btrfs_set_file_extent_compression(leaf, ei, 0);
199 kaddr = kmap_atomic(page, KM_USER0);
200 offset = start & (PAGE_CACHE_SIZE - 1);
201 write_extent_buffer(leaf, kaddr + offset, ptr, size);
202 kunmap_atomic(kaddr, KM_USER0);
203 page_cache_release(page);
204 }
205 btrfs_mark_buffer_dirty(leaf);
206 btrfs_free_path(path);
207
208 BTRFS_I(inode)->disk_i_size = inode->i_size;
209 btrfs_update_inode(trans, root, inode);
210 return 0;
211 fail:
212 btrfs_free_path(path);
213 return err;
214 }
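
The compressed-page walk in the middle of insert_inline_extent() is just a
chunked copy; a self-contained model (the kernel copies into the leaf via
kmap()/write_extent_buffer() rather than memcpy(), and PAGE_CACHE_SIZE is
modeled by a fixed 4K constant here):

#include <string.h>

#define MODEL_PAGE_SIZE 4096UL

/* Copy compressed_size bytes scattered across page-sized buffers into
 * dst, one chunk of at most a page at a time. */
static void copy_compressed_model(char *dst, char **pages,
				  size_t compressed_size)
{
	int i = 0;

	while (compressed_size > 0) {
		size_t cur = compressed_size < MODEL_PAGE_SIZE ?
			     compressed_size : MODEL_PAGE_SIZE;

		memcpy(dst, pages[i++], cur);
		dst += cur;
		compressed_size -= cur;
	}
}
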
215
216
217 /*
218 * conditionally insert an inline extent into the file. This
219 * does the checks required to make sure the data is small enough
220 * to fit as an inline extent.
221 */
222 static int cow_file_range_inline(struct btrfs_trans_handle *trans,
223 struct btrfs_root *root,
224 struct inode *inode, u64 start, u64 end,
225 size_t compressed_size,
226 struct page **compressed_pages)
227 {
228 u64 isize = i_size_read(inode);
229 u64 actual_end = min(end + 1, isize);
230 u64 inline_len = actual_end - start;
231 u64 aligned_end = (end + root->sectorsize - 1) &
232 ~((u64)root->sectorsize - 1);
233 u64 hint_byte;
234 u64 data_len = inline_len;
235 int ret;
236
237 if (compressed_size)
238 data_len = compressed_size;
239
240 if (start > 0 ||
241 data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
242 (!compressed_size &&
243 (actual_end & (root->sectorsize - 1)) == 0) ||
244 end + 1 < isize ||
245 data_len > root->fs_info->max_inline) {
246 return 1;
247 }
248
249 mutex_lock(&BTRFS_I(inode)->extent_mutex);
250 ret = btrfs_drop_extents(trans, root, inode, start,
251 aligned_end, aligned_end, &hint_byte);
252 BUG_ON(ret);
253
254 if (isize > actual_end)
255 inline_len = min_t(u64, isize, actual_end);
256 ret = insert_inline_extent(trans, root, inode, start,
257 inline_len, compressed_size,
258 compressed_pages);
259 BUG_ON(ret);
260 btrfs_drop_extent_cache(inode, start, aligned_end, 0);
261 mutex_unlock(&BTRFS_I(inode)->extent_mutex);
262 return 0;
263 }
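
The early-return tests in cow_file_range_inline() can be read as a single
predicate. A sketch with the limits passed in as parameters (the real code
takes them from BTRFS_MAX_INLINE_DATA_SIZE() and fs_info->max_inline):

#include <stdint.h>

/* Nonzero means the range cannot become an inline extent: it must start
 * at file offset 0, fit under both the leaf limit and the mount-time
 * max_inline cap, cover the tail of the file, and an uncompressed range
 * whose end is already sector aligned gains nothing from inlining. */
static int inline_rejected(uint64_t start, uint64_t end, uint64_t isize,
			   uint64_t data_len, int compressed,
			   uint64_t sectorsize, uint64_t leaf_limit,
			   uint64_t max_inline)
{
	uint64_t actual_end = (end + 1 < isize) ? end + 1 : isize;

	return start > 0 ||
	       data_len >= leaf_limit ||
	       (!compressed && (actual_end & (sectorsize - 1)) == 0) ||
	       end + 1 < isize ||
	       data_len > max_inline;
}
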
264
265 /*
266 * when extent_io.c finds a delayed allocation range in the file,
267 * the callbacks end up in this code. The basic idea is to
268 * allocate extents on disk for the range, and create ordered data structs
269 * in ram to track those extents.
270 *
271 * locked_page is the page that writepage had locked already. We use
272 * it to make sure we don't do extra locks or unlocks.
273 *
274 * *page_started is set to one if we unlock locked_page and do everything
275 * required to start IO on it. It may be clean and already done with
276 * IO when we return.
277 */
278 static int cow_file_range(struct inode *inode, struct page *locked_page,
279 u64 start, u64 end, int *page_started)
280 {
281 struct btrfs_root *root = BTRFS_I(inode)->root;
282 struct btrfs_trans_handle *trans;
283 u64 alloc_hint = 0;
284 u64 num_bytes;
285 unsigned long ram_size;
286 u64 orig_start;
287 u64 disk_num_bytes;
288 u64 cur_alloc_size;
289 u64 blocksize = root->sectorsize;
290 u64 actual_end;
291 struct btrfs_key ins;
292 struct extent_map *em;
293 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
294 int ret = 0;
295 struct page **pages = NULL;
296 unsigned long nr_pages;
297 unsigned long nr_pages_ret = 0;
298 unsigned long total_compressed = 0;
299 unsigned long total_in = 0;
300 unsigned long max_compressed = 128 * 1024;
301 unsigned long max_uncompressed = 256 * 1024;
302 int i;
303 int will_compress;
304
305 trans = btrfs_join_transaction(root, 1);
306 BUG_ON(!trans);
307 btrfs_set_trans_block_group(trans, inode);
308 orig_start = start;
309
310 /*
311 * compression made this loop a bit ugly, but the basic idea is to
312 * compress some pages but keep the total size of the compressed
313 * extent relatively small. If compression is off, this goto target
314 * is never used.
315 */
316 again:
317 will_compress = 0;
318 nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
319 nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);
320
321 actual_end = min_t(u64, i_size_read(inode), end + 1);
322 total_compressed = actual_end - start;
323
324 /* we want to make sure that the amount of ram required to uncompress
325 * an extent is reasonable, so we limit the total size in ram
326 * of a compressed extent to 256k
327 */
328 total_compressed = min(total_compressed, max_uncompressed);
329 num_bytes = (end - start + blocksize) & ~(blocksize - 1);
330 num_bytes = max(blocksize, num_bytes);
331 disk_num_bytes = num_bytes;
332 total_in = 0;
333 ret = 0;
334
335 /* we do compression for mount -o compress and when the
336 * inode has not been flagged as nocompress
337 */
338 if (!btrfs_test_flag(inode, NOCOMPRESS) &&
339 btrfs_test_opt(root, COMPRESS)) {
340 WARN_ON(pages);
341 pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
342
343 /* we want to make sure the amount of IO required to satisfy
344 * a random read is reasonably small, so we limit the size
345 * of a compressed extent to 128k
346 */
347 ret = btrfs_zlib_compress_pages(inode->i_mapping, start,
348 total_compressed, pages,
349 nr_pages, &nr_pages_ret,
350 &total_in,
351 &total_compressed,
352 max_compressed);
353
354 if (!ret) {
355 unsigned long offset = total_compressed &
356 (PAGE_CACHE_SIZE - 1);
357 struct page *page = pages[nr_pages_ret - 1];
358 char *kaddr;
359
360 /* zero the tail end of the last page, we might be
361 * sending it down to disk
362 */
363 if (offset) {
364 kaddr = kmap_atomic(page, KM_USER0);
365 memset(kaddr + offset, 0,
366 PAGE_CACHE_SIZE - offset);
367 kunmap_atomic(kaddr, KM_USER0);
368 }
369 will_compress = 1;
370 }
371 }
372 if (start == 0) {
373 /* let's try to make an inline extent */
374 if (ret || total_in < (end - start + 1)) {
375 /* we didn't compress the entire range, try
376 * to make an uncompressed inline extent. This
377 * is almost sure to fail, but maybe inline sizes
378 * will get bigger later
379 */
380 ret = cow_file_range_inline(trans, root, inode,
381 start, end, 0, NULL);
382 } else {
383 ret = cow_file_range_inline(trans, root, inode,
384 start, end,
385 total_compressed, pages);
386 }
387 if (ret == 0) {
388 extent_clear_unlock_delalloc(inode,
389 &BTRFS_I(inode)->io_tree,
390 start, end, NULL,
391 1, 1, 1);
392 *page_started = 1;
393 ret = 0;
394 goto free_pages_out;
395 }
396 }
397
398 if (will_compress) {
399 /*
400 * we aren't doing an inline extent, so round the compressed size
401 * up to a block size boundary so the allocator does sane
402 * things
403 */
404 total_compressed = (total_compressed + blocksize - 1) &
405 ~(blocksize - 1);
406
407 /*
408 * one last check to make sure the compression is really a
409 * win, compare the page count read with the blocks on disk
410 */
411 total_in = (total_in + PAGE_CACHE_SIZE - 1) &
412 ~(PAGE_CACHE_SIZE - 1);
413 if (total_compressed >= total_in) {
414 will_compress = 0;
415 } else {
416 disk_num_bytes = total_compressed;
417 num_bytes = total_in;
418 }
419 }
420 if (!will_compress && pages) {
421 /*
422 * the compression code ran but failed to make things smaller,
423 * free any pages it allocated and our page pointer array
424 */
425 for (i = 0; i < nr_pages_ret; i++) {
426 page_cache_release(pages[i]);
427 }
428 kfree(pages);
429 pages = NULL;
430 total_compressed = 0;
431 nr_pages_ret = 0;
432
433 /* flag the file so we don't compress in the future */
434 btrfs_set_flag(inode, NOCOMPRESS);
435 }
436
437 BUG_ON(disk_num_bytes >
438 btrfs_super_total_bytes(&root->fs_info->super_copy));
439
440 mutex_lock(&BTRFS_I(inode)->extent_mutex);
441 btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
442 mutex_unlock(&BTRFS_I(inode)->extent_mutex);
443
444 while(disk_num_bytes > 0) {
445 unsigned long min_bytes;
446
447 /*
448 * the max size of a compressed extent is pretty small,
449 * make the code a little less complex by forcing
450 * the allocator to find a whole compressed extent at once
451 */
452 if (will_compress)
453 min_bytes = disk_num_bytes;
454 else
455 min_bytes = root->sectorsize;
456
457 cur_alloc_size = min(disk_num_bytes, root->fs_info->max_extent);
458 ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
459 min_bytes, 0, alloc_hint,
460 (u64)-1, &ins, 1);
461 if (ret) {
462 WARN_ON(1);
463 goto free_pages_out_fail;
464 }
465 em = alloc_extent_map(GFP_NOFS);
466 em->start = start;
467
468 if (will_compress) {
469 ram_size = num_bytes;
470 em->len = num_bytes;
471 } else {
472 /* ramsize == disk size */
473 ram_size = ins.offset;
474 em->len = ins.offset;
475 }
476
477 em->block_start = ins.objectid;
478 em->block_len = ins.offset;
479 em->bdev = root->fs_info->fs_devices->latest_bdev;
480
481 mutex_lock(&BTRFS_I(inode)->extent_mutex);
482 set_bit(EXTENT_FLAG_PINNED, &em->flags);
483
484 if (will_compress)
485 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
486
487 while(1) {
488 spin_lock(&em_tree->lock);
489 ret = add_extent_mapping(em_tree, em);
490 spin_unlock(&em_tree->lock);
491 if (ret != -EEXIST) {
492 free_extent_map(em);
493 break;
494 }
495 btrfs_drop_extent_cache(inode, start,
496 start + ram_size - 1, 0);
497 }
498 mutex_unlock(&BTRFS_I(inode)->extent_mutex);
499
500 cur_alloc_size = ins.offset;
501 ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
502 ram_size, cur_alloc_size, 0,
503 will_compress);
504 BUG_ON(ret);
505
506 if (disk_num_bytes < cur_alloc_size) {
507 printk("num_bytes %Lu cur_alloc %Lu\n", disk_num_bytes,
508 cur_alloc_size);
509 break;
510 }
511
512 if (will_compress) {
513 /*
514 * we're doing compression, and we need to
515 * submit the compressed extents down to the device.
516 *
517 * We lock down all the file pages, clearing their
518 * dirty bits and setting them writeback. Everyone
519 * that wants to modify the page will wait on the
520 * ordered extent above.
521 *
522 * The writeback bits on the file pages are
523 * cleared when the compressed pages are on disk
524 */
525 btrfs_end_transaction(trans, root);
526
527 if (start <= page_offset(locked_page) &&
528 page_offset(locked_page) < start + ram_size) {
529 *page_started = 1;
530 }
531
532 extent_clear_unlock_delalloc(inode,
533 &BTRFS_I(inode)->io_tree,
534 start,
535 start + ram_size - 1,
536 NULL, 1, 1, 0);
537
538 ret = btrfs_submit_compressed_write(inode, start,
539 ram_size, ins.objectid,
540 cur_alloc_size, pages,
541 nr_pages_ret);
542
543 BUG_ON(ret);
544 trans = btrfs_join_transaction(root, 1);
545 if (start + ram_size < end) {
546 start += ram_size;
547 alloc_hint = ins.objectid + ins.offset;
548 /* pages will be freed at end_bio time */
549 pages = NULL;
550 goto again;
551 } else {
552 /* we've written everything, time to go */
553 break;
554 }
555 }
556 /* we're not doing compressed IO, don't unlock the first
557 * page (which the caller expects to stay locked), don't
558 * clear any dirty bits and don't set any writeback bits
559 */
560 extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
561 start, start + ram_size - 1,
562 locked_page, 0, 0, 0);
563 disk_num_bytes -= cur_alloc_size;
564 num_bytes -= cur_alloc_size;
565 alloc_hint = ins.objectid + ins.offset;
566 start += cur_alloc_size;
567 }
568
569 ret = 0;
570 out:
571 btrfs_end_transaction(trans, root);
572
573 return ret;
574
575 free_pages_out_fail:
576 extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
577 start, end, locked_page, 0, 0, 0);
578 free_pages_out:
579 for (i = 0; i < nr_pages_ret; i++)
580 page_cache_release(pages[i]);
581 if (pages)
582 kfree(pages);
583
584 goto out;
585 }
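
The sizing math near the top of cow_file_range() is ordinary power-of-two
rounding on an inclusive [start, end] range; a runnable illustration:

#include <stdint.h>
#include <stdio.h>

/* (end - start + blocksize) & ~(blocksize - 1) rounds the inclusive
 * range length up to whole blocks; blocksize must be a power of two. */
static uint64_t round_range_to_blocks(uint64_t start, uint64_t end,
				      uint64_t blocksize)
{
	uint64_t num_bytes = (end - start + blocksize) & ~(blocksize - 1);

	return num_bytes > blocksize ? num_bytes : blocksize;
}

int main(void)
{
	/* bytes 0..1000 on a 4K filesystem still consume one 4K block */
	printf("%llu\n",
	       (unsigned long long)round_range_to_blocks(0, 1000, 4096));
	return 0;
}
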
586
587 /*
588 * called during nocow writeback. This checks for snapshots or COW copies
589 * of the extents that exist in the file, and COWs the file as required.
590 *
591 * If no cow copies or snapshots exist, we write directly to the existing
592 * blocks on disk
593 */
594 static int run_delalloc_nocow(struct inode *inode, struct page *locked_page,
595 u64 start, u64 end, int *page_started)
596 {
597 u64 extent_start;
598 u64 extent_end;
599 u64 bytenr;
600 u64 loops = 0;
601 u64 total_fs_bytes;
602 struct btrfs_root *root = BTRFS_I(inode)->root;
603 struct btrfs_block_group_cache *block_group;
604 struct btrfs_trans_handle *trans;
605 struct extent_buffer *leaf;
606 int found_type;
607 struct btrfs_path *path;
608 struct btrfs_file_extent_item *item;
609 int ret;
610 int err = 0;
611 struct btrfs_key found_key;
612
613 total_fs_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
614 path = btrfs_alloc_path();
615 BUG_ON(!path);
616 trans = btrfs_join_transaction(root, 1);
617 BUG_ON(!trans);
618 again:
619 ret = btrfs_lookup_file_extent(NULL, root, path,
620 inode->i_ino, start, 0);
621 if (ret < 0) {
622 err = ret;
623 goto out;
624 }
625
626 if (ret != 0) {
627 if (path->slots[0] == 0)
628 goto not_found;
629 path->slots[0]--;
630 }
631
632 leaf = path->nodes[0];
633 item = btrfs_item_ptr(leaf, path->slots[0],
634 struct btrfs_file_extent_item);
635
636 /* are we inside the extent that was found? */
637 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
638 found_type = btrfs_key_type(&found_key);
639 if (found_key.objectid != inode->i_ino ||
640 found_type != BTRFS_EXTENT_DATA_KEY)
641 goto not_found;
642
643 found_type = btrfs_file_extent_type(leaf, item);
644 extent_start = found_key.offset;
645 if (found_type == BTRFS_FILE_EXTENT_REG) {
646 u64 extent_num_bytes;
647
648 extent_num_bytes = btrfs_file_extent_num_bytes(leaf, item);
649 extent_end = extent_start + extent_num_bytes;
650 err = 0;
651
652 if (btrfs_file_extent_compression(leaf, item) ||
653 btrfs_file_extent_encryption(leaf, item) ||
654 btrfs_file_extent_other_encoding(leaf, item))
655 goto not_found;
656
657 if (loops && start != extent_start)
658 goto not_found;
659
660 if (start < extent_start || start >= extent_end)
661 goto not_found;
662
663 bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
664 if (bytenr == 0)
665 goto not_found;
666
667 if (btrfs_cross_ref_exists(trans, root, &found_key, bytenr))
668 goto not_found;
669 /*
670 * we may be called by the resizer, make sure we're inside
671 * the limits of the FS
672 */
673 block_group = btrfs_lookup_block_group(root->fs_info,
674 bytenr);
675 if (!block_group || block_group->ro)
676 goto not_found;
677
678 bytenr += btrfs_file_extent_offset(leaf, item);
679 extent_num_bytes = min(end + 1, extent_end) - start;
680 ret = btrfs_add_ordered_extent(inode, start, bytenr,
681 extent_num_bytes,
682 extent_num_bytes, 1, 0);
683 if (ret) {
684 err = ret;
685 goto out;
686 }
687
688 btrfs_release_path(root, path);
689 start = extent_end;
690 if (start <= end) {
691 loops++;
692 goto again;
693 }
694 } else {
695 not_found:
696 btrfs_end_transaction(trans, root);
697 btrfs_free_path(path);
698 return cow_file_range(inode, locked_page, start, end,
699 page_started);
700 }
701 out:
702 WARN_ON(err);
703 btrfs_end_transaction(trans, root);
704 btrfs_free_path(path);
705 return err;
706 }
707
708 /*
709 * extent_io.c call back to do delayed allocation processing
710 */
711 static int run_delalloc_range(struct inode *inode, struct page *locked_page,
712 u64 start, u64 end, int *page_started)
713 {
714 struct btrfs_root *root = BTRFS_I(inode)->root;
715 int ret;
716
717 if (btrfs_test_opt(root, NODATACOW) ||
718 btrfs_test_flag(inode, NODATACOW))
719 ret = run_delalloc_nocow(inode, locked_page, start, end,
720 page_started);
721 else
722 ret = cow_file_range(inode, locked_page, start, end,
723 page_started);
724
725 return ret;
726 }
727
728 /*
729 * extent_io.c set_bit_hook, used to track delayed allocation
730 * bytes in this file, and to maintain the list of inodes that
731 * have pending delalloc work to be done.
732 */
733 int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
734 unsigned long old, unsigned long bits)
735 {
736 unsigned long flags;
737 if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
738 struct btrfs_root *root = BTRFS_I(inode)->root;
739 spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
740 BTRFS_I(inode)->delalloc_bytes += end - start + 1;
741 root->fs_info->delalloc_bytes += end - start + 1;
742 if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
743 list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
744 &root->fs_info->delalloc_inodes);
745 }
746 spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
747 }
748 return 0;
749 }
750
751 /*
752 * extent_io.c clear_bit_hook, see set_bit_hook for why
753 */
754 int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end,
755 unsigned long old, unsigned long bits)
756 {
757 if ((old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
758 struct btrfs_root *root = BTRFS_I(inode)->root;
759 unsigned long flags;
760
761 spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
762 if (end - start + 1 > root->fs_info->delalloc_bytes) {
763 printk("warning: delalloc account %Lu %Lu\n",
764 end - start + 1, root->fs_info->delalloc_bytes);
765 root->fs_info->delalloc_bytes = 0;
766 BTRFS_I(inode)->delalloc_bytes = 0;
767 } else {
768 root->fs_info->delalloc_bytes -= end - start + 1;
769 BTRFS_I(inode)->delalloc_bytes -= end - start + 1;
770 }
771 if (BTRFS_I(inode)->delalloc_bytes == 0 &&
772 !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
773 list_del_init(&BTRFS_I(inode)->delalloc_inodes);
774 }
775 spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
776 }
777 return 0;
778 }
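
Both hooks above maintain the same counter pair; a condensed model of the
clear-side underflow guard (hypothetical names, locking omitted):

#include <stdint.h>
#include <stdio.h>

/* Mirror of btrfs_clear_bit_hook()'s accounting: if the ranges ever get
 * out of sync, clamp to zero instead of wrapping the u64 counters. */
static void clear_delalloc_model(uint64_t *fs_delalloc,
				 uint64_t *inode_delalloc,
				 uint64_t start, uint64_t end)
{
	uint64_t len = end - start + 1;

	if (len > *fs_delalloc) {
		fprintf(stderr, "warning: delalloc account %llu %llu\n",
			(unsigned long long)len,
			(unsigned long long)*fs_delalloc);
		*fs_delalloc = 0;
		*inode_delalloc = 0;
	} else {
		*fs_delalloc -= len;
		*inode_delalloc -= len;
	}
}
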
779
780 /*
781 * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
782 * we don't create bios that span stripes or chunks
783 */
784 int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
785 size_t size, struct bio *bio,
786 unsigned long bio_flags)
787 {
788 struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
789 struct btrfs_mapping_tree *map_tree;
790 u64 logical = (u64)bio->bi_sector << 9;
791 u64 length = 0;
792 u64 map_length;
793 int ret;
794
795 length = bio->bi_size;
796 map_tree = &root->fs_info->mapping_tree;
797 map_length = length;
798 ret = btrfs_map_block(map_tree, READ, logical,
799 &map_length, NULL, 0);
800
801 if (map_length < length + size) {
802 return 1;
803 }
804 return 0;
805 }
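
The merge test boils down to one comparison: after btrfs_map_block()
reports how many contiguous bytes the device can take at this logical
address (map_length above), the page may only be merged if the existing
bio plus the new bytes still fit. A minimal model:

#include <stdint.h>
#include <stddef.h>

/* Return 1 (reject the merge) when bio_size existing bytes plus size
 * new bytes would cross the end of the contiguous mapping. */
static int bio_would_span_stripe(uint64_t contig_map_len, size_t bio_size,
				 size_t size)
{
	return contig_map_len < (uint64_t)bio_size + size;
}
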
806
807 /*
808 * in order to insert checksums into the metadata in large chunks,
809 * we wait until bio submission time. All the pages in the bio are
810 * checksummed and sums are attached onto the ordered extent record.
811 *
812 * At IO completion time the sums attached to the ordered extent record
813 * are inserted into the btree
814 */
815 int __btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
816 int mirror_num, unsigned long bio_flags)
817 {
818 struct btrfs_root *root = BTRFS_I(inode)->root;
819 int ret = 0;
820
821 ret = btrfs_csum_one_bio(root, inode, bio);
822 BUG_ON(ret);
823
824 return btrfs_map_bio(root, rw, bio, mirror_num, 1);
825 }
826
827 /*
828 * extent_io.c submission hook. This does the right thing for csum calculation on write,
829 * or reading the csums from the tree before a read
830 */
831 int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
832 int mirror_num, unsigned long bio_flags)
833 {
834 struct btrfs_root *root = BTRFS_I(inode)->root;
835 int ret = 0;
836 int skip_sum;
837
838 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
839 BUG_ON(ret);
840
841 skip_sum = btrfs_test_opt(root, NODATASUM) ||
842 btrfs_test_flag(inode, NODATASUM);
843
844 if (!(rw & (1 << BIO_RW))) {
845 if (!skip_sum)
846 btrfs_lookup_bio_sums(root, inode, bio);
847
848 if (bio_flags & EXTENT_BIO_COMPRESSED)
849 return btrfs_submit_compressed_read(inode, bio,
850 mirror_num, bio_flags);
851 goto mapit;
852 } else if (!skip_sum) {
853 /* we're doing a write, do the async checksumming */
854 return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
855 inode, rw, bio, mirror_num,
856 bio_flags, __btrfs_submit_bio_hook);
857 }
858
859 mapit:
860 return btrfs_map_bio(root, rw, bio, mirror_num, 0);
861 }
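
The dispatch above has three outcomes; modeled as a plain decision
function (the enum and names are hypothetical, and the read-side sum
lookup side effect is omitted):

enum bio_route {
	ROUTE_MAP,		/* straight to btrfs_map_bio() */
	ROUTE_COMPRESSED_READ,	/* btrfs_submit_compressed_read() */
	ROUTE_ASYNC_CSUM_WRITE,	/* btrfs_wq_submit_bio() */
};

/* Reads may divert to the compressed path; writes go through async
 * checksumming unless sums are disabled for this file or mount. */
static enum bio_route route_bio(int is_write, int skip_sum, int compressed)
{
	if (!is_write)
		return compressed ? ROUTE_COMPRESSED_READ : ROUTE_MAP;
	return skip_sum ? ROUTE_MAP : ROUTE_ASYNC_CSUM_WRITE;
}
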
862
863 /*
864 * given a list of ordered sums record them in the inode. This happens
865 * at IO completion time based on sums calculated at bio submission time.
866 */
867 static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
868 struct inode *inode, u64 file_offset,
869 struct list_head *list)
870 {
871 struct list_head *cur;
872 struct btrfs_ordered_sum *sum;
873
874 btrfs_set_trans_block_group(trans, inode);
875 list_for_each(cur, list) {
876 sum = list_entry(cur, struct btrfs_ordered_sum, list);
877 btrfs_csum_file_blocks(trans, BTRFS_I(inode)->root,
878 inode, sum);
879 }
880 return 0;
881 }
882
883 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end)
884 {
885 return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
886 GFP_NOFS);
887 }
888
889 /* see btrfs_writepage_start_hook for details on why this is required */
890 struct btrfs_writepage_fixup {
891 struct page *page;
892 struct btrfs_work work;
893 };
894
895 void btrfs_writepage_fixup_worker(struct btrfs_work *work)
896 {
897 struct btrfs_writepage_fixup *fixup;
898 struct btrfs_ordered_extent *ordered;
899 struct page *page;
900 struct inode *inode;
901 u64 page_start;
902 u64 page_end;
903
904 fixup = container_of(work, struct btrfs_writepage_fixup, work);
905 page = fixup->page;
906 again:
907 lock_page(page);
908 if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
909 ClearPageChecked(page);
910 goto out_page;
911 }
912
913 inode = page->mapping->host;
914 page_start = page_offset(page);
915 page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
916
917 lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
918
919 /* already ordered? We're done */
920 if (test_range_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
921 EXTENT_ORDERED, 0)) {
922 goto out;
923 }
924
925 ordered = btrfs_lookup_ordered_extent(inode, page_start);
926 if (ordered) {
927 unlock_extent(&BTRFS_I(inode)->io_tree, page_start,
928 page_end, GFP_NOFS);
929 unlock_page(page);
930 btrfs_start_ordered_extent(inode, ordered, 1);
931 goto again;
932 }
933
934 btrfs_set_extent_delalloc(inode, page_start, page_end);
935 ClearPageChecked(page);
936 out:
937 unlock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
938 out_page:
939 unlock_page(page);
940 page_cache_release(page);
941 }
942
943 /*
944 * There are a few paths in the higher layers of the kernel that directly
945 * set the page dirty bit without asking the filesystem if it is a
946 * good idea. This causes problems because we want to make sure COW
947 * properly happens and the data=ordered rules are followed.
948 *
949 * In our case any range that doesn't have the ORDERED bit set
950 * hasn't been properly setup for IO. We kick off an async process
951 * to fix it up. The async helper will wait for ordered extents, set
952 * the delalloc bit and make it safe to write the page.
953 */
954 int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
955 {
956 struct inode *inode = page->mapping->host;
957 struct btrfs_writepage_fixup *fixup;
958 struct btrfs_root *root = BTRFS_I(inode)->root;
959 int ret;
960
961 ret = test_range_bit(&BTRFS_I(inode)->io_tree, start, end,
962 EXTENT_ORDERED, 0);
963 if (ret)
964 return 0;
965
966 if (PageChecked(page))
967 return -EAGAIN;
968
969 fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
970 if (!fixup)
971 return -EAGAIN;
972
973 SetPageChecked(page);
974 page_cache_get(page);
975 fixup->work.func = btrfs_writepage_fixup_worker;
976 fixup->page = page;
977 btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
978 return -EAGAIN;
979 }
980
981 /* as ordered data IO finishes, this gets called so we can finish
982 * an ordered extent if the range of bytes in the file it covers are
983 * fully written.
984 */
985 static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
986 {
987 struct btrfs_root *root = BTRFS_I(inode)->root;
988 struct btrfs_trans_handle *trans;
989 struct btrfs_ordered_extent *ordered_extent;
990 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
991 struct btrfs_file_extent_item *extent_item;
992 struct btrfs_path *path = NULL;
993 struct extent_buffer *leaf;
994 u64 alloc_hint = 0;
995 struct list_head list;
996 struct btrfs_key ins;
997 int ret;
998
999 ret = btrfs_dec_test_ordered_pending(inode, start, end - start + 1);
1000 if (!ret)
1001 return 0;
1002
1003 trans = btrfs_join_transaction(root, 1);
1004
1005 ordered_extent = btrfs_lookup_ordered_extent(inode, start);
1006 BUG_ON(!ordered_extent);
1007 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags))
1008 goto nocow;
1009
1010 path = btrfs_alloc_path();
1011 BUG_ON(!path);
1012
1013 lock_extent(io_tree, ordered_extent->file_offset,
1014 ordered_extent->file_offset + ordered_extent->len - 1,
1015 GFP_NOFS);
1016
1017 INIT_LIST_HEAD(&list);
1018
1019 mutex_lock(&BTRFS_I(inode)->extent_mutex);
1020
1021 ret = btrfs_drop_extents(trans, root, inode,
1022 ordered_extent->file_offset,
1023 ordered_extent->file_offset +
1024 ordered_extent->len,
1025 ordered_extent->file_offset, &alloc_hint);
1026 BUG_ON(ret);
1027
1028 ins.objectid = inode->i_ino;
1029 ins.offset = ordered_extent->file_offset;
1030 ins.type = BTRFS_EXTENT_DATA_KEY;
1031 ret = btrfs_insert_empty_item(trans, root, path, &ins,
1032 sizeof(*extent_item));
1033 BUG_ON(ret);
1034 leaf = path->nodes[0];
1035 extent_item = btrfs_item_ptr(leaf, path->slots[0],
1036 struct btrfs_file_extent_item);
1037 btrfs_set_file_extent_generation(leaf, extent_item, trans->transid);
1038 btrfs_set_file_extent_type(leaf, extent_item, BTRFS_FILE_EXTENT_REG);
1039 btrfs_set_file_extent_disk_bytenr(leaf, extent_item,
1040 ordered_extent->start);
1041 btrfs_set_file_extent_disk_num_bytes(leaf, extent_item,
1042 ordered_extent->disk_len);
1043 btrfs_set_file_extent_offset(leaf, extent_item, 0);
1044
1045 if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
1046 btrfs_set_file_extent_compression(leaf, extent_item, 1);
1047 else
1048 btrfs_set_file_extent_compression(leaf, extent_item, 0);
1049 btrfs_set_file_extent_encryption(leaf, extent_item, 0);
1050 btrfs_set_file_extent_other_encoding(leaf, extent_item, 0);
1051
1052 /* ram bytes = extent_num_bytes for now */
1053 btrfs_set_file_extent_num_bytes(leaf, extent_item,
1054 ordered_extent->len);
1055 btrfs_set_file_extent_ram_bytes(leaf, extent_item,
1056 ordered_extent->len);
1057 btrfs_mark_buffer_dirty(leaf);
1058
1059 btrfs_drop_extent_cache(inode, ordered_extent->file_offset,
1060 ordered_extent->file_offset +
1061 ordered_extent->len - 1, 0);
1062 mutex_unlock(&BTRFS_I(inode)->extent_mutex);
1063
1064 ins.objectid = ordered_extent->start;
1065 ins.offset = ordered_extent->disk_len;
1066 ins.type = BTRFS_EXTENT_ITEM_KEY;
1067 ret = btrfs_alloc_reserved_extent(trans, root, leaf->start,
1068 root->root_key.objectid,
1069 trans->transid, inode->i_ino, &ins);
1070 BUG_ON(ret);
1071 btrfs_release_path(root, path);
1072
1073 inode_add_bytes(inode, ordered_extent->len);
1074 unlock_extent(io_tree, ordered_extent->file_offset,
1075 ordered_extent->file_offset + ordered_extent->len - 1,
1076 GFP_NOFS);
1077 nocow:
1078 add_pending_csums(trans, inode, ordered_extent->file_offset,
1079 &ordered_extent->list);
1080
1081 mutex_lock(&BTRFS_I(inode)->extent_mutex);
1082 btrfs_ordered_update_i_size(inode, ordered_extent);
1083 btrfs_update_inode(trans, root, inode);
1084 btrfs_remove_ordered_extent(inode, ordered_extent);
1085 mutex_unlock(&BTRFS_I(inode)->extent_mutex);
1086
1087 /* once for us */
1088 btrfs_put_ordered_extent(ordered_extent);
1089 /* once for the tree */
1090 btrfs_put_ordered_extent(ordered_extent);
1091
1092 btrfs_end_transaction(trans, root);
1093 if (path)
1094 btrfs_free_path(path);
1095 return 0;
1096 }
1097
1098 int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
1099 struct extent_state *state, int uptodate)
1100 {
1101 return btrfs_finish_ordered_io(page->mapping->host, start, end);
1102 }
1103
1104 /*
1105 * When IO fails, either with EIO or csum verification fails, we
1106 * try other mirrors that might have a good copy of the data. This
1107 * io_failure_record is used to record state as we go through all the
1108 * mirrors. If another mirror has good data, the page is set up to date
1109 * and things continue. If a good mirror can't be found, the original
1110 * bio end_io callback is called to indicate things have failed.
1111 */
1112 struct io_failure_record {
1113 struct page *page;
1114 u64 start;
1115 u64 len;
1116 u64 logical;
1117 int last_mirror;
1118 };
1119
1120 int btrfs_io_failed_hook(struct bio *failed_bio,
1121 struct page *page, u64 start, u64 end,
1122 struct extent_state *state)
1123 {
1124 struct io_failure_record *failrec = NULL;
1125 u64 private;
1126 struct extent_map *em;
1127 struct inode *inode = page->mapping->host;
1128 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
1129 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
1130 struct bio *bio;
1131 int num_copies;
1132 int ret;
1133 int rw;
1134 u64 logical;
1135 unsigned long bio_flags = 0;
1136
1137 ret = get_state_private(failure_tree, start, &private);
1138 if (ret) {
1139 failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
1140 if (!failrec)
1141 return -ENOMEM;
1142 failrec->start = start;
1143 failrec->len = end - start + 1;
1144 failrec->last_mirror = 0;
1145
1146 spin_lock(&em_tree->lock);
1147 em = lookup_extent_mapping(em_tree, start, failrec->len);
1148 if (em && (em->start > start || em->start + em->len < start)) {
1149 free_extent_map(em);
1150 em = NULL;
1151 }
1152 spin_unlock(&em_tree->lock);
1153
1154 if (!em || IS_ERR(em)) {
1155 kfree(failrec);
1156 return -EIO;
1157 }
1158 logical = start - em->start;
1159 logical = em->block_start + logical;
1160 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
1161 bio_flags = EXTENT_BIO_COMPRESSED;
1162 failrec->logical = logical;
1163 free_extent_map(em);
1164 set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
1165 EXTENT_DIRTY, GFP_NOFS);
1166 set_state_private(failure_tree, start,
1167 (u64)(unsigned long)failrec);
1168 } else {
1169 failrec = (struct io_failure_record *)(unsigned long)private;
1170 }
1171 num_copies = btrfs_num_copies(
1172 &BTRFS_I(inode)->root->fs_info->mapping_tree,
1173 failrec->logical, failrec->len);
1174 failrec->last_mirror++;
1175 if (!state) {
1176 spin_lock_irq(&BTRFS_I(inode)->io_tree.lock);
1177 state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
1178 failrec->start,
1179 EXTENT_LOCKED);
1180 if (state && state->start != failrec->start)
1181 state = NULL;
1182 spin_unlock_irq(&BTRFS_I(inode)->io_tree.lock);
1183 }
1184 if (!state || failrec->last_mirror > num_copies) {
1185 set_state_private(failure_tree, failrec->start, 0);
1186 clear_extent_bits(failure_tree, failrec->start,
1187 failrec->start + failrec->len - 1,
1188 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
1189 kfree(failrec);
1190 return -EIO;
1191 }
1192 bio = bio_alloc(GFP_NOFS, 1);
1193 bio->bi_private = state;
1194 bio->bi_end_io = failed_bio->bi_end_io;
1195 bio->bi_sector = failrec->logical >> 9;
1196 bio->bi_bdev = failed_bio->bi_bdev;
1197 bio->bi_size = 0;
1198 bio_add_page(bio, page, failrec->len, start - page_offset(page));
1199 if (failed_bio->bi_rw & (1 << BIO_RW))
1200 rw = WRITE;
1201 else
1202 rw = READ;
1203
1204 BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
1205 failrec->last_mirror,
1206 bio_flags);
1207 return 0;
1208 }
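
State aside, the retry policy is just "walk mirrors 1..num_copies, give
up with -EIO when they are exhausted". A loop-shaped model (read_mirror
is a hypothetical callback; the real code spreads this across bio
completions via the io_failure_record):

/* Try each copy of the data in turn until one reads back clean. */
static int read_with_retries(int num_copies,
			     int (*read_mirror)(int mirror, void *arg),
			     void *arg)
{
	int mirror;

	for (mirror = 1; mirror <= num_copies; mirror++) {
		if (read_mirror(mirror, arg) == 0)
			return 0;	/* good copy found */
	}
	return -5;			/* -EIO: every mirror failed */
}
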
1209
1210 /*
1211 * each time an IO finishes, we do a fast check in the IO failure tree
1212 * to see if we need to process or clean up an io_failure_record
1213 */
1214 int btrfs_clean_io_failures(struct inode *inode, u64 start)
1215 {
1216 u64 private;
1217 u64 private_failure;
1218 struct io_failure_record *failure;
1219 int ret;
1220
1221 private = 0;
1222 if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
1223 (u64)-1, 1, EXTENT_DIRTY)) {
1224 ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
1225 start, &private_failure);
1226 if (ret == 0) {
1227 failure = (struct io_failure_record *)(unsigned long)
1228 private_failure;
1229 set_state_private(&BTRFS_I(inode)->io_failure_tree,
1230 failure->start, 0);
1231 clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
1232 failure->start,
1233 failure->start + failure->len - 1,
1234 EXTENT_DIRTY | EXTENT_LOCKED,
1235 GFP_NOFS);
1236 kfree(failure);
1237 }
1238 }
1239 return 0;
1240 }
1241
1242 /*
1243 * when reads are done, we need to check csums to verify the data is correct.
1244 * if there's a match, we allow the bio to finish. If not, we go through
1245 * the io_failure_record routines to find good copies
1246 */
1247 int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
1248 struct extent_state *state)
1249 {
1250 size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
1251 struct inode *inode = page->mapping->host;
1252 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1253 char *kaddr;
1254 u64 private = ~(u32)0;
1255 int ret;
1256 struct btrfs_root *root = BTRFS_I(inode)->root;
1257 u32 csum = ~(u32)0;
1258 unsigned long flags;
1259
1260 if (btrfs_test_opt(root, NODATASUM) ||
1261 btrfs_test_flag(inode, NODATASUM))
1262 return 0;
1263 if (state && state->start == start) {
1264 private = state->private;
1265 ret = 0;
1266 } else {
1267 ret = get_state_private(io_tree, start, &private);
1268 }
1269 local_irq_save(flags);
1270 kaddr = kmap_atomic(page, KM_IRQ0);
1271 if (ret) {
1272 goto zeroit;
1273 }
1274 csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1);
1275 btrfs_csum_final(csum, (char *)&csum);
1276 if (csum != private) {
1277 goto zeroit;
1278 }
1279 kunmap_atomic(kaddr, KM_IRQ0);
1280 local_irq_restore(flags);
1281
1282 /* if the io failure tree for this inode is non-empty,
1283 * check to see if we've recovered from a failed IO
1284 */
1285 btrfs_clean_io_failures(inode, start);
1286 return 0;
1287
1288 zeroit:
1289 printk("btrfs csum failed ino %lu off %llu csum %u private %Lu\n",
1290 page->mapping->host->i_ino, (unsigned long long)start, csum,
1291 private);
1292 memset(kaddr + offset, 1, end - start + 1);
1293 flush_dcache_page(page);
1294 kunmap_atomic(kaddr, KM_IRQ0);
1295 local_irq_restore(flags);
1296 if (private == 0)
1297 return 0;
1298 return -EIO;
1299 }
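
The verification itself is: hash the range, finalize, compare with the
stored value, and poison the buffer on mismatch so stale data is never
silently returned. A model with a toy hash standing in for
btrfs_csum_data()/btrfs_csum_final() (btrfs uses crc32c in reality):

#include <stdint.h>
#include <string.h>

/* Toy checksum, for illustration only. */
static uint32_t toy_csum(const uint8_t *data, size_t len)
{
	uint32_t c = ~0u;
	size_t i;

	for (i = 0; i < len; i++)
		c = (c << 5) + c + data[i];
	return c;
}

/* Return 0 when buf matches the stored sum; otherwise poison the
 * buffer (memset to 1, as the code above does) and report -EIO. */
static int verify_range_model(uint8_t *buf, size_t len, uint32_t stored)
{
	if (toy_csum(buf, len) == stored)
		return 0;
	memset(buf, 1, len);
	return -5;	/* -EIO */
}
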
1300
1301 /*
1302 * This creates an orphan entry for the given inode in case something goes
1303 * wrong in the middle of an unlink/truncate.
1304 */
1305 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
1306 {
1307 struct btrfs_root *root = BTRFS_I(inode)->root;
1308 int ret = 0;
1309
1310 spin_lock(&root->list_lock);
1311
1312 /* already on the orphan list, we're good */
1313 if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
1314 spin_unlock(&root->list_lock);
1315 return 0;
1316 }
1317
1318 list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
1319
1320 spin_unlock(&root->list_lock);
1321
1322 /*
1323 * insert an orphan item to track this unlinked/truncated file
1324 */
1325 ret = btrfs_insert_orphan_item(trans, root, inode->i_ino);
1326
1327 return ret;
1328 }
1329
1330 /*
1331 * We have done the truncate/delete so we can go ahead and remove the orphan
1332 * item for this particular inode.
1333 */
1334 int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
1335 {
1336 struct btrfs_root *root = BTRFS_I(inode)->root;
1337 int ret = 0;
1338
1339 spin_lock(&root->list_lock);
1340
1341 if (list_empty(&BTRFS_I(inode)->i_orphan)) {
1342 spin_unlock(&root->list_lock);
1343 return 0;
1344 }
1345
1346 list_del_init(&BTRFS_I(inode)->i_orphan);
1347 if (!trans) {
1348 spin_unlock(&root->list_lock);
1349 return 0;
1350 }
1351
1352 spin_unlock(&root->list_lock);
1353
1354 ret = btrfs_del_orphan_item(trans, root, inode->i_ino);
1355
1356 return ret;
1357 }
1358
1359 /*
1360 * this cleans up any orphans that may be left on the list from the last use
1361 * of this root.
1362 */
1363 void btrfs_orphan_cleanup(struct btrfs_root *root)
1364 {
1365 struct btrfs_path *path;
1366 struct extent_buffer *leaf;
1367 struct btrfs_item *item;
1368 struct btrfs_key key, found_key;
1369 struct btrfs_trans_handle *trans;
1370 struct inode *inode;
1371 int ret = 0, nr_unlink = 0, nr_truncate = 0;
1372
1373 /* don't do orphan cleanup if the fs is readonly. */
1374 if (root->fs_info->sb->s_flags & MS_RDONLY)
1375 return;
1376
1377 path = btrfs_alloc_path();
1378 if (!path)
1379 return;
1380 path->reada = -1;
1381
1382 key.objectid = BTRFS_ORPHAN_OBJECTID;
1383 btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
1384 key.offset = (u64)-1;
1385
1386
1387 while (1) {
1388 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1389 if (ret < 0) {
1390 printk(KERN_ERR "Error searching slot for orphan: %d"
1391 "\n", ret);
1392 break;
1393 }
1394
1395 /*
1396 * if ret == 0 it means we found what we were searching for, which
1397 * is weird, but possible, so only screw with the path if we didn't
1398 * find the key and see if we have stuff that matches
1399 */
1400 if (ret > 0) {
1401 if (path->slots[0] == 0)
1402 break;
1403 path->slots[0]--;
1404 }
1405
1406 /* pull out the item */
1407 leaf = path->nodes[0];
1408 item = btrfs_item_nr(leaf, path->slots[0]);
1409 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1410
1411 /* make sure the item matches what we want */
1412 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
1413 break;
1414 if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
1415 break;
1416
1417 /* release the path since we're done with it */
1418 btrfs_release_path(root, path);
1419
1420 /*
1421 * this is where we are basically btrfs_lookup, without the
1422 * crossing root thing. we store the inode number in the
1423 * offset of the orphan item.
1424 */
1425 inode = btrfs_iget_locked(root->fs_info->sb,
1426 found_key.offset, root);
1427 if (!inode)
1428 break;
1429
1430 if (inode->i_state & I_NEW) {
1431 BTRFS_I(inode)->root = root;
1432
1433 /* have to set the location manually */
1434 BTRFS_I(inode)->location.objectid = inode->i_ino;
1435 BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
1436 BTRFS_I(inode)->location.offset = 0;
1437
1438 btrfs_read_locked_inode(inode);
1439 unlock_new_inode(inode);
1440 }
1441
1442 /*
1443 * add this inode to the orphan list so btrfs_orphan_del does
1444 * the proper thing when we hit it
1445 */
1446 spin_lock(&root->list_lock);
1447 list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
1448 spin_unlock(&root->list_lock);
1449
1450 /*
1451 * if this is a bad inode, it means we actually succeeded in
1452 * removing the inode, but not the orphan record, which means
1453 * we need to manually delete the orphan since iput will just
1454 * do a destroy_inode
1455 */
1456 if (is_bad_inode(inode)) {
1457 trans = btrfs_start_transaction(root, 1);
1458 btrfs_orphan_del(trans, inode);
1459 btrfs_end_transaction(trans, root);
1460 iput(inode);
1461 continue;
1462 }
1463
1464 /* if we have links, this was a truncate, let's do that */
1465 if (inode->i_nlink) {
1466 nr_truncate++;
1467 btrfs_truncate(inode);
1468 } else {
1469 nr_unlink++;
1470 }
1471
1472 /* this will do delete_inode and everything for us */
1473 iput(inode);
1474 }
1475
1476 if (nr_unlink)
1477 printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
1478 if (nr_truncate)
1479 printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);
1480
1481 btrfs_free_path(path);
1482 }
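
Orphan items all live under one well-known objectid with the inode
number stored in the key offset, which is why the loop above can scan
them with a single search key. A sketch of that key (the constant
values shown are assumptions about the on-disk format of this era):

#include <stdint.h>

struct model_key {
	uint64_t objectid;
	uint8_t type;
	uint64_t offset;
};

/* The search key btrfs_orphan_cleanup() starts from: offset = (u64)-1
 * so btrfs_search_slot() lands past the last orphan item and the loop
 * walks backwards one slot at a time. */
static struct model_key orphan_search_key(void)
{
	struct model_key k;

	k.objectid = (uint64_t)-5;	/* BTRFS_ORPHAN_OBJECTID, assumed */
	k.type = 48;			/* BTRFS_ORPHAN_ITEM_KEY, assumed */
	k.offset = (uint64_t)-1;
	return k;
}
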
1483
1484 /*
1485 * read an inode from the btree into the in-memory inode
1486 */
1487 void btrfs_read_locked_inode(struct inode *inode)
1488 {
1489 struct btrfs_path *path;
1490 struct extent_buffer *leaf;
1491 struct btrfs_inode_item *inode_item;
1492 struct btrfs_timespec *tspec;
1493 struct btrfs_root *root = BTRFS_I(inode)->root;
1494 struct btrfs_key location;
1495 u64 alloc_group_block;
1496 u32 rdev;
1497 int ret;
1498
1499 path = btrfs_alloc_path();
1500 BUG_ON(!path);
1501 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
1502
1503 ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
1504 if (ret)
1505 goto make_bad;
1506
1507 leaf = path->nodes[0];
1508 inode_item = btrfs_item_ptr(leaf, path->slots[0],
1509 struct btrfs_inode_item);
1510
1511 inode->i_mode = btrfs_inode_mode(leaf, inode_item);
1512 inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
1513 inode->i_uid = btrfs_inode_uid(leaf, inode_item);
1514 inode->i_gid = btrfs_inode_gid(leaf, inode_item);
1515 btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
1516
1517 tspec = btrfs_inode_atime(inode_item);
1518 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
1519 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
1520
1521 tspec = btrfs_inode_mtime(inode_item);
1522 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
1523 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
1524
1525 tspec = btrfs_inode_ctime(inode_item);
1526 inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
1527 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
1528
1529 inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
1530 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
1531 inode->i_generation = BTRFS_I(inode)->generation;
1532 inode->i_rdev = 0;
1533 rdev = btrfs_inode_rdev(leaf, inode_item);
1534
1535 BTRFS_I(inode)->index_cnt = (u64)-1;
1536
1537 alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
1538 BTRFS_I(inode)->block_group = btrfs_lookup_block_group(root->fs_info,
1539 alloc_group_block);
1540 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
1541 if (!BTRFS_I(inode)->block_group) {
1542 BTRFS_I(inode)->block_group = btrfs_find_block_group(root,
1543 NULL, 0,
1544 BTRFS_BLOCK_GROUP_METADATA, 0);
1545 }
1546 btrfs_free_path(path);
1547 inode_item = NULL;
1548
1549 switch (inode->i_mode & S_IFMT) {
1550 case S_IFREG:
1551 inode->i_mapping->a_ops = &btrfs_aops;
1552 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
1553 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
1554 inode->i_fop = &btrfs_file_operations;
1555 inode->i_op = &btrfs_file_inode_operations;
1556 break;
1557 case S_IFDIR:
1558 inode->i_fop = &btrfs_dir_file_operations;
1559 if (root == root->fs_info->tree_root)
1560 inode->i_op = &btrfs_dir_ro_inode_operations;
1561 else
1562 inode->i_op = &btrfs_dir_inode_operations;
1563 break;
1564 case S_IFLNK:
1565 inode->i_op = &btrfs_symlink_inode_operations;
1566 inode->i_mapping->a_ops = &btrfs_symlink_aops;
1567 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
1568 break;
1569 default:
1570 init_special_inode(inode, inode->i_mode, rdev);
1571 break;
1572 }
1573 return;
1574
1575 make_bad:
1576 btrfs_free_path(path);
1577 make_bad_inode(inode);
1578 }
1579
1580 /*
1581 * given a leaf and an inode, copy the inode fields into the leaf
1582 */
1583 static void fill_inode_item(struct btrfs_trans_handle *trans,
1584 struct extent_buffer *leaf,
1585 struct btrfs_inode_item *item,
1586 struct inode *inode)
1587 {
1588 btrfs_set_inode_uid(leaf, item, inode->i_uid);
1589 btrfs_set_inode_gid(leaf, item, inode->i_gid);
1590 btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
1591 btrfs_set_inode_mode(leaf, item, inode->i_mode);
1592 btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
1593
1594 btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
1595 inode->i_atime.tv_sec);
1596 btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
1597 inode->i_atime.tv_nsec);
1598
1599 btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
1600 inode->i_mtime.tv_sec);
1601 btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
1602 inode->i_mtime.tv_nsec);
1603
1604 btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
1605 inode->i_ctime.tv_sec);
1606 btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
1607 inode->i_ctime.tv_nsec);
1608
1609 btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
1610 btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
1611 btrfs_set_inode_transid(leaf, item, trans->transid);
1612 btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
1613 btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
1614 btrfs_set_inode_block_group(leaf, item,
1615 BTRFS_I(inode)->block_group->key.objectid);
1616 }
1617
1618 /*
1619 * copy everything in the in-memory inode into the btree.
1620 */
1621 int noinline btrfs_update_inode(struct btrfs_trans_handle *trans,
1622 struct btrfs_root *root,
1623 struct inode *inode)
1624 {
1625 struct btrfs_inode_item *inode_item;
1626 struct btrfs_path *path;
1627 struct extent_buffer *leaf;
1628 int ret;
1629
1630 path = btrfs_alloc_path();
1631 BUG_ON(!path);
1632 ret = btrfs_lookup_inode(trans, root, path,
1633 &BTRFS_I(inode)->location, 1);
1634 if (ret) {
1635 if (ret > 0)
1636 ret = -ENOENT;
1637 goto failed;
1638 }
1639
1640 leaf = path->nodes[0];
1641 inode_item = btrfs_item_ptr(leaf, path->slots[0],
1642 struct btrfs_inode_item);
1643
1644 fill_inode_item(trans, leaf, inode_item, inode);
1645 btrfs_mark_buffer_dirty(leaf);
1646 btrfs_set_inode_last_trans(trans, inode);
1647 ret = 0;
1648 failed:
1649 btrfs_free_path(path);
1650 return ret;
1651 }
1652
1653
1654 /*
1655 * unlink helper that gets used here in inode.c and in the tree logging
1656 * recovery code. It removes a link in a directory with a given name, and
1657 * also drops the back refs in the inode to the directory
1658 */
1659 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
1660 struct btrfs_root *root,
1661 struct inode *dir, struct inode *inode,
1662 const char *name, int name_len)
1663 {
1664 struct btrfs_path *path;
1665 int ret = 0;
1666 struct extent_buffer *leaf;
1667 struct btrfs_dir_item *di;
1668 struct btrfs_key key;
1669 u64 index;
1670
1671 path = btrfs_alloc_path();
1672 if (!path) {
1673 ret = -ENOMEM;
1674 goto err;
1675 }
1676
1677 di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
1678 name, name_len, -1);
1679 if (IS_ERR(di)) {
1680 ret = PTR_ERR(di);
1681 goto err;
1682 }
1683 if (!di) {
1684 ret = -ENOENT;
1685 goto err;
1686 }
1687 leaf = path->nodes[0];
1688 btrfs_dir_item_key_to_cpu(leaf, di, &key);
1689 ret = btrfs_delete_one_dir_name(trans, root, path, di);
1690 if (ret)
1691 goto err;
1692 btrfs_release_path(root, path);
1693
1694 ret = btrfs_del_inode_ref(trans, root, name, name_len,
1695 inode->i_ino,
1696 dir->i_ino, &index);
1697 if (ret) {
1698 printk("failed to delete reference to %.*s, "
1699 "inode %lu parent %lu\n", name_len, name,
1700 inode->i_ino, dir->i_ino);
1701 goto err;
1702 }
1703
1704 di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
1705 index, name, name_len, -1);
1706 if (IS_ERR(di)) {
1707 ret = PTR_ERR(di);
1708 goto err;
1709 }
1710 if (!di) {
1711 ret = -ENOENT;
1712 goto err;
1713 }
1714 ret = btrfs_delete_one_dir_name(trans, root, path, di);
1715 btrfs_release_path(root, path);
1716
1717 ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
1718 inode, dir->i_ino);
1719 BUG_ON(ret != 0 && ret != -ENOENT);
1720 if (ret != -ENOENT)
1721 BTRFS_I(dir)->log_dirty_trans = trans->transid;
1722
1723 ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
1724 dir, index);
1725 BUG_ON(ret);
1726 err:
1727 btrfs_free_path(path);
1728 if (ret)
1729 goto out;
1730
1731 btrfs_i_size_write(dir, dir->i_size - name_len * 2);
1732 inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
1733 btrfs_update_inode(trans, root, dir);
1734 btrfs_drop_nlink(inode);
1735 ret = btrfs_update_inode(trans, root, inode);
1736 dir->i_sb->s_dirt = 1;
1737 out:
1738 return ret;
1739 }
1740
1741 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
1742 {
1743 struct btrfs_root *root;
1744 struct btrfs_trans_handle *trans;
1745 struct inode *inode = dentry->d_inode;
1746 int ret;
1747 unsigned long nr = 0;
1748
1749 root = BTRFS_I(dir)->root;
1750
1751 ret = btrfs_check_free_space(root, 1, 1);
1752 if (ret)
1753 goto fail;
1754
1755 trans = btrfs_start_transaction(root, 1);
1756
1757 btrfs_set_trans_block_group(trans, dir);
1758 ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
1759 dentry->d_name.name, dentry->d_name.len);
1760
1761 if (inode->i_nlink == 0)
1762 ret = btrfs_orphan_add(trans, inode);
1763
1764 nr = trans->blocks_used;
1765
1766 btrfs_end_transaction_throttle(trans, root);
1767 fail:
1768 btrfs_btree_balance_dirty(root, nr);
1769 return ret;
1770 }
1771
1772 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
1773 {
1774 struct inode *inode = dentry->d_inode;
1775 int err = 0;
1776 int ret;
1777 struct btrfs_root *root = BTRFS_I(dir)->root;
1778 struct btrfs_trans_handle *trans;
1779 unsigned long nr = 0;
1780
1781 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE) {
1782 return -ENOTEMPTY;
1783 }
1784
1785 ret = btrfs_check_free_space(root, 1, 1);
1786 if (ret)
1787 goto fail;
1788
1789 trans = btrfs_start_transaction(root, 1);
1790 btrfs_set_trans_block_group(trans, dir);
1791
1792 err = btrfs_orphan_add(trans, inode);
1793 if (err)
1794 goto fail_trans;
1795
1796 /* now the directory is empty */
1797 err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
1798 dentry->d_name.name, dentry->d_name.len);
1799 if (!err) {
1800 btrfs_i_size_write(inode, 0);
1801 }
1802
1803 fail_trans:
1804 nr = trans->blocks_used;
1805 ret = btrfs_end_transaction_throttle(trans, root);
1806 fail:
1807 btrfs_btree_balance_dirty(root, nr);
1808
1809 if (ret && !err)
1810 err = ret;
1811 return err;
1812 }
1813
1814 /*
1815 * when truncating bytes in a file, it is possible to avoid reading
1816 * the leaves that contain only checksum items. This can be the
1817 * majority of the IO required to delete a large file, but it must
1818 * be done carefully.
1819 *
1820 * The keys in the level just above the leaves are checked to make sure
1821 * the lowest key in a given leaf is a csum key, and starts at an offset
1822 * after the new size.
1823 *
1824 * Then the key for the next leaf is checked to make sure it also has
1825 * a checksum item for the same file. If it does, we know our target leaf
1826 * contains only checksum items, and it can be safely freed without reading
1827 * it.
1828 *
1829 * This is just an optimization targeted at large files. It may do
1830 * nothing. It will return 0 unless things went badly.
1831 */
1832 static noinline int drop_csum_leaves(struct btrfs_trans_handle *trans,
1833 struct btrfs_root *root,
1834 struct btrfs_path *path,
1835 struct inode *inode, u64 new_size)
1836 {
1837 struct btrfs_key key;
1838 int ret;
1839 int nritems;
1840 struct btrfs_key found_key;
1841 struct btrfs_key other_key;
1842 struct btrfs_leaf_ref *ref;
1843 u64 leaf_gen;
1844 u64 leaf_start;
1845
1846 path->lowest_level = 1;
1847 key.objectid = inode->i_ino;
1848 key.type = BTRFS_CSUM_ITEM_KEY;
1849 key.offset = new_size;
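/*
 * lowest_level 1 makes btrfs_search_slot stop at the node just above
 * the leaves, so the leaf blocks themselves are never read here; we
 * only examine the keys stored in the level 1 node.
 */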
1850 again:
1851 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1852 if (ret < 0)
1853 goto out;
1854
1855 if (path->nodes[1] == NULL) {
1856 ret = 0;
1857 goto out;
1858 }
1859 ret = 0;
1860 btrfs_node_key_to_cpu(path->nodes[1], &found_key, path->slots[1]);
1861 nritems = btrfs_header_nritems(path->nodes[1]);
1862
1863 if (!nritems)
1864 goto out;
1865
1866 if (path->slots[1] >= nritems)
1867 goto next_node;
1868
1869 /* did we find a key greater than anything we want to delete? */
1870 if (found_key.objectid > inode->i_ino ||
1871 (found_key.objectid == inode->i_ino && found_key.type > key.type))
1872 goto out;
1873
1874 /* we check the next key in the node to make sure the leaf contains
1875 * only checksum items. This comparison doesn't work if our
1876 * leaf is the last one in the node
1877 */
1878 if (path->slots[1] + 1 >= nritems) {
1879 next_node:
1880 /* search forward from the last key in the node, this
1881 * will bring us into the next node in the tree
1882 */
1883 btrfs_node_key_to_cpu(path->nodes[1], &found_key, nritems - 1);
1884
1885 /* unlikely, but we inc below, so check to be safe */
1886 if (found_key.offset == (u64)-1)
1887 goto out;
1888
1889 /* search_forward needs a path with locks held, do the
1890 * search again for the original key. It is possible
1891 * this will race with a balance and return a path that
1892 * we could modify, but this drop is just an optimization
1893 * and is allowed to miss some leaves.
1894 */
1895 btrfs_release_path(root, path);
1896 found_key.offset++;
1897
1898 /* setup a max key for search_forward */
1899 other_key.offset = (u64)-1;
1900 other_key.type = key.type;
1901 other_key.objectid = key.objectid;
1902
1903 path->keep_locks = 1;
1904 ret = btrfs_search_forward(root, &found_key, &other_key,
1905 path, 0, 0);
1906 path->keep_locks = 0;
1907 if (ret || found_key.objectid != key.objectid ||
1908 found_key.type != key.type) {
1909 ret = 0;
1910 goto out;
1911 }
1912
1913 key.offset = found_key.offset;
1914 btrfs_release_path(root, path);
1915 cond_resched();
1916 goto again;
1917 }
1918
1919 /* we know there's one more slot after us in the tree,
1920 * read that key so we can verify it is also a checksum item
1921 */
1922 btrfs_node_key_to_cpu(path->nodes[1], &other_key, path->slots[1] + 1);
1923
1924 if (found_key.objectid < inode->i_ino)
1925 goto next_key;
1926
1927 if (found_key.type != key.type || found_key.offset < new_size)
1928 goto next_key;
1929
1930 /*
1931 * if the key for the next leaf isn't a csum key from this objectid,
1932 * we can't be sure there aren't good items inside this leaf.
1933 * Bail out
1934 */
1935 if (other_key.objectid != inode->i_ino || other_key.type != key.type)
1936 goto out;
1937
1938 leaf_start = btrfs_node_blockptr(path->nodes[1], path->slots[1]);
1939 leaf_gen = btrfs_node_ptr_generation(path->nodes[1], path->slots[1]);
1940 /*
1941 * it is safe to delete this leaf, it contains only
1942 * csum items from this inode at an offset >= new_size
1943 */
1944 ret = btrfs_del_leaf(trans, root, path, leaf_start);
1945 BUG_ON(ret);
1946
1947 if (root->ref_cows && leaf_gen < trans->transid) {
1948 ref = btrfs_alloc_leaf_ref(root, 0);
1949 if (ref) {
1950 ref->root_gen = root->root_key.offset;
1951 ref->bytenr = leaf_start;
1952 ref->owner = 0;
1953 ref->generation = leaf_gen;
1954 ref->nritems = 0;
1955
1956 ret = btrfs_add_leaf_ref(root, ref, 0);
1957 WARN_ON(ret);
1958 btrfs_free_leaf_ref(root, ref);
1959 } else {
1960 WARN_ON(1);
1961 }
1962 }
1963 next_key:
1964 btrfs_release_path(root, path);
1965
1966 if (other_key.objectid == inode->i_ino &&
1967 other_key.type == key.type && other_key.offset > key.offset) {
1968 key.offset = other_key.offset;
1969 cond_resched();
1970 goto again;
1971 }
1972 ret = 0;
1973 out:
1974 /* fixup any changes we've made to the path */
1975 path->lowest_level = 0;
1976 path->keep_locks = 0;
1977 btrfs_release_path(root, path);
1978 return ret;
1979 }
1980
1981 /*
1982 * this can truncate away extent items, csum items and directory items.
1983 * It starts at a high offset and removes keys until it can't find
1984 * any higher than new_size
1985 *
1986 * csum items that cross the new i_size are truncated to the new size
1987 * as well.
1988 *
1989 * min_type is the minimum key type to truncate down to. If set to 0, this
1990 * will kill all the items on this inode, including the INODE_ITEM_KEY.
1991 */
1992 noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
1993 struct btrfs_root *root,
1994 struct inode *inode,
1995 u64 new_size, u32 min_type)
1996 {
1997 int ret;
1998 struct btrfs_path *path;
1999 struct btrfs_key key;
2000 struct btrfs_key found_key;
2001 u32 found_type;
2002 struct extent_buffer *leaf;
2003 struct btrfs_file_extent_item *fi;
2004 u64 extent_start = 0;
2005 u64 extent_num_bytes = 0;
2006 u64 item_end = 0;
2007 u64 root_gen = 0;
2008 u64 root_owner = 0;
2009 int found_extent;
2010 int del_item;
2011 int pending_del_nr = 0;
2012 int pending_del_slot = 0;
2013 int extent_type = -1;
2014 u64 mask = root->sectorsize - 1;
2015
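/*
 * mask covers the byte offset within one sector, so new_size & (~mask)
 * rounds the new size down to a sector boundary.  With a 4K sectorsize,
 * for example, a new_size of 6000 becomes 4096 before the cached
 * extents past it are dropped.
 */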
2016 if (root->ref_cows)
2017 btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
2018 path = btrfs_alloc_path();
2019 BUG_ON(!path);
2020 path->reada = -1;
2021
2022 /* FIXME, add redo link to tree so we don't leak on crash */
2023 key.objectid = inode->i_ino;
2024 key.offset = (u64)-1;
2025 key.type = (u8)-1;
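/*
 * offset (u64)-1 with type (u8)-1 is the largest possible key for this
 * objectid, so the search below lands just past the inode's last item
 * and the loop walks backwards from there.
 */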
2026
2027 btrfs_init_path(path);
2028
2029 ret = drop_csum_leaves(trans, root, path, inode, new_size);
2030 BUG_ON(ret);
2031
2032 search_again:
2033 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2034 if (ret < 0)
2035 goto error;
2037 if (ret > 0) {
2038 /* there are no items in the tree for us to truncate, we're
2039 * done
2040 */
2041 if (path->slots[0] == 0) {
2042 ret = 0;
2043 goto error;
2044 }
2045 path->slots[0]--;
2046 }
2047
2048 while (1) {
2049 fi = NULL;
2050 leaf = path->nodes[0];
2051 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2052 found_type = btrfs_key_type(&found_key);
2053
2054 if (found_key.objectid != inode->i_ino)
2055 break;
2056
2057 if (found_type < min_type)
2058 break;
2059
2060 item_end = found_key.offset;
2061 if (found_type == BTRFS_EXTENT_DATA_KEY) {
2062 fi = btrfs_item_ptr(leaf, path->slots[0],
2063 struct btrfs_file_extent_item);
2064 extent_type = btrfs_file_extent_type(leaf, fi);
2065 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2066 item_end +=
2067 btrfs_file_extent_num_bytes(leaf, fi);
2068 } else {
2069 item_end += btrfs_file_extent_inline_len(leaf,
2070 fi);
2071 }
2072 item_end--;
2073 }
2074 if (found_type == BTRFS_CSUM_ITEM_KEY) {
2075 ret = btrfs_csum_truncate(trans, root, path,
2076 new_size);
2077 BUG_ON(ret);
2078 }
2079 if (item_end < new_size) {
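/*
 * everything in this item ends before the new size.  Key types are
 * ordered, so jump straight to the next smaller type that may still
 * have items past new_size instead of stepping one item at a time.
 */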
2080 if (found_type == BTRFS_DIR_ITEM_KEY) {
2081 found_type = BTRFS_INODE_ITEM_KEY;
2082 } else if (found_type == BTRFS_EXTENT_ITEM_KEY) {
2083 found_type = BTRFS_CSUM_ITEM_KEY;
2084 } else if (found_type == BTRFS_EXTENT_DATA_KEY) {
2085 found_type = BTRFS_XATTR_ITEM_KEY;
2086 } else if (found_type == BTRFS_XATTR_ITEM_KEY) {
2087 found_type = BTRFS_INODE_REF_KEY;
2088 } else if (found_type) {
2089 found_type--;
2090 } else {
2091 break;
2092 }
2093 btrfs_set_key_type(&key, found_type);
2094 goto next;
2095 }
2096 if (found_key.offset >= new_size)
2097 del_item = 1;
2098 else
2099 del_item = 0;
2100 found_extent = 0;
2101
2102 /* FIXME, shrink the extent if the ref count is only 1 */
2103 if (found_type != BTRFS_EXTENT_DATA_KEY)
2104 goto delete;
2105
2106 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2107 u64 num_dec;
2108 extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
2109 if (!del_item) {
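/*
 * this extent straddles new_size: keep it but trim it in place.  The
 * surviving length is rounded up to a whole sector, e.g. with 4K
 * sectors a 5000 byte tail keeps 8192 bytes of the extent.
 */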
2110 u64 orig_num_bytes =
2111 btrfs_file_extent_num_bytes(leaf, fi);
2112 extent_num_bytes = new_size -
2113 found_key.offset + root->sectorsize - 1;
2114 extent_num_bytes = extent_num_bytes &
2115 ~((u64)root->sectorsize - 1);
2116 btrfs_set_file_extent_num_bytes(leaf, fi,
2117 extent_num_bytes);
2118 num_dec = (orig_num_bytes -
2119 extent_num_bytes);
2120 if (root->ref_cows && extent_start != 0)
2121 inode_sub_bytes(inode, num_dec);
2122 btrfs_mark_buffer_dirty(leaf);
2123 } else {
2124 extent_num_bytes =
2125 btrfs_file_extent_disk_num_bytes(leaf,
2126 fi);
2127 /* FIXME blocksize != 4096 */
2128 num_dec = btrfs_file_extent_num_bytes(leaf, fi);
2129 if (extent_start != 0) {
2130 found_extent = 1;
2131 if (root->ref_cows)
2132 inode_sub_bytes(inode, num_dec);
2133 }
2134 root_gen = btrfs_header_generation(leaf);
2135 root_owner = btrfs_header_owner(leaf);
2136 }
2137 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
2138 /*
2139 * we can't truncate inline items that have had
2140 * special encodings
2141 */
2142 if (!del_item &&
2143 btrfs_file_extent_compression(leaf, fi) == 0 &&
2144 btrfs_file_extent_encryption(leaf, fi) == 0 &&
2145 btrfs_file_extent_other_encoding(leaf, fi) == 0) {
2146 u32 size = new_size - found_key.offset;
2147
2148 if (root->ref_cows) {
2149 inode_sub_bytes(inode, item_end + 1 -
2150 new_size);
2151 }
2152 size =
2153 btrfs_file_extent_calc_inline_size(size);
2154 ret = btrfs_truncate_item(trans, root, path,
2155 size, 1);
2156 BUG_ON(ret);
2157 } else if (root->ref_cows) {
2158 inode_sub_bytes(inode, item_end + 1 -
2159 found_key.offset);
2160 }
2161 }
2162 delete:
2163 if (del_item) {
2164 if (!pending_del_nr) {
2165 /* no pending yet, add ourselves */
2166 pending_del_slot = path->slots[0];
2167 pending_del_nr = 1;
2168 } else if (pending_del_nr &&
2169 path->slots[0] + 1 == pending_del_slot) {
2170 /* hop on the pending chunk */
2171 pending_del_nr++;
2172 pending_del_slot = path->slots[0];
2173 } else {
2174 printk("bad pending slot %d pending_del_nr %d pending_del_slot %d\n", path->slots[0], pending_del_nr, pending_del_slot);
2175 }
2176 } else {
2177 break;
2178 }
2179 if (found_extent) {
2180 ret = btrfs_free_extent(trans, root, extent_start,
2181 extent_num_bytes,
2182 leaf->start, root_owner,
2183 root_gen, inode->i_ino, 0);
2184 BUG_ON(ret);
2185 }
2186 next:
2187 if (path->slots[0] == 0) {
2188 if (pending_del_nr)
2189 goto del_pending;
2190 btrfs_release_path(root, path);
2191 goto search_again;
2192 }
2193
2194 path->slots[0]--;
2195 if (pending_del_nr &&
2196 path->slots[0] + 1 != pending_del_slot) {
2197 struct btrfs_key debug;
2198 del_pending:
2199 btrfs_item_key_to_cpu(path->nodes[0], &debug,
2200 pending_del_slot);
2201 ret = btrfs_del_items(trans, root, path,
2202 pending_del_slot,
2203 pending_del_nr);
2204 BUG_ON(ret);
2205 pending_del_nr = 0;
2206 btrfs_release_path(root, path);
2207 goto search_again;
2208 }
2209 }
2210 ret = 0;
2211 error:
2212 if (pending_del_nr) {
2213 ret = btrfs_del_items(trans, root, path, pending_del_slot,
2214 pending_del_nr);
2215 }
2216 btrfs_free_path(path);
2217 inode->i_sb->s_dirt = 1;
2218 return ret;
2219 }
2220
2221 /*
2222 * taken from block_truncate_page, but does cow as it zeros out
2223 * any bytes left in the last page in the file.
2224 */
2225 static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
2226 {
2227 struct inode *inode = mapping->host;
2228 struct btrfs_root *root = BTRFS_I(inode)->root;
2229 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2230 struct btrfs_ordered_extent *ordered;
2231 char *kaddr;
2232 u32 blocksize = root->sectorsize;
2233 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2234 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2235 struct page *page;
2236 int ret = 0;
2237 u64 page_start;
2238 u64 page_end;
2239
2240 if ((offset & (blocksize - 1)) == 0)
2241 goto out;
2242
2243 ret = -ENOMEM;
2244 again:
2245 page = grab_cache_page(mapping, index);
2246 if (!page)
2247 goto out;
2248
2249 page_start = page_offset(page);
2250 page_end = page_start + PAGE_CACHE_SIZE - 1;
2251
2252 if (!PageUptodate(page)) {
2253 ret = btrfs_readpage(NULL, page);
2254 lock_page(page);
2255 if (page->mapping != mapping) {
2256 unlock_page(page);
2257 page_cache_release(page);
2258 goto again;
2259 }
2260 if (!PageUptodate(page)) {
2261 ret = -EIO;
2262 goto out_unlock;
2263 }
2264 }
2265 wait_on_page_writeback(page);
2266
2267 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
2268 set_page_extent_mapped(page);
2269
2270 ordered = btrfs_lookup_ordered_extent(inode, page_start);
2271 if (ordered) {
2272 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2273 unlock_page(page);
2274 page_cache_release(page);
2275 btrfs_start_ordered_extent(inode, ordered, 1);
2276 btrfs_put_ordered_extent(ordered);
2277 goto again;
2278 }
2279
2280 btrfs_set_extent_delalloc(inode, page_start, page_end);
2281 ret = 0;
2282 if (offset != PAGE_CACHE_SIZE) {
2283 kaddr = kmap(page);
2284 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2285 flush_dcache_page(page);
2286 kunmap(page);
2287 }
2288 ClearPageChecked(page);
2289 set_page_dirty(page);
2290 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2291
2292 out_unlock:
2293 unlock_page(page);
2294 page_cache_release(page);
2295 out:
2296 return ret;
2297 }
2298
2299 int btrfs_cont_expand(struct inode *inode, loff_t size)
2300 {
2301 struct btrfs_trans_handle *trans;
2302 struct btrfs_root *root = BTRFS_I(inode)->root;
2303 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2304 struct extent_map *em;
2305 u64 mask = root->sectorsize - 1;
2306 u64 hole_start = (inode->i_size + mask) & ~mask;
2307 u64 block_end = (size + mask) & ~mask;
2308 u64 last_byte;
2309 u64 cur_offset;
2310 u64 hole_size;
2311 int err;
2312
2313 if (size <= hole_start)
2314 return 0;
2315
2316 err = btrfs_check_free_space(root, 1, 0);
2317 if (err)
2318 return err;
2319
2320 btrfs_truncate_page(inode->i_mapping, inode->i_size);
2321
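/*
 * make sure no ordered extent covers the start of the hole before the
 * range is locked.  A new ordered extent can appear between the wait
 * and the lock, so retry until the range is really clean.
 */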
2322 while (1) {
2323 struct btrfs_ordered_extent *ordered;
2324 btrfs_wait_ordered_range(inode, hole_start,
2325 block_end - hole_start);
2326 lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
2327 ordered = btrfs_lookup_ordered_extent(inode, hole_start);
2328 if (!ordered)
2329 break;
2330 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
2331 btrfs_put_ordered_extent(ordered);
2332 }
2333
2334 trans = btrfs_start_transaction(root, 1);
2335 btrfs_set_trans_block_group(trans, inode);
2336
2337 cur_offset = hole_start;
2338 while (1) {
2339 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
2340 block_end - cur_offset, 0);
2341 BUG_ON(IS_ERR(em) || !em);
2342 last_byte = min(extent_map_end(em), block_end);
2343 last_byte = (last_byte + mask) & ~mask;
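/*
 * EXTENT_FLAG_VACANCY marks a hole in the file.  The file extent
 * inserted below uses a disk_bytenr of zero, which is how btrfs
 * records an explicit hole on disk.
 */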
2344 if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
2345 hole_size = last_byte - cur_offset;
2346 err = btrfs_insert_file_extent(trans, root,
2347 inode->i_ino, cur_offset, 0,
2348 0, hole_size, 0, hole_size,
2349 0, 0, 0);
2350 btrfs_drop_extent_cache(inode, hole_start,
2351 last_byte - 1, 0);
2352 }
2353 free_extent_map(em);
2354 cur_offset = last_byte;
2355 if (err || cur_offset >= block_end)
2356 break;
2357 }
2358
2359 btrfs_end_transaction(trans, root);
2360 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
2361 return err;
2362 }
2363
2364 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
2365 {
2366 struct inode *inode = dentry->d_inode;
2367 int err;
2368
2369 err = inode_change_ok(inode, attr);
2370 if (err)
2371 return err;
2372
2373 if (S_ISREG(inode->i_mode) &&
2374 attr->ia_valid & ATTR_SIZE && attr->ia_size > inode->i_size) {
2375 err = btrfs_cont_expand(inode, attr->ia_size);
2376 if (err)
2377 return err;
2378 }
2379
2380 err = inode_setattr(inode, attr);
2381
2382 if (!err && ((attr->ia_valid & ATTR_MODE)))
2383 err = btrfs_acl_chmod(inode);
2384 return err;
2385 }
2386
2387 void btrfs_delete_inode(struct inode *inode)
2388 {
2389 struct btrfs_trans_handle *trans;
2390 struct btrfs_root *root = BTRFS_I(inode)->root;
2391 unsigned long nr;
2392 int ret;
2393
2394 truncate_inode_pages(&inode->i_data, 0);
2395 if (is_bad_inode(inode)) {
2396 btrfs_orphan_del(NULL, inode);
2397 goto no_delete;
2398 }
2399 btrfs_wait_ordered_range(inode, 0, (u64)-1);
2400
2401 btrfs_i_size_write(inode, 0);
2402 trans = btrfs_start_transaction(root, 1);
2403
2404 btrfs_set_trans_block_group(trans, inode);
2405 ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size, 0);
2406 if (ret) {
2407 btrfs_orphan_del(NULL, inode);
2408 goto no_delete_lock;
2409 }
2410
2411 btrfs_orphan_del(trans, inode);
2412
2413 nr = trans->blocks_used;
2414 clear_inode(inode);
2415
2416 btrfs_end_transaction(trans, root);
2417 btrfs_btree_balance_dirty(root, nr);
2418 return;
2419
2420 no_delete_lock:
2421 nr = trans->blocks_used;
2422 btrfs_end_transaction(trans, root);
2423 btrfs_btree_balance_dirty(root, nr);
2424 no_delete:
2425 clear_inode(inode);
2426 }
2427
2428 /*
2429 * this returns the key found in the dir entry in the location pointer.
2430 * If no dir entries were found, location->objectid is 0.
2431 */
2432 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
2433 struct btrfs_key *location)
2434 {
2435 const char *name = dentry->d_name.name;
2436 int namelen = dentry->d_name.len;
2437 struct btrfs_dir_item *di;
2438 struct btrfs_path *path;
2439 struct btrfs_root *root = BTRFS_I(dir)->root;
2440 int ret = 0;
2441
2442 path = btrfs_alloc_path();
2443 BUG_ON(!path);
2444
2445 di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name,
2446 namelen, 0);
2447 if (IS_ERR(di))
2448 ret = PTR_ERR(di);
2449 if (!di || IS_ERR(di))
2450 goto out_err;
2452 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
2453 out:
2454 btrfs_free_path(path);
2455 return ret;
2456 out_err:
2457 location->objectid = 0;
2458 goto out;
2459 }
2460
2461 /*
2462 * when we hit a tree root in a directory, the btrfs part of the inode
2463 * needs to be changed to reflect the root directory of the tree root. This
2464 * is kind of like crossing a mount point.
2465 */
2466 static int fixup_tree_root_location(struct btrfs_root *root,
2467 struct btrfs_key *location,
2468 struct btrfs_root **sub_root,
2469 struct dentry *dentry)
2470 {
2471 struct btrfs_root_item *ri;
2472
2473 if (btrfs_key_type(location) != BTRFS_ROOT_ITEM_KEY)
2474 return 0;
2475 if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
2476 return 0;
2477
2478 *sub_root = btrfs_read_fs_root(root->fs_info, location,
2479 dentry->d_name.name,
2480 dentry->d_name.len);
2481 if (IS_ERR(*sub_root))
2482 return PTR_ERR(*sub_root);
2483
2484 ri = &(*sub_root)->root_item;
2485 location->objectid = btrfs_root_dirid(ri);
2486 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
2487 location->offset = 0;
2488
2489 return 0;
2490 }
2491
2492 static noinline void init_btrfs_i(struct inode *inode)
2493 {
2494 struct btrfs_inode *bi = BTRFS_I(inode);
2495
2496 bi->i_acl = NULL;
2497 bi->i_default_acl = NULL;
2498
2499 bi->generation = 0;
2500 bi->last_trans = 0;
2501 bi->logged_trans = 0;
2502 bi->delalloc_bytes = 0;
2503 bi->disk_i_size = 0;
2504 bi->flags = 0;
2505 bi->index_cnt = (u64)-1;
2506 bi->log_dirty_trans = 0;
2507 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
2508 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
2509 inode->i_mapping, GFP_NOFS);
2510 extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
2511 inode->i_mapping, GFP_NOFS);
2512 INIT_LIST_HEAD(&BTRFS_I(inode)->delalloc_inodes);
2513 btrfs_ordered_inode_tree_init(&BTRFS_I(inode)->ordered_tree);
2514 mutex_init(&BTRFS_I(inode)->csum_mutex);
2515 mutex_init(&BTRFS_I(inode)->extent_mutex);
2516 mutex_init(&BTRFS_I(inode)->log_mutex);
2517 }
2518
2519 static int btrfs_init_locked_inode(struct inode *inode, void *p)
2520 {
2521 struct btrfs_iget_args *args = p;
2522 inode->i_ino = args->ino;
2523 init_btrfs_i(inode);
2524 BTRFS_I(inode)->root = args->root;
2525 return 0;
2526 }
2527
2528 static int btrfs_find_actor(struct inode *inode, void *opaque)
2529 {
2530 struct btrfs_iget_args *args = opaque;
2531 return (args->ino == inode->i_ino &&
2532 args->root == BTRFS_I(inode)->root);
2533 }
2534
2535 struct inode *btrfs_ilookup(struct super_block *s, u64 objectid,
2536 struct btrfs_root *root, int wait)
2537 {
2538 struct inode *inode;
2539 struct btrfs_iget_args args;
2540 args.ino = objectid;
2541 args.root = root;
2542
2543 if (wait) {
2544 inode = ilookup5(s, objectid, btrfs_find_actor,
2545 (void *)&args);
2546 } else {
2547 inode = ilookup5_nowait(s, objectid, btrfs_find_actor,
2548 (void *)&args);
2549 }
2550 return inode;
2551 }
2552
2553 struct inode *btrfs_iget_locked(struct super_block *s, u64 objectid,
2554 struct btrfs_root *root)
2555 {
2556 struct inode *inode;
2557 struct btrfs_iget_args args;
2558 args.ino = objectid;
2559 args.root = root;
2560
2561 inode = iget5_locked(s, objectid, btrfs_find_actor,
2562 btrfs_init_locked_inode,
2563 (void *)&args);
2564 return inode;
2565 }
2566
2567 /* Get an inode object given its location and corresponding root.
2568 * Returns in *is_new if the inode was read from disk
2569 */
2570 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
2571 struct btrfs_root *root, int *is_new)
2572 {
2573 struct inode *inode;
2574
2575 inode = btrfs_iget_locked(s, location->objectid, root);
2576 if (!inode)
2577 return ERR_PTR(-EACCES);
2578
2579 if (inode->i_state & I_NEW) {
2580 BTRFS_I(inode)->root = root;
2581 memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
2582 btrfs_read_locked_inode(inode);
2583 unlock_new_inode(inode);
2584 if (is_new)
2585 *is_new = 1;
2586 } else {
2587 if (is_new)
2588 *is_new = 0;
2589 }
2590
2591 return inode;
2592 }
2593
2594 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
2595 struct nameidata *nd)
2596 {
2597 struct inode *inode;
2598 struct btrfs_inode *bi = BTRFS_I(dir);
2599 struct btrfs_root *root = bi->root;
2600 struct btrfs_root *sub_root = root;
2601 struct btrfs_key location;
2602 int ret, new, do_orphan = 0;
2603
2604 if (dentry->d_name.len > BTRFS_NAME_LEN)
2605 return ERR_PTR(-ENAMETOOLONG);
2606
2607 ret = btrfs_inode_by_name(dir, dentry, &location);
2608
2609 if (ret < 0)
2610 return ERR_PTR(ret);
2611
2612 inode = NULL;
2613 if (location.objectid) {
2614 ret = fixup_tree_root_location(root, &location, &sub_root,
2615 dentry);
2616 if (ret < 0)
2617 return ERR_PTR(ret);
2618 if (ret > 0)
2619 return ERR_PTR(-ENOENT);
2620 inode = btrfs_iget(dir->i_sb, &location, sub_root, &new);
2621 if (IS_ERR(inode))
2622 return ERR_CAST(inode);
2623
2624 /* the inode and parent dir are two different roots */
2625 if (new && root != sub_root) {
2626 igrab(inode);
2627 sub_root->inode = inode;
2628 do_orphan = 1;
2629 }
2630 }
2631
2632 if (unlikely(do_orphan))
2633 btrfs_orphan_cleanup(sub_root);
2634
2635 return d_splice_alias(inode, dentry);
2636 }
2637
2638 static unsigned char btrfs_filetype_table[] = {
2639 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
2640 };
2641
2642 static int btrfs_real_readdir(struct file *filp, void *dirent,
2643 filldir_t filldir)
2644 {
2645 struct inode *inode = filp->f_dentry->d_inode;
2646 struct btrfs_root *root = BTRFS_I(inode)->root;
2647 struct btrfs_item *item;
2648 struct btrfs_dir_item *di;
2649 struct btrfs_key key;
2650 struct btrfs_key found_key;
2651 struct btrfs_path *path;
2652 int ret;
2653 u32 nritems;
2654 struct extent_buffer *leaf;
2655 int slot;
2656 int advance;
2657 unsigned char d_type;
2658 int over = 0;
2659 u32 di_cur;
2660 u32 di_total;
2661 u32 di_len;
2662 int key_type = BTRFS_DIR_INDEX_KEY;
2663 char tmp_name[32];
2664 char *name_ptr;
2665 int name_len;
2666
2667 /* FIXME, use a real flag for deciding about the key type */
2668 if (root->fs_info->tree_root == root)
2669 key_type = BTRFS_DIR_ITEM_KEY;
2670
2671 /* special case for "." */
2672 if (filp->f_pos == 0) {
2673 over = filldir(dirent, ".", 1,
2674 1, inode->i_ino,
2675 DT_DIR);
2676 if (over)
2677 return 0;
2678 filp->f_pos = 1;
2679 }
2680 /* special case for .., just use the back ref */
2681 if (filp->f_pos == 1) {
2682 u64 pino = parent_ino(filp->f_path.dentry);
2683 over = filldir(dirent, "..", 2,
2684 2, pino, DT_DIR);
2685 if (over)
2686 return 0;
2687 filp->f_pos = 2;
2688 }
2689
2690 path = btrfs_alloc_path();
2691 path->reada = 2;
2692
2693 btrfs_set_key_type(&key, key_type);
2694 key.offset = filp->f_pos;
2695 key.objectid = inode->i_ino;
2696
2697 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2698 if (ret < 0)
2699 goto err;
2700 advance = 0;
2701
2702 while (1) {
2703 leaf = path->nodes[0];
2704 nritems = btrfs_header_nritems(leaf);
2705 slot = path->slots[0];
2706 if (advance || slot >= nritems) {
2707 if (slot >= nritems - 1) {
2708 ret = btrfs_next_leaf(root, path);
2709 if (ret)
2710 break;
2711 leaf = path->nodes[0];
2712 nritems = btrfs_header_nritems(leaf);
2713 slot = path->slots[0];
2714 } else {
2715 slot++;
2716 path->slots[0]++;
2717 }
2718 }
2719 advance = 1;
2720 item = btrfs_item_nr(leaf, slot);
2721 btrfs_item_key_to_cpu(leaf, &found_key, slot);
2722
2723 if (found_key.objectid != key.objectid)
2724 break;
2725 if (btrfs_key_type(&found_key) != key_type)
2726 break;
2727 if (found_key.offset < filp->f_pos)
2728 continue;
2729
2730 filp->f_pos = found_key.offset;
2731
2732 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
2733 di_cur = 0;
2734 di_total = btrfs_item_size(leaf, item);
2735
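/*
 * a single dir item key can pack more than one entry when different
 * names hash to the same value, so walk every entry stored under
 * this key.
 */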
2736 while (di_cur < di_total) {
2737 struct btrfs_key location;
2738
2739 name_len = btrfs_dir_name_len(leaf, di);
2740 if (name_len <= sizeof(tmp_name)) {
2741 name_ptr = tmp_name;
2742 } else {
2743 name_ptr = kmalloc(name_len, GFP_NOFS);
2744 if (!name_ptr) {
2745 ret = -ENOMEM;
2746 goto err;
2747 }
2748 }
2749 read_extent_buffer(leaf, name_ptr,
2750 (unsigned long)(di + 1), name_len);
2751
2752 d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
2753 btrfs_dir_item_key_to_cpu(leaf, di, &location);
2754 over = filldir(dirent, name_ptr, name_len,
2755 found_key.offset, location.objectid,
2756 d_type);
2757
2758 if (name_ptr != tmp_name)
2759 kfree(name_ptr);
2760
2761 if (over)
2762 goto nopos;
2763
2764 di_len = btrfs_dir_name_len(leaf, di) +
2765 btrfs_dir_data_len(leaf, di) + sizeof(*di);
2766 di_cur += di_len;
2767 di = (struct btrfs_dir_item *)((char *)di + di_len);
2768 }
2769 }
2770
2771 /* Reached end of directory/root. Bump pos past the last item. */
2772 if (key_type == BTRFS_DIR_INDEX_KEY)
2773 filp->f_pos = INT_LIMIT(typeof(filp->f_pos));
2774 else
2775 filp->f_pos++;
2776 nopos:
2777 ret = 0;
2778 err:
2779 btrfs_free_path(path);
2780 return ret;
2781 }
2782
2783 int btrfs_write_inode(struct inode *inode, int wait)
2784 {
2785 struct btrfs_root *root = BTRFS_I(inode)->root;
2786 struct btrfs_trans_handle *trans;
2787 int ret = 0;
2788
2789 if (root->fs_info->closing > 1)
2790 return 0;
2791
2792 if (wait) {
2793 trans = btrfs_join_transaction(root, 1);
2794 btrfs_set_trans_block_group(trans, inode);
2795 ret = btrfs_commit_transaction(trans, root);
2796 }
2797 return ret;
2798 }
2799
2800 /*
2801 * This is somewhat expensive, updating the tree every time the
2802 * inode changes. But, it is most likely to find the inode in cache.
2803 * FIXME, needs more benchmarking...there are no reasons other than performance
2804 * to keep or drop this code.
2805 */
2806 void btrfs_dirty_inode(struct inode *inode)
2807 {
2808 struct btrfs_root *root = BTRFS_I(inode)->root;
2809 struct btrfs_trans_handle *trans;
2810
2811 trans = btrfs_join_transaction(root, 1);
2812 btrfs_set_trans_block_group(trans, inode);
2813 btrfs_update_inode(trans, root, inode);
2814 btrfs_end_transaction(trans, root);
2815 }
2816
2817 /*
2818 * find the highest existing sequence number in a directory
2819 * and then set the in-memory index_cnt variable to reflect
2820 * free sequence numbers
2821 */
2822 static int btrfs_set_inode_index_count(struct inode *inode)
2823 {
2824 struct btrfs_root *root = BTRFS_I(inode)->root;
2825 struct btrfs_key key, found_key;
2826 struct btrfs_path *path;
2827 struct extent_buffer *leaf;
2828 int ret;
2829
2830 key.objectid = inode->i_ino;
2831 btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
2832 key.offset = (u64)-1;
2833
2834 path = btrfs_alloc_path();
2835 if (!path)
2836 return -ENOMEM;
2837
2838 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2839 if (ret < 0)
2840 goto out;
2841 /* FIXME: we should be able to handle this */
2842 if (ret == 0)
2843 goto out;
2844 ret = 0;
2845
2846 /*
2847 * MAGIC NUMBER EXPLANATION:
2848 * since we search a directory based on f_pos, and '.' and '..' have
2849 * f_pos of 0 and 1 respectively, every real entry has to start at
2850 * index 2
2851 */
2852 if (path->slots[0] == 0) {
2853 BTRFS_I(inode)->index_cnt = 2;
2854 goto out;
2855 }
2856
2857 path->slots[0]--;
2858
2859 leaf = path->nodes[0];
2860 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2861
2862 if (found_key.objectid != inode->i_ino ||
2863 btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
2864 BTRFS_I(inode)->index_cnt = 2;
2865 goto out;
2866 }
2867
2868 BTRFS_I(inode)->index_cnt = found_key.offset + 1;
2869 out:
2870 btrfs_free_path(path);
2871 return ret;
2872 }
2873
2874 /*
2875 * helper to find a free sequence number in a given directory. The current
2876 * code is very simple; later versions will do smarter things in the btree
2877 */
2878 static int btrfs_set_inode_index(struct inode *dir, struct inode *inode,
2879 u64 *index)
2880 {
2881 int ret = 0;
2882
2883 if (BTRFS_I(dir)->index_cnt == (u64)-1) {
2884 ret = btrfs_set_inode_index_count(dir);
2885 if (ret)
2886 return ret;
2888 }
2889
2890 *index = BTRFS_I(dir)->index_cnt;
2891 BTRFS_I(dir)->index_cnt++;
2892
2893 return ret;
2894 }
2895
2896 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
2897 struct btrfs_root *root,
2898 struct inode *dir,
2899 const char *name, int name_len,
2900 u64 ref_objectid,
2901 u64 objectid,
2902 struct btrfs_block_group_cache *group,
2903 int mode, u64 *index)
2904 {
2905 struct inode *inode;
2906 struct btrfs_inode_item *inode_item;
2907 struct btrfs_block_group_cache *new_inode_group;
2908 struct btrfs_key *location;
2909 struct btrfs_path *path;
2910 struct btrfs_inode_ref *ref;
2911 struct btrfs_key key[2];
2912 u32 sizes[2];
2913 unsigned long ptr;
2914 int ret;
2915 int owner;
2916
2917 path = btrfs_alloc_path();
2918 BUG_ON(!path);
2919
2920 inode = new_inode(root->fs_info->sb);
2921 if (!inode) {
/* don't leak the path allocated above */
btrfs_free_path(path);
2922 return ERR_PTR(-ENOMEM);
}
2923 
2924 if (dir) {
2925 ret = btrfs_set_inode_index(dir, inode, index);
2926 if (ret) {
btrfs_free_path(path);
2927 return ERR_PTR(ret);
}
2928 }
2929 /*
2930 * index_cnt is ignored for everything but a dir,
2931 * btrfs_set_inode_index_count has an explanation for the magic
2932 * number
2933 */
2934 init_btrfs_i(inode);
2935 BTRFS_I(inode)->index_cnt = 2;
2936 BTRFS_I(inode)->root = root;
2937 BTRFS_I(inode)->generation = trans->transid;
2938
2939 if (mode & S_IFDIR)
2940 owner = 0;
2941 else
2942 owner = 1;
2943 new_inode_group = btrfs_find_block_group(root, group, 0,
2944 BTRFS_BLOCK_GROUP_METADATA, owner);
2945 if (!new_inode_group) {
2946 printk("find_block group failed\n");
2947 new_inode_group = group;
2948 }
2949 BTRFS_I(inode)->block_group = new_inode_group;
2950
2951 key[0].objectid = objectid;
2952 btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
2953 key[0].offset = 0;
2954
2955 key[1].objectid = objectid;
2956 btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
2957 key[1].offset = ref_objectid;
2958
2959 sizes[0] = sizeof(struct btrfs_inode_item);
2960 sizes[1] = name_len + sizeof(*ref);
2961
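/*
 * insert the inode item and its backref in a single btree operation
 * so one of them can never land on disk without the other.
 */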
2962 ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
2963 if (ret != 0)
2964 goto fail;
2965
2966 if (objectid > root->highest_inode)
2967 root->highest_inode = objectid;
2968
2969 inode->i_uid = current->fsuid;
2970 inode->i_gid = current->fsgid;
2971 inode->i_mode = mode;
2972 inode->i_ino = objectid;
2973 inode_set_bytes(inode, 0);
2974 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
2975 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2976 struct btrfs_inode_item);
2977 fill_inode_item(trans, path->nodes[0], inode_item, inode);
2978
2979 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
2980 struct btrfs_inode_ref);
2981 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
2982 btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
2983 ptr = (unsigned long)(ref + 1);
2984 write_extent_buffer(path->nodes[0], name, ptr, name_len);
2985
2986 btrfs_mark_buffer_dirty(path->nodes[0]);
2987 btrfs_free_path(path);
2988
2989 location = &BTRFS_I(inode)->location;
2990 location->objectid = objectid;
2991 location->offset = 0;
2992 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
2993
2994 insert_inode_hash(inode);
2995 return inode;
2996 fail:
2997 if (dir)
2998 BTRFS_I(dir)->index_cnt--;
2999 btrfs_free_path(path);
3000 return ERR_PTR(ret);
3001 }
3002
3003 static inline u8 btrfs_inode_type(struct inode *inode)
3004 {
3005 return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
3006 }
3007
3008 /*
3009 * utility function to add 'inode' into 'parent_inode' with
3010 * a given name and a given sequence number.
3011 * if 'add_backref' is true, also insert a backref from the
3012 * inode to the parent directory.
3013 */
3014 int btrfs_add_link(struct btrfs_trans_handle *trans,
3015 struct inode *parent_inode, struct inode *inode,
3016 const char *name, int name_len, int add_backref, u64 index)
3017 {
3018 int ret;
3019 struct btrfs_key key;
3020 struct btrfs_root *root = BTRFS_I(parent_inode)->root;
3021
3022 key.objectid = inode->i_ino;
3023 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
3024 key.offset = 0;
3025
3026 ret = btrfs_insert_dir_item(trans, root, name, name_len,
3027 parent_inode->i_ino,
3028 &key, btrfs_inode_type(inode),
3029 index);
3030 if (ret == 0) {
3031 if (add_backref) {
3032 ret = btrfs_insert_inode_ref(trans, root,
3033 name, name_len,
3034 inode->i_ino,
3035 parent_inode->i_ino,
3036 index);
3037 }
3038 btrfs_i_size_write(parent_inode, parent_inode->i_size +
3039 name_len * 2);
3040 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
3041 ret = btrfs_update_inode(trans, root, parent_inode);
3042 }
3043 return ret;
3044 }
3045
3046 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
3047 struct dentry *dentry, struct inode *inode,
3048 int backref, u64 index)
3049 {
3050 int err = btrfs_add_link(trans, dentry->d_parent->d_inode,
3051 inode, dentry->d_name.name,
3052 dentry->d_name.len, backref, index);
3053 if (!err) {
3054 d_instantiate(dentry, inode);
3055 return 0;
3056 }
3057 if (err > 0)
3058 err = -EEXIST;
3059 return err;
3060 }
3061
3062 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
3063 int mode, dev_t rdev)
3064 {
3065 struct btrfs_trans_handle *trans;
3066 struct btrfs_root *root = BTRFS_I(dir)->root;
3067 struct inode *inode = NULL;
3068 int err;
3069 int drop_inode = 0;
3070 u64 objectid;
3071 unsigned long nr = 0;
3072 u64 index = 0;
3073
3074 if (!new_valid_dev(rdev))
3075 return -EINVAL;
3076
3077 err = btrfs_check_free_space(root, 1, 0);
3078 if (err)
3079 goto fail;
3080
3081 trans = btrfs_start_transaction(root, 1);
3082 btrfs_set_trans_block_group(trans, dir);
3083
3084 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3085 if (err) {
3086 err = -ENOSPC;
3087 goto out_unlock;
3088 }
3089
3090 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
3091 dentry->d_name.len,
3092 dentry->d_parent->d_inode->i_ino, objectid,
3093 BTRFS_I(dir)->block_group, mode, &index);
3094 err = PTR_ERR(inode);
3095 if (IS_ERR(inode))
3096 goto out_unlock;
3097
3098 err = btrfs_init_acl(inode, dir);
3099 if (err) {
3100 drop_inode = 1;
3101 goto out_unlock;
3102 }
3103
3104 btrfs_set_trans_block_group(trans, inode);
3105 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
3106 if (err)
3107 drop_inode = 1;
3108 else {
3109 inode->i_op = &btrfs_special_inode_operations;
3110 init_special_inode(inode, inode->i_mode, rdev);
3111 btrfs_update_inode(trans, root, inode);
3112 }
3113 dir->i_sb->s_dirt = 1;
3114 btrfs_update_inode_block_group(trans, inode);
3115 btrfs_update_inode_block_group(trans, dir);
3116 out_unlock:
3117 nr = trans->blocks_used;
3118 btrfs_end_transaction_throttle(trans, root);
3119 fail:
3120 if (drop_inode) {
3121 inode_dec_link_count(inode);
3122 iput(inode);
3123 }
3124 btrfs_btree_balance_dirty(root, nr);
3125 return err;
3126 }
3127
3128 static int btrfs_create(struct inode *dir, struct dentry *dentry,
3129 int mode, struct nameidata *nd)
3130 {
3131 struct btrfs_trans_handle *trans;
3132 struct btrfs_root *root = BTRFS_I(dir)->root;
3133 struct inode *inode = NULL;
3134 int err;
3135 int drop_inode = 0;
3136 unsigned long nr = 0;
3137 u64 objectid;
3138 u64 index = 0;
3139
3140 err = btrfs_check_free_space(root, 1, 0);
3141 if (err)
3142 goto fail;
3143 trans = btrfs_start_transaction(root, 1);
3144 btrfs_set_trans_block_group(trans, dir);
3145
3146 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3147 if (err) {
3148 err = -ENOSPC;
3149 goto out_unlock;
3150 }
3151
3152 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
3153 dentry->d_name.len,
3154 dentry->d_parent->d_inode->i_ino,
3155 objectid, BTRFS_I(dir)->block_group, mode,
3156 &index);
3157 err = PTR_ERR(inode);
3158 if (IS_ERR(inode))
3159 goto out_unlock;
3160
3161 err = btrfs_init_acl(inode, dir);
3162 if (err) {
3163 drop_inode = 1;
3164 goto out_unlock;
3165 }
3166
3167 btrfs_set_trans_block_group(trans, inode);
3168 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
3169 if (err)
3170 drop_inode = 1;
3171 else {
3172 inode->i_mapping->a_ops = &btrfs_aops;
3173 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
3174 inode->i_fop = &btrfs_file_operations;
3175 inode->i_op = &btrfs_file_inode_operations;
3176 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
3177 }
3178 dir->i_sb->s_dirt = 1;
3179 btrfs_update_inode_block_group(trans, inode);
3180 btrfs_update_inode_block_group(trans, dir);
3181 out_unlock:
3182 nr = trans->blocks_used;
3183 btrfs_end_transaction_throttle(trans, root);
3184 fail:
3185 if (drop_inode) {
3186 inode_dec_link_count(inode);
3187 iput(inode);
3188 }
3189 btrfs_btree_balance_dirty(root, nr);
3190 return err;
3191 }
3192
3193 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
3194 struct dentry *dentry)
3195 {
3196 struct btrfs_trans_handle *trans;
3197 struct btrfs_root *root = BTRFS_I(dir)->root;
3198 struct inode *inode = old_dentry->d_inode;
3199 u64 index;
3200 unsigned long nr = 0;
3201 int err;
3202 int drop_inode = 0;
3203
3204 if (inode->i_nlink == 0)
3205 return -ENOENT;
3206
3207 btrfs_inc_nlink(inode);
3208 err = btrfs_check_free_space(root, 1, 0);
3209 if (err)
3210 goto fail;
3211 err = btrfs_set_inode_index(dir, inode, &index);
3212 if (err)
3213 goto fail;
3214
3215 trans = btrfs_start_transaction(root, 1);
3216
3217 btrfs_set_trans_block_group(trans, dir);
3218 atomic_inc(&inode->i_count);
3219
3220 err = btrfs_add_nondir(trans, dentry, inode, 1, index);
3221
3222 if (err)
3223 drop_inode = 1;
3224
3225 dir->i_sb->s_dirt = 1;
3226 btrfs_update_inode_block_group(trans, dir);
3227 err = btrfs_update_inode(trans, root, inode);
3228
3229 if (err)
3230 drop_inode = 1;
3231
3232 nr = trans->blocks_used;
3233 btrfs_end_transaction_throttle(trans, root);
3234 fail:
3235 if (drop_inode) {
3236 inode_dec_link_count(inode);
3237 iput(inode);
3238 }
3239 btrfs_btree_balance_dirty(root, nr);
3240 return err;
3241 }
3242
3243 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
3244 {
3245 struct inode *inode = NULL;
3246 struct btrfs_trans_handle *trans;
3247 struct btrfs_root *root = BTRFS_I(dir)->root;
3248 int err = 0;
3249 int drop_on_err = 0;
3250 u64 objectid = 0;
3251 u64 index = 0;
3252 unsigned long nr = 1;
3253
3254 err = btrfs_check_free_space(root, 1, 0);
3255 if (err)
3256 goto out_unlock;
3257
3258 trans = btrfs_start_transaction(root, 1);
3259 if (IS_ERR(trans)) {
3260 err = PTR_ERR(trans);
3261 goto out_unlock;
3262 }
3263 
3264 btrfs_set_trans_block_group(trans, dir);
3265
3266 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3267 if (err) {
3268 err = -ENOSPC;
3269 goto out_unlock;
3270 }
3271
3272 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
3273 dentry->d_name.len,
3274 dentry->d_parent->d_inode->i_ino, objectid,
3275 BTRFS_I(dir)->block_group, S_IFDIR | mode,
3276 &index);
3277 if (IS_ERR(inode)) {
3278 err = PTR_ERR(inode);
3279 goto out_fail;
3280 }
3281
3282 drop_on_err = 1;
3283
3284 err = btrfs_init_acl(inode, dir);
3285 if (err)
3286 goto out_fail;
3287
3288 inode->i_op = &btrfs_dir_inode_operations;
3289 inode->i_fop = &btrfs_dir_file_operations;
3290 btrfs_set_trans_block_group(trans, inode);
3291
3292 btrfs_i_size_write(inode, 0);
3293 err = btrfs_update_inode(trans, root, inode);
3294 if (err)
3295 goto out_fail;
3296
3297 err = btrfs_add_link(trans, dentry->d_parent->d_inode,
3298 inode, dentry->d_name.name,
3299 dentry->d_name.len, 0, index);
3300 if (err)
3301 goto out_fail;
3302
3303 d_instantiate(dentry, inode);
3304 drop_on_err = 0;
3305 dir->i_sb->s_dirt = 1;
3306 btrfs_update_inode_block_group(trans, inode);
3307 btrfs_update_inode_block_group(trans, dir);
3308
3309 out_fail:
3310 nr = trans->blocks_used;
3311 btrfs_end_transaction_throttle(trans, root);
3312
3313 out_unlock:
3314 if (drop_on_err)
3315 iput(inode);
3316 btrfs_btree_balance_dirty(root, nr);
3317 return err;
3318 }
3319
3320 /* helper for btrfs_get_extent. Given an existing extent in the tree,
3321 * and an extent that you want to insert, deal with overlap and insert
3322 * the new extent into the tree.
3323 */
3324 static int merge_extent_mapping(struct extent_map_tree *em_tree,
3325 struct extent_map *existing,
3326 struct extent_map *em,
3327 u64 map_start, u64 map_len)
3328 {
3329 u64 start_diff;
3330
3331 BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
3332 start_diff = map_start - em->start;
3333 em->start = map_start;
3334 em->len = map_len;
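/*
 * compressed extents always point at the start of the compressed data
 * on disk, so only uncompressed mappings shift block_start along with
 * the logical start.
 */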
3335 if (em->block_start < EXTENT_MAP_LAST_BYTE &&
3336 !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
3337 em->block_start += start_diff;
3338 em->block_len -= start_diff;
3339 }
3340 return add_extent_mapping(em_tree, em);
3341 }
3342
3343 static noinline int uncompress_inline(struct btrfs_path *path,
3344 struct inode *inode, struct page *page,
3345 size_t pg_offset, u64 extent_offset,
3346 struct btrfs_file_extent_item *item)
3347 {
3348 int ret;
3349 struct extent_buffer *leaf = path->nodes[0];
3350 char *tmp;
3351 size_t max_size;
3352 unsigned long inline_size;
3353 unsigned long ptr;
3354
3355 WARN_ON(pg_offset != 0);
3356 max_size = btrfs_file_extent_ram_bytes(leaf, item);
3357 inline_size = btrfs_file_extent_inline_item_len(leaf,
3358 btrfs_item_nr(leaf, path->slots[0]));
3359 tmp = kmalloc(inline_size, GFP_NOFS);
if (!tmp)
return -ENOMEM;
3360 ptr = btrfs_file_extent_inline_start(item);
3361
3362 read_extent_buffer(leaf, tmp, ptr, inline_size);
3363
3364 max_size = min(PAGE_CACHE_SIZE, max_size);
3365 ret = btrfs_zlib_decompress(tmp, page, extent_offset,
3366 inline_size, max_size);
3367 if (ret) {
3368 char *kaddr = kmap_atomic(page, KM_USER0);
3369 unsigned long copy_size = min_t(u64,
3370 PAGE_CACHE_SIZE - pg_offset,
3371 max_size - extent_offset);
3372 memset(kaddr + pg_offset, 0, copy_size);
3373 kunmap_atomic(kaddr, KM_USER0);
3374 }
3375 kfree(tmp);
3376 return 0;
3377 }
3378
3379 /*
3380 * a bit scary, this does extent mapping from logical file offset to the disk.
3381 * the ugly parts come from merging extents from the disk with the
3382 * in-ram representation. This gets more complex because of the data=ordered code,
3383 * where the in-ram extents might be locked pending data=ordered completion.
3384 *
3385 * This also copies inline extents directly into the page.
3386 */
3387 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
3388 size_t pg_offset, u64 start, u64 len,
3389 int create)
3390 {
3391 int ret;
3392 int err = 0;
3393 u64 bytenr;
3394 u64 extent_start = 0;
3395 u64 extent_end = 0;
3396 u64 objectid = inode->i_ino;
3397 u32 found_type;
3398 struct btrfs_path *path = NULL;
3399 struct btrfs_root *root = BTRFS_I(inode)->root;
3400 struct btrfs_file_extent_item *item;
3401 struct extent_buffer *leaf;
3402 struct btrfs_key found_key;
3403 struct extent_map *em = NULL;
3404 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
3405 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3406 struct btrfs_trans_handle *trans = NULL;
3407 int compressed;
3408
3409 again:
3410 spin_lock(&em_tree->lock);
3411 em = lookup_extent_mapping(em_tree, start, len);
3412 if (em)
3413 em->bdev = root->fs_info->fs_devices->latest_bdev;
3414 spin_unlock(&em_tree->lock);
3415
3416 if (em) {
3417 if (em->start > start || em->start + em->len <= start)
3418 free_extent_map(em);
3419 else if (em->block_start == EXTENT_MAP_INLINE && page)
3420 free_extent_map(em);
3421 else
3422 goto out;
3423 }
3424 em = alloc_extent_map(GFP_NOFS);
3425 if (!em) {
3426 err = -ENOMEM;
3427 goto out;
3428 }
3429 em->bdev = root->fs_info->fs_devices->latest_bdev;
3430 em->start = EXTENT_MAP_HOLE;
3431 em->len = (u64)-1;
3432 em->block_len = (u64)-1;
3433
3434 if (!path) {
3435 path = btrfs_alloc_path();
3436 BUG_ON(!path);
3437 }
3438
3439 ret = btrfs_lookup_file_extent(trans, root, path,
3440 objectid, start, trans != NULL);
3441 if (ret < 0) {
3442 err = ret;
3443 goto out;
3444 }
3445
3446 if (ret != 0) {
3447 if (path->slots[0] == 0)
3448 goto not_found;
3449 path->slots[0]--;
3450 }
3451
3452 leaf = path->nodes[0];
3453 item = btrfs_item_ptr(leaf, path->slots[0],
3454 struct btrfs_file_extent_item);
3455 /* are we inside the extent that was found? */
3456 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3457 found_type = btrfs_key_type(&found_key);
3458 if (found_key.objectid != objectid ||
3459 found_type != BTRFS_EXTENT_DATA_KEY) {
3460 goto not_found;
3461 }
3462
3463 found_type = btrfs_file_extent_type(leaf, item);
3464 extent_start = found_key.offset;
3465 compressed = btrfs_file_extent_compression(leaf, item);
3466 if (found_type == BTRFS_FILE_EXTENT_REG) {
3467 extent_end = extent_start +
3468 btrfs_file_extent_num_bytes(leaf, item);
3469 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
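/*
 * inline extents live in the btree leaf.  Their logical end is the
 * data size rounded up to a full sector, since no other extent can
 * start inside that final sector.
 */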
3470 size_t size;
3471 size = btrfs_file_extent_inline_len(leaf, item);
3472 extent_end = (extent_start + size + root->sectorsize - 1) &
3473 ~((u64)root->sectorsize - 1);
3474 }
3475
3476 if (start >= extent_end) {
3477 path->slots[0]++;
3478 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
3479 ret = btrfs_next_leaf(root, path);
3480 if (ret < 0) {
3481 err = ret;
3482 goto out;
3483 }
3484 if (ret > 0)
3485 goto not_found;
3486 leaf = path->nodes[0];
3487 }
3488 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3489 if (found_key.objectid != objectid ||
3490 found_key.type != BTRFS_EXTENT_DATA_KEY)
3491 goto not_found;
3492 if (start + len <= found_key.offset)
3493 goto not_found;
3494 em->start = start;
3495 em->len = found_key.offset - start;
3496 goto not_found_em;
3497 }
3498
3499 if (found_type == BTRFS_FILE_EXTENT_REG) {
3500 em->start = extent_start;
3501 em->len = extent_end - extent_start;
3502 bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
3503 if (bytenr == 0) {
3504 em->block_start = EXTENT_MAP_HOLE;
3505 goto insert;
3506 }
3507 if (compressed) {
3508 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
3509 em->block_start = bytenr;
3510 em->block_len = btrfs_file_extent_disk_num_bytes(leaf,
3511 item);
3512 } else {
3513 bytenr += btrfs_file_extent_offset(leaf, item);
3514 em->block_start = bytenr;
3515 em->block_len = em->len;
3516 }
3517 goto insert;
3518 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
3519 unsigned long ptr;
3520 char *map;
3521 size_t size;
3522 size_t extent_offset;
3523 size_t copy_size;
3524
3525 em->block_start = EXTENT_MAP_INLINE;
3526 if (!page || create) {
3527 em->start = extent_start;
3528 em->len = extent_end - extent_start;
3529 goto out;
3530 }
3531
3532 size = btrfs_file_extent_inline_len(leaf, item);
3533 extent_offset = page_offset(page) + pg_offset - extent_start;
3534 copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
3535 size - extent_offset);
3536 em->start = extent_start + extent_offset;
3537 em->len = (copy_size + root->sectorsize - 1) &
3538 ~((u64)root->sectorsize - 1);
3539 if (compressed)
3540 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
3541 ptr = btrfs_file_extent_inline_start(item) + extent_offset;
3542 if (create == 0 && !PageUptodate(page)) {
3543 if (btrfs_file_extent_compression(leaf, item) ==
3544 BTRFS_COMPRESS_ZLIB) {
3545 ret = uncompress_inline(path, inode, page,
3546 pg_offset,
3547 extent_offset, item);
3548 BUG_ON(ret);
3549 } else {
3550 map = kmap(page);
3551 read_extent_buffer(leaf, map + pg_offset, ptr,
3552 copy_size);
3553 kunmap(page);
3554 }
3555 flush_dcache_page(page);
3556 } else if (create && PageUptodate(page)) {
3557 if (!trans) {
3559 free_extent_map(em);
3560 em = NULL;
3561 btrfs_release_path(root, path);
3562 trans = btrfs_join_transaction(root, 1);
3563 goto again;
3564 }
3565 map = kmap(page);
3566 write_extent_buffer(leaf, map + pg_offset, ptr,
3567 copy_size);
3568 kunmap(page);
3569 btrfs_mark_buffer_dirty(leaf);
3570 }
3571 set_extent_uptodate(io_tree, em->start,
3572 extent_map_end(em) - 1, GFP_NOFS);
3573 goto insert;
3574 } else {
3575 printk("unkknown found_type %d\n", found_type);
3576 WARN_ON(1);
3577 }
3578 not_found:
3579 em->start = start;
3580 em->len = len;
3581 not_found_em:
3582 em->block_start = EXTENT_MAP_HOLE;
3583 set_bit(EXTENT_FLAG_VACANCY, &em->flags);
3584 insert:
3585 btrfs_release_path(root, path);
3586 if (em->start > start || extent_map_end(em) <= start) {
3587 printk("bad extent! em: [%Lu %Lu] passed [%Lu %Lu]\n", em->start, em->len, start, len);
3588 err = -EIO;
3589 goto out;
3590 }
3591
3592 err = 0;
3593 spin_lock(&em_tree->lock);
3594 ret = add_extent_mapping(em_tree, em);
3595 /* it is possible that someone inserted the extent into the tree
3596 * while we had the lock dropped. It is also possible that
3597 * an overlapping map exists in the tree
3598 */
3599 if (ret == -EEXIST) {
3600 struct extent_map *existing;
3601
3602 ret = 0;
3603
3604 existing = lookup_extent_mapping(em_tree, start, len);
3605 if (existing && (existing->start > start ||
3606 existing->start + existing->len <= start)) {
3607 free_extent_map(existing);
3608 existing = NULL;
3609 }
3610 if (!existing) {
3611 existing = lookup_extent_mapping(em_tree, em->start,
3612 em->len);
3613 if (existing) {
3614 err = merge_extent_mapping(em_tree, existing,
3615 em, start,
3616 root->sectorsize);
3617 free_extent_map(existing);
3618 if (err) {
3619 free_extent_map(em);
3620 em = NULL;
3621 }
3622 } else {
3623 err = -EIO;
3624 printk("failing to insert %Lu %Lu\n",
3625 start, len);
3626 free_extent_map(em);
3627 em = NULL;
3628 }
3629 } else {
3630 free_extent_map(em);
3631 em = existing;
3632 err = 0;
3633 }
3634 }
3635 spin_unlock(&em_tree->lock);
3636 out:
3637 if (path)
3638 btrfs_free_path(path);
3639 if (trans) {
3640 ret = btrfs_end_transaction(trans, root);
3641 if (!err)
3642 err = ret;
3644 }
3645 if (err) {
3646 free_extent_map(em);
3647 WARN_ON(1);
3648 return ERR_PTR(err);
3649 }
3650 return em;
3651 }
3652
3653 static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
3654 const struct iovec *iov, loff_t offset,
3655 unsigned long nr_segs)
3656 {
3657 return -EINVAL;
3658 }
3659
3660 static sector_t btrfs_bmap(struct address_space *mapping, sector_t iblock)
3661 {
3662 return extent_bmap(mapping, iblock, btrfs_get_extent);
3663 }
3664
3665 int btrfs_readpage(struct file *file, struct page *page)
3666 {
3667 struct extent_io_tree *tree;
3668 tree = &BTRFS_I(page->mapping->host)->io_tree;
3669 return extent_read_full_page(tree, page, btrfs_get_extent);
3670 }
3671
3672 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
3673 {
3674 struct extent_io_tree *tree;
3675 
3677 if (current->flags & PF_MEMALLOC) {
3678 redirty_page_for_writepage(wbc, page);
3679 unlock_page(page);
3680 return 0;
3681 }
3682 tree = &BTRFS_I(page->mapping->host)->io_tree;
3683 return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
3684 }
3685
3686 int btrfs_writepages(struct address_space *mapping,
3687 struct writeback_control *wbc)
3688 {
3689 struct extent_io_tree *tree;
3690 tree = &BTRFS_I(mapping->host)->io_tree;
3691 return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
3692 }
3693
3694 static int
3695 btrfs_readpages(struct file *file, struct address_space *mapping,
3696 struct list_head *pages, unsigned nr_pages)
3697 {
3698 struct extent_io_tree *tree;
3699 tree = &BTRFS_I(mapping->host)->io_tree;
3700 return extent_readpages(tree, mapping, pages, nr_pages,
3701 btrfs_get_extent);
3702 }
3703 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
3704 {
3705 struct extent_io_tree *tree;
3706 struct extent_map_tree *map;
3707 int ret;
3708
3709 tree = &BTRFS_I(page->mapping->host)->io_tree;
3710 map = &BTRFS_I(page->mapping->host)->extent_tree;
3711 ret = try_release_extent_mapping(map, tree, page, gfp_flags);
3712 if (ret == 1) {
3713 ClearPagePrivate(page);
3714 set_page_private(page, 0);
3715 page_cache_release(page);
3716 }
3717 return ret;
3718 }
3719
3720 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
3721 {
3722 if (PageWriteback(page) || PageDirty(page))
3723 return 0;
3724 return __btrfs_releasepage(page, gfp_flags);
3725 }
3726
3727 static void btrfs_invalidatepage(struct page *page, unsigned long offset)
3728 {
3729 struct extent_io_tree *tree;
3730 struct btrfs_ordered_extent *ordered;
3731 u64 page_start = page_offset(page);
3732 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
3733
3734 wait_on_page_writeback(page);
3735 tree = &BTRFS_I(page->mapping->host)->io_tree;
3736 if (offset) {
3737 btrfs_releasepage(page, GFP_NOFS);
3738 return;
3739 }
3740
3741 lock_extent(tree, page_start, page_end, GFP_NOFS);
3742 ordered = btrfs_lookup_ordered_extent(page->mapping->host,
3743 page_offset(page));
3744 if (ordered) {
3745 /*
3746 * IO on this page will never be started, so we need
3747 * to account for any ordered extents now
3748 */
3749 clear_extent_bit(tree, page_start, page_end,
3750 EXTENT_DIRTY | EXTENT_DELALLOC |
3751 EXTENT_LOCKED, 1, 0, GFP_NOFS);
3752 btrfs_finish_ordered_io(page->mapping->host,
3753 page_start, page_end);
3754 btrfs_put_ordered_extent(ordered);
3755 lock_extent(tree, page_start, page_end, GFP_NOFS);
3756 }
3757 clear_extent_bit(tree, page_start, page_end,
3758 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
3759 EXTENT_ORDERED,
3760 1, 1, GFP_NOFS);
3761 __btrfs_releasepage(page, GFP_NOFS);
3762
3763 ClearPageChecked(page);
3764 if (PagePrivate(page)) {
3765 ClearPagePrivate(page);
3766 set_page_private(page, 0);
3767 page_cache_release(page);
3768 }
3769 }
3770
3771 /*
3772 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
3773 * called from a page fault handler when a page is first dirtied. Hence we must
3774 * be careful to check for EOF conditions here. We set the page up correctly
3775 * for a written page which means we get ENOSPC checking when writing into
3776 * holes and correct delalloc and unwritten extent mapping on filesystems that
3777 * support these features.
3778 *
3779 * We are not allowed to take the i_mutex here so we have to play games to
3780 * protect against truncate races as the page could now be beyond EOF. Because
3781 * vmtruncate() writes the inode size before removing pages, once we have the
3782 * page lock we can determine safely if the page is beyond EOF. If it is not
3783 * beyond EOF, then the page is guaranteed safe against truncation until we
3784 * unlock the page.
3785 */
3786 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
3787 {
3788 struct inode *inode = fdentry(vma->vm_file)->d_inode;
3789 struct btrfs_root *root = BTRFS_I(inode)->root;
3790 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3791 struct btrfs_ordered_extent *ordered;
3792 char *kaddr;
3793 unsigned long zero_start;
3794 loff_t size;
3795 int ret;
3796 u64 page_start;
3797 u64 page_end;
3798
3799 ret = btrfs_check_free_space(root, PAGE_CACHE_SIZE, 0);
3800 if (ret)
3801 goto out;
3802
3803 ret = -EINVAL;
3804 again:
3805 lock_page(page);
3806 size = i_size_read(inode);
3807 page_start = page_offset(page);
3808 page_end = page_start + PAGE_CACHE_SIZE - 1;
3809
3810 if ((page->mapping != inode->i_mapping) ||
3811 (page_start >= size)) {
3812 /* page got truncated out from underneath us */
3813 goto out_unlock;
3814 }
3815 wait_on_page_writeback(page);
3816
3817 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
3818 set_page_extent_mapped(page);
3819
3820 /*
3821 * we can't set the delalloc bits if there are pending ordered
3822 * extents. Drop our locks and wait for them to finish
3823 */
3824 ordered = btrfs_lookup_ordered_extent(inode, page_start);
3825 if (ordered) {
3826 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
3827 unlock_page(page);
3828 btrfs_start_ordered_extent(inode, ordered, 1);
3829 btrfs_put_ordered_extent(ordered);
3830 goto again;
3831 }
3832
3833 btrfs_set_extent_delalloc(inode, page_start, page_end);
3834 ret = 0;
3835
3836 /* page is wholly or partially inside EOF */
3837 if (page_start + PAGE_CACHE_SIZE > size)
3838 zero_start = size & ~PAGE_CACHE_MASK;
3839 else
3840 zero_start = PAGE_CACHE_SIZE;
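/*
 * e.g. with 4k pages and i_size == 5000, the page at offset 4096
 * straddles EOF: zero_start = 5000 & 4095 = 904, so bytes
 * [904, 4095] of the page are zeroed below
 */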
3841
3842 if (zero_start != PAGE_CACHE_SIZE) {
3843 kaddr = kmap(page);
3844 memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
3845 flush_dcache_page(page);
3846 kunmap(page);
3847 }
3848 ClearPageChecked(page);
3849 set_page_dirty(page);
3850 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
3851
3852 out_unlock:
3853 unlock_page(page);
3854 out:
3855 return ret;
3856 }
3857
3858 static void btrfs_truncate(struct inode *inode)
3859 {
3860 struct btrfs_root *root = BTRFS_I(inode)->root;
3861 int ret;
3862 struct btrfs_trans_handle *trans;
3863 unsigned long nr;
3864 u64 mask = root->sectorsize - 1;
3865
3866 if (!S_ISREG(inode->i_mode))
3867 return;
3868 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
3869 return;
3870
3871 btrfs_truncate_page(inode->i_mapping, inode->i_size);
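/* wait for ordered IO from the sector containing i_size through the end of the file */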
3872 btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
3873
3874 trans = btrfs_start_transaction(root, 1);
3875 btrfs_set_trans_block_group(trans, inode);
3876 btrfs_i_size_write(inode, inode->i_size);
3877
3878 ret = btrfs_orphan_add(trans, inode);
3879 if (ret)
3880 goto out;
3881 /* FIXME, add redo link to tree so we don't leak on crash */
3882 ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size,
3883 BTRFS_EXTENT_DATA_KEY);
3884 btrfs_update_inode(trans, root, inode);
3885
3886 ret = btrfs_orphan_del(trans, inode);
3887 BUG_ON(ret);
3888
3889 out:
3890 nr = trans->blocks_used;
3891 ret = btrfs_end_transaction_throttle(trans, root);
3892 BUG_ON(ret);
3893 btrfs_btree_balance_dirty(root, nr);
3894 }
3895
3896 /*
3897 * Invalidate a single dcache entry at the root of the filesystem.
3898 * Needed after creation of snapshot or subvolume.
3899 */
3900 void btrfs_invalidate_dcache_root(struct btrfs_root *root, char *name,
3901 int namelen)
3902 {
3903 struct dentry *alias, *entry;
3904 struct qstr qstr;
3905
3906 alias = d_find_alias(root->fs_info->sb->s_root->d_inode);
3907 if (alias) {
3908 qstr.name = name;
3909 qstr.len = namelen;
3910 /* change me if btrfs ever gets a d_hash operation */
3911 qstr.hash = full_name_hash(qstr.name, qstr.len);
3912 entry = d_lookup(alias, &qstr);
3913 dput(alias);
3914 if (entry) {
3915 d_invalidate(entry);
3916 dput(entry);
3917 }
3918 }
3919 }
3920
3921 /*
3922 * create a new subvolume directory/inode (helper for the ioctl).
3923 */
3924 int btrfs_create_subvol_root(struct btrfs_root *new_root, struct dentry *dentry,
3925 struct btrfs_trans_handle *trans, u64 new_dirid,
3926 struct btrfs_block_group_cache *block_group)
3927 {
3928 struct inode *inode;
3929 int error;
3930 u64 index = 0;
3931
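/*
 * the subvolume root is its own parent: new_dirid serves both as
 * the inode's objectid and as the directory the ".." entry points to
 */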
3932 inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid,
3933 new_dirid, block_group, S_IFDIR | 0700, &index);
3934 if (IS_ERR(inode))
3935 return PTR_ERR(inode);
3936 inode->i_op = &btrfs_dir_inode_operations;
3937 inode->i_fop = &btrfs_dir_file_operations;
3938 new_root->inode = inode;
3939
3940 inode->i_nlink = 1;
3941 btrfs_i_size_write(inode, 0);
3942
3943 error = btrfs_update_inode(trans, new_root, inode);
3944 if (error)
3945 return error;
3946
3947 d_instantiate(dentry, inode);
3948 return 0;
3949 }
3950
3951 /* helper function for file defrag and space balancing. This
3952 * forces readahead on a given range of bytes in an inode
3953 */
3954 unsigned long btrfs_force_ra(struct address_space *mapping,
3955 struct file_ra_state *ra, struct file *file,
3956 pgoff_t offset, pgoff_t last_index)
3957 {
3958 pgoff_t req_size = last_index - offset + 1;
3959
3960 page_cache_sync_readahead(mapping, ra, file, offset, req_size);
3961 return offset + req_size;
3962 }
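/*
 * Example (hypothetical caller): readahead the first 64 pages of a
 * file before touching them; the return value is the first page
 * index past the requested range:
 *
 *	next = btrfs_force_ra(inode->i_mapping, &file->f_ra, file, 0, 63);
 */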
3963
3964 struct inode *btrfs_alloc_inode(struct super_block *sb)
3965 {
3966 struct btrfs_inode *ei;
3967
3968 ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
3969 if (!ei)
3970 return NULL;
3971 ei->last_trans = 0;
3972 ei->logged_trans = 0;
3973 btrfs_ordered_inode_tree_init(&ei->ordered_tree);
3974 ei->i_acl = BTRFS_ACL_NOT_CACHED;
3975 ei->i_default_acl = BTRFS_ACL_NOT_CACHED;
3976 INIT_LIST_HEAD(&ei->i_orphan);
3977 return &ei->vfs_inode;
3978 }
3979
3980 void btrfs_destroy_inode(struct inode *inode)
3981 {
3982 struct btrfs_ordered_extent *ordered;
3983 WARN_ON(!list_empty(&inode->i_dentry));
3984 WARN_ON(inode->i_data.nrpages);
3985
3986 if (BTRFS_I(inode)->i_acl &&
3987 BTRFS_I(inode)->i_acl != BTRFS_ACL_NOT_CACHED)
3988 posix_acl_release(BTRFS_I(inode)->i_acl);
3989 if (BTRFS_I(inode)->i_default_acl &&
3990 BTRFS_I(inode)->i_default_acl != BTRFS_ACL_NOT_CACHED)
3991 posix_acl_release(BTRFS_I(inode)->i_default_acl);
3992
3993 spin_lock(&BTRFS_I(inode)->root->list_lock);
3994 if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
3995 printk(KERN_ERR "BTRFS: inode %lu still on the orphan"
3996 " list\n", inode->i_ino);
3997 dump_stack();
3998 }
3999 spin_unlock(&BTRFS_I(inode)->root->list_lock);
4000
4001 while (1) {
4002 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
4003 if (!ordered)
4004 break;
4005 else {
4006 printk(KERN_ERR "found ordered extent %Lu %Lu\n",
4007 ordered->file_offset, ordered->len);
4008 btrfs_remove_ordered_extent(inode, ordered);
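/*
 * two puts: one for the reference taken by the lookup above and
 * one for the reference the ordered tree was still holding
 */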
4009 btrfs_put_ordered_extent(ordered);
4010 btrfs_put_ordered_extent(ordered);
4011 }
4012 }
4013 btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
4014 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
4015 }
4016
4017 static void init_once(void *foo)
4018 {
4019 struct btrfs_inode *ei = (struct btrfs_inode *) foo;
4020
4021 inode_init_once(&ei->vfs_inode);
4022 }
4023
4024 void btrfs_destroy_cachep(void)
4025 {
4026 if (btrfs_inode_cachep)
4027 kmem_cache_destroy(btrfs_inode_cachep);
4028 if (btrfs_trans_handle_cachep)
4029 kmem_cache_destroy(btrfs_trans_handle_cachep);
4030 if (btrfs_transaction_cachep)
4031 kmem_cache_destroy(btrfs_transaction_cachep);
4032 if (btrfs_bit_radix_cachep)
4033 kmem_cache_destroy(btrfs_bit_radix_cachep);
4034 if (btrfs_path_cachep)
4035 kmem_cache_destroy(btrfs_path_cachep);
4036 }
4037
4038 struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
4039 unsigned long extra_flags,
4040 void (*ctor)(void *))
4041 {
4042 return kmem_cache_create(name, size, 0, (SLAB_RECLAIM_ACCOUNT |
4043 SLAB_MEM_SPREAD | extra_flags), ctor);
4044 }
4045
4046 int btrfs_init_cachep(void)
4047 {
4048 btrfs_inode_cachep = btrfs_cache_create("btrfs_inode_cache",
4049 sizeof(struct btrfs_inode),
4050 0, init_once);
4051 if (!btrfs_inode_cachep)
4052 goto fail;
4053 btrfs_trans_handle_cachep =
4054 btrfs_cache_create("btrfs_trans_handle_cache",
4055 sizeof(struct btrfs_trans_handle),
4056 0, NULL);
4057 if (!btrfs_trans_handle_cachep)
4058 goto fail;
4059 btrfs_transaction_cachep = btrfs_cache_create("btrfs_transaction_cache",
4060 sizeof(struct btrfs_transaction),
4061 0, NULL);
4062 if (!btrfs_transaction_cachep)
4063 goto fail;
4064 btrfs_path_cachep = btrfs_cache_create("btrfs_path_cache",
4065 sizeof(struct btrfs_path),
4066 0, NULL);
4067 if (!btrfs_path_cachep)
4068 goto fail;
4069 btrfs_bit_radix_cachep = btrfs_cache_create("btrfs_radix", 256,
4070 SLAB_DESTROY_BY_RCU, NULL);
4071 if (!btrfs_bit_radix_cachep)
4072 goto fail;
4073 return 0;
4074 fail:
4075 btrfs_destroy_cachep();
4076 return -ENOMEM;
4077 }
4078
4079 static int btrfs_getattr(struct vfsmount *mnt,
4080 struct dentry *dentry, struct kstat *stat)
4081 {
4082 struct inode *inode = dentry->d_inode;
4083 generic_fillattr(inode, stat);
4084 stat->blksize = PAGE_CACHE_SIZE;
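/* include delalloc bytes not yet on disk; >> 9 converts bytes to 512-byte sectors */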
4085 stat->blocks = (inode_get_bytes(inode) +
4086 BTRFS_I(inode)->delalloc_bytes) >> 9;
4087 return 0;
4088 }
4089
4090 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
4091 struct inode *new_dir, struct dentry *new_dentry)
4092 {
4093 struct btrfs_trans_handle *trans;
4094 struct btrfs_root *root = BTRFS_I(old_dir)->root;
4095 struct inode *new_inode = new_dentry->d_inode;
4096 struct inode *old_inode = old_dentry->d_inode;
4097 struct timespec ctime = CURRENT_TIME;
4098 u64 index = 0;
4099 int ret;
4100
4101 if (S_ISDIR(old_inode->i_mode) && new_inode &&
4102 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) {
4103 return -ENOTEMPTY;
4104 }
4105
4106 ret = btrfs_check_free_space(root, 1, 0);
4107 if (ret)
4108 goto out_unlock;
4109
4110 trans = btrfs_start_transaction(root, 1);
4111
4112 btrfs_set_trans_block_group(trans, new_dir);
4113
4114 btrfs_inc_nlink(old_dentry->d_inode);
4115 old_dir->i_ctime = old_dir->i_mtime = ctime;
4116 new_dir->i_ctime = new_dir->i_mtime = ctime;
4117 old_inode->i_ctime = ctime;
4118
4119 ret = btrfs_unlink_inode(trans, root, old_dir, old_dentry->d_inode,
4120 old_dentry->d_name.name,
4121 old_dentry->d_name.len);
4122 if (ret)
4123 goto out_fail;
4124
4125 if (new_inode) {
4126 new_inode->i_ctime = CURRENT_TIME;
4127 ret = btrfs_unlink_inode(trans, root, new_dir,
4128 new_dentry->d_inode,
4129 new_dentry->d_name.name,
4130 new_dentry->d_name.len);
4131 if (ret)
4132 goto out_fail;
4133 if (new_inode->i_nlink == 0) {
4134 ret = btrfs_orphan_add(trans, new_dentry->d_inode);
4135 if (ret)
4136 goto out_fail;
4137 }
4139 }
4140 ret = btrfs_set_inode_index(new_dir, old_inode, &index);
4141 if (ret)
4142 goto out_fail;
4143
4144 ret = btrfs_add_link(trans, new_dentry->d_parent->d_inode,
4145 old_inode, new_dentry->d_name.name,
4146 new_dentry->d_name.len, 1, index);
4147 if (ret)
4148 goto out_fail;
4149
4150 out_fail:
4151 btrfs_end_transaction_throttle(trans, root);
4152 out_unlock:
4153 return ret;
4154 }
4155
4156 /*
4157 * some fairly slow code that needs optimization. This walks the list
4158 * of all the inodes with pending delalloc and forces them to disk.
4159 */
4160 int btrfs_start_delalloc_inodes(struct btrfs_root *root)
4161 {
4162 struct list_head *head = &root->fs_info->delalloc_inodes;
4163 struct btrfs_inode *binode;
4164 struct inode *inode;
4165 unsigned long flags;
4166
4167 spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
4168 while (!list_empty(head)) {
4169 binode = list_entry(head->next, struct btrfs_inode,
4170 delalloc_inodes);
4171 inode = igrab(&binode->vfs_inode);
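/* igrab returns NULL when the inode is being freed; just drop it from the list */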
4172 if (!inode)
4173 list_del_init(&binode->delalloc_inodes);
4174 spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
4175 if (inode) {
4176 filemap_flush(inode->i_mapping);
4177 iput(inode);
4178 }
4179 cond_resched();
4180 spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
4181 }
4182 spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
4183
4184 /* the filemap_flush will queue IO into the worker threads, but
4185 * we have to make sure the IO is actually started and that
4186 * ordered extents get created before we return
4187 */
4188 atomic_inc(&root->fs_info->async_submit_draining);
4189 while (atomic_read(&root->fs_info->nr_async_submits)) {
4190 wait_event(root->fs_info->async_submit_wait,
4191 (atomic_read(&root->fs_info->nr_async_submits) == 0));
4192 }
4193 atomic_dec(&root->fs_info->async_submit_draining);
4194 return 0;
4195 }
4196
4197 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
4198 const char *symname)
4199 {
4200 struct btrfs_trans_handle *trans;
4201 struct btrfs_root *root = BTRFS_I(dir)->root;
4202 struct btrfs_path *path;
4203 struct btrfs_key key;
4204 struct inode *inode = NULL;
4205 int err;
4206 int drop_inode = 0;
4207 u64 objectid;
4208 u64 index = 0;
4209 int name_len;
4210 int datasize;
4211 unsigned long ptr;
4212 struct btrfs_file_extent_item *ei;
4213 struct extent_buffer *leaf;
4214 unsigned long nr = 0;
4215
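/*
 * name_len includes the trailing NUL so the inline extent stores a
 * NUL terminated string; i_size is set to name_len - 1 further down
 */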
4216 name_len = strlen(symname) + 1;
4217 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
4218 return -ENAMETOOLONG;
4219
4220 err = btrfs_check_free_space(root, 1, 0);
4221 if (err)
4222 goto out_fail;
4223
4224 trans = btrfs_start_transaction(root, 1);
4225 btrfs_set_trans_block_group(trans, dir);
4226
4227 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
4228 if (err) {
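/* a failed objectid search is reported to the caller as -ENOSPC */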
4229 err = -ENOSPC;
4230 goto out_unlock;
4231 }
4232
4233 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4234 dentry->d_name.len,
4235 dentry->d_parent->d_inode->i_ino, objectid,
4236 BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO,
4237 &index);
4238 err = PTR_ERR(inode);
4239 if (IS_ERR(inode))
4240 goto out_unlock;
4241
4242 err = btrfs_init_acl(inode, dir);
4243 if (err) {
4244 drop_inode = 1;
4245 goto out_unlock;
4246 }
4247
4248 btrfs_set_trans_block_group(trans, inode);
4249 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
4250 if (err) {
4251 drop_inode = 1;
4252 } else {
4253 inode->i_mapping->a_ops = &btrfs_aops;
4254 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
4255 inode->i_fop = &btrfs_file_operations;
4256 inode->i_op = &btrfs_file_inode_operations;
4257 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
4258 }
4259 dir->i_sb->s_dirt = 1;
4260 btrfs_update_inode_block_group(trans, inode);
4261 btrfs_update_inode_block_group(trans, dir);
4262 if (drop_inode)
4263 goto out_unlock;
4264
4265 path = btrfs_alloc_path();
4266 BUG_ON(!path);
4267 key.objectid = inode->i_ino;
4268 key.offset = 0;
4269 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
4270 datasize = btrfs_file_extent_calc_inline_size(name_len);
4271 err = btrfs_insert_empty_item(trans, root, path, &key,
4272 datasize);
4273 if (err) {
4274 drop_inode = 1;
4275 goto out_unlock;
4276 }
4277 leaf = path->nodes[0];
4278 ei = btrfs_item_ptr(leaf, path->slots[0],
4279 struct btrfs_file_extent_item);
4280 btrfs_set_file_extent_generation(leaf, ei, trans->transid);
4281 btrfs_set_file_extent_type(leaf, ei,
4282 BTRFS_FILE_EXTENT_INLINE);
4283 btrfs_set_file_extent_encryption(leaf, ei, 0);
4284 btrfs_set_file_extent_compression(leaf, ei, 0);
4285 btrfs_set_file_extent_other_encoding(leaf, ei, 0);
4286 btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
4287
4288 ptr = btrfs_file_extent_inline_start(ei);
4289 write_extent_buffer(leaf, symname, ptr, name_len);
4290 btrfs_mark_buffer_dirty(leaf);
4291 btrfs_free_path(path);
4292
4293 inode->i_op = &btrfs_symlink_inode_operations;
4294 inode->i_mapping->a_ops = &btrfs_symlink_aops;
4295 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
4296 btrfs_i_size_write(inode, name_len - 1);
4297 err = btrfs_update_inode(trans, root, inode);
4298 if (err)
4299 drop_inode = 1;
4300
4301 out_unlock:
4302 nr = trans->blocks_used;
4303 btrfs_end_transaction_throttle(trans, root);
4304 out_fail:
4305 if (drop_inode) {
4306 inode_dec_link_count(inode);
4307 iput(inode);
4308 }
4309 btrfs_btree_balance_dirty(root, nr);
4310 return err;
4311 }
4312
4313 static int btrfs_set_page_dirty(struct page *page)
4314 {
4315 return __set_page_dirty_nobuffers(page);
4316 }
4317
4318 static int btrfs_permission(struct inode *inode, int mask)
4319 {
4320 if (btrfs_test_flag(inode, READONLY) && (mask & MAY_WRITE))
4321 return -EACCES;
4322 return generic_permission(inode, mask, btrfs_check_acl);
4323 }
4324
4325 static struct inode_operations btrfs_dir_inode_operations = {
4326 .lookup = btrfs_lookup,
4327 .create = btrfs_create,
4328 .unlink = btrfs_unlink,
4329 .link = btrfs_link,
4330 .mkdir = btrfs_mkdir,
4331 .rmdir = btrfs_rmdir,
4332 .rename = btrfs_rename,
4333 .symlink = btrfs_symlink,
4334 .setattr = btrfs_setattr,
4335 .mknod = btrfs_mknod,
4336 .setxattr = btrfs_setxattr,
4337 .getxattr = btrfs_getxattr,
4338 .listxattr = btrfs_listxattr,
4339 .removexattr = btrfs_removexattr,
4340 .permission = btrfs_permission,
4341 };
4342 static struct inode_operations btrfs_dir_ro_inode_operations = {
4343 .lookup = btrfs_lookup,
4344 .permission = btrfs_permission,
4345 };
4346 static struct file_operations btrfs_dir_file_operations = {
4347 .llseek = generic_file_llseek,
4348 .read = generic_read_dir,
4349 .readdir = btrfs_real_readdir,
4350 .unlocked_ioctl = btrfs_ioctl,
4351 #ifdef CONFIG_COMPAT
4352 .compat_ioctl = btrfs_ioctl,
4353 #endif
4354 .release = btrfs_release_file,
4355 .fsync = btrfs_sync_file,
4356 };
4357
4358 static struct extent_io_ops btrfs_extent_io_ops = {
4359 .fill_delalloc = run_delalloc_range,
4360 .submit_bio_hook = btrfs_submit_bio_hook,
4361 .merge_bio_hook = btrfs_merge_bio_hook,
4362 .readpage_end_io_hook = btrfs_readpage_end_io_hook,
4363 .writepage_end_io_hook = btrfs_writepage_end_io_hook,
4364 .writepage_start_hook = btrfs_writepage_start_hook,
4365 .readpage_io_failed_hook = btrfs_io_failed_hook,
4366 .set_bit_hook = btrfs_set_bit_hook,
4367 .clear_bit_hook = btrfs_clear_bit_hook,
4368 };
4369
4370 static struct address_space_operations btrfs_aops = {
4371 .readpage = btrfs_readpage,
4372 .writepage = btrfs_writepage,
4373 .writepages = btrfs_writepages,
4374 .readpages = btrfs_readpages,
4375 .sync_page = block_sync_page,
4376 .bmap = btrfs_bmap,
4377 .direct_IO = btrfs_direct_IO,
4378 .invalidatepage = btrfs_invalidatepage,
4379 .releasepage = btrfs_releasepage,
4380 .set_page_dirty = btrfs_set_page_dirty,
4381 };
4382
4383 static struct address_space_operations btrfs_symlink_aops = {
4384 .readpage = btrfs_readpage,
4385 .writepage = btrfs_writepage,
4386 .invalidatepage = btrfs_invalidatepage,
4387 .releasepage = btrfs_releasepage,
4388 };
4389
4390 static struct inode_operations btrfs_file_inode_operations = {
4391 .truncate = btrfs_truncate,
4392 .getattr = btrfs_getattr,
4393 .setattr = btrfs_setattr,
4394 .setxattr = btrfs_setxattr,
4395 .getxattr = btrfs_getxattr,
4396 .listxattr = btrfs_listxattr,
4397 .removexattr = btrfs_removexattr,
4398 .permission = btrfs_permission,
4399 };
4400 static struct inode_operations btrfs_special_inode_operations = {
4401 .getattr = btrfs_getattr,
4402 .setattr = btrfs_setattr,
4403 .permission = btrfs_permission,
4404 .setxattr = btrfs_setxattr,
4405 .getxattr = btrfs_getxattr,
4406 .listxattr = btrfs_listxattr,
4407 .removexattr = btrfs_removexattr,
4408 };
4409 static struct inode_operations btrfs_symlink_inode_operations = {
4410 .readlink = generic_readlink,
4411 .follow_link = page_follow_link_light,
4412 .put_link = page_put_link,
4413 .permission = btrfs_permission,
4414 };