Btrfs: forced readonly mounts on errors
fs/btrfs/file.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "compat.h"


/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
                                         int write_bytes,
                                         struct page **prepared_pages,
                                         struct iov_iter *i)
{
        size_t copied = 0;
        int pg = 0;
        int offset = pos & (PAGE_CACHE_SIZE - 1);
        int total_copied = 0;

        while (write_bytes > 0) {
                size_t count = min_t(size_t,
                                     PAGE_CACHE_SIZE - offset, write_bytes);
                struct page *page = prepared_pages[pg];
                /*
                 * Copy data from userspace to the current page
                 *
                 * Disable pagefault to avoid recursive lock since
                 * the pages are already locked
                 */
                pagefault_disable();
                copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
                pagefault_enable();

                /* Flush processor's dcache for this page */
                flush_dcache_page(page);
                iov_iter_advance(i, copied);
                write_bytes -= copied;
                total_copied += copied;

                /* Return to btrfs_file_aio_write to fault page */
                if (unlikely(copied == 0))
                        break;

                if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
                        offset += copied;
                } else {
                        pg++;
                        offset = 0;
                }
        }
        return total_copied;
}

/*
 * unlocks pages after btrfs_file_write is done with them
 */
static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
        size_t i;
        for (i = 0; i < num_pages; i++) {
                if (!pages[i])
                        break;
                /* PageChecked is some magic around finding pages that
                 * have been modified without going through
                 * btrfs_set_page_dirty; clear it here.
                 */
                ClearPageChecked(pages[i]);
                unlock_page(pages[i]);
                mark_page_accessed(pages[i]);
                page_cache_release(pages[i]);
        }
}

/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
                                            struct btrfs_root *root,
                                            struct file *file,
                                            struct page **pages,
                                            size_t num_pages,
                                            loff_t pos,
                                            size_t write_bytes)
{
        int err = 0;
        int i;
        struct inode *inode = fdentry(file)->d_inode;
        u64 num_bytes;
        u64 start_pos;
        u64 end_of_last_block;
        u64 end_pos = pos + write_bytes;
        loff_t isize = i_size_read(inode);

        start_pos = pos & ~((u64)root->sectorsize - 1);
        num_bytes = (write_bytes + pos - start_pos +
                     root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

        end_of_last_block = start_pos + num_bytes - 1;
        err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
                                        NULL);
        BUG_ON(err);

        for (i = 0; i < num_pages; i++) {
                struct page *p = pages[i];
                SetPageUptodate(p);
                ClearPageChecked(p);
                set_page_dirty(p);
        }
        if (end_pos > isize) {
                i_size_write(inode, end_pos);
                /* we've only changed i_size in ram, and we haven't updated
                 * the disk i_size.  There is no need to log the inode
                 * at this time.
                 */
        }
        return 0;
}

/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
                            int skip_pinned)
{
        struct extent_map *em;
        struct extent_map *split = NULL;
        struct extent_map *split2 = NULL;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        u64 len = end - start + 1;
        int ret;
        int testend = 1;
        unsigned long flags;
        int compressed = 0;

        WARN_ON(end < start);
        if (end == (u64)-1) {
                len = (u64)-1;
                testend = 0;
        }
        while (1) {
                if (!split)
                        split = alloc_extent_map(GFP_NOFS);
                if (!split2)
                        split2 = alloc_extent_map(GFP_NOFS);

                write_lock(&em_tree->lock);
                em = lookup_extent_mapping(em_tree, start, len);
                if (!em) {
                        write_unlock(&em_tree->lock);
                        break;
                }
                flags = em->flags;
                if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
                        if (testend && em->start + em->len >= start + len) {
                                free_extent_map(em);
                                write_unlock(&em_tree->lock);
                                break;
                        }
                        start = em->start + em->len;
                        if (testend)
                                len = start + len - (em->start + em->len);
                        free_extent_map(em);
                        write_unlock(&em_tree->lock);
                        continue;
                }
                compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
                clear_bit(EXTENT_FLAG_PINNED, &em->flags);
                remove_extent_mapping(em_tree, em);

                if (em->block_start < EXTENT_MAP_LAST_BYTE &&
                    em->start < start) {
                        split->start = em->start;
                        split->len = start - em->start;
                        split->orig_start = em->orig_start;
                        split->block_start = em->block_start;

                        if (compressed)
                                split->block_len = em->block_len;
                        else
                                split->block_len = split->len;

                        split->bdev = em->bdev;
                        split->flags = flags;
                        split->compress_type = em->compress_type;
                        ret = add_extent_mapping(em_tree, split);
                        BUG_ON(ret);
                        free_extent_map(split);
                        split = split2;
                        split2 = NULL;
                }
                if (em->block_start < EXTENT_MAP_LAST_BYTE &&
                    testend && em->start + em->len > start + len) {
                        u64 diff = start + len - em->start;

                        split->start = start + len;
                        split->len = em->start + em->len - (start + len);
                        split->bdev = em->bdev;
                        split->flags = flags;
                        split->compress_type = em->compress_type;

                        if (compressed) {
                                split->block_len = em->block_len;
                                split->block_start = em->block_start;
                                split->orig_start = em->orig_start;
                        } else {
                                split->block_len = split->len;
                                split->block_start = em->block_start + diff;
                                split->orig_start = split->start;
                        }

                        ret = add_extent_mapping(em_tree, split);
                        BUG_ON(ret);
                        free_extent_map(split);
                        split = NULL;
                }
                write_unlock(&em_tree->lock);

                /* once for us */
                free_extent_map(em);
                /* once for the tree */
                free_extent_map(em);
        }
        if (split)
                free_extent_map(split);
        if (split2)
                free_extent_map(split2);
        return 0;
}

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_byte is filled in with a block number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
                       u64 start, u64 end, u64 *hint_byte, int drop_cache)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_buffer *leaf;
        struct btrfs_file_extent_item *fi;
        struct btrfs_path *path;
        struct btrfs_key key;
        struct btrfs_key new_key;
        u64 search_start = start;
        u64 disk_bytenr = 0;
        u64 num_bytes = 0;
        u64 extent_offset = 0;
        u64 extent_end = 0;
        int del_nr = 0;
        int del_slot = 0;
        int extent_type;
        int recow;
        int ret;

        if (drop_cache)
                btrfs_drop_extent_cache(inode, start, end - 1, 0);

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        while (1) {
                recow = 0;
                ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
                                               search_start, -1);
                if (ret < 0)
                        break;
                if (ret > 0 && path->slots[0] > 0 && search_start == start) {
                        leaf = path->nodes[0];
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
                        if (key.objectid == inode->i_ino &&
                            key.type == BTRFS_EXTENT_DATA_KEY)
                                path->slots[0]--;
                }
                ret = 0;
next_slot:
                leaf = path->nodes[0];
                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                        BUG_ON(del_nr > 0);
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                break;
                        if (ret > 0) {
                                ret = 0;
                                break;
                        }
                        leaf = path->nodes[0];
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid > inode->i_ino ||
                    key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
                        break;

                fi = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_file_extent_item);
                extent_type = btrfs_file_extent_type(leaf, fi);

                if (extent_type == BTRFS_FILE_EXTENT_REG ||
                    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
                        disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
                        num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
                        extent_offset = btrfs_file_extent_offset(leaf, fi);
                        extent_end = key.offset +
                                btrfs_file_extent_num_bytes(leaf, fi);
                } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                        extent_end = key.offset +
                                btrfs_file_extent_inline_len(leaf, fi);
                } else {
                        WARN_ON(1);
                        extent_end = search_start;
                }

                if (extent_end <= search_start) {
                        path->slots[0]++;
                        goto next_slot;
                }

                search_start = max(key.offset, start);
                if (recow) {
                        btrfs_release_path(root, path);
                        continue;
                }

                /*
                 *     | - range to drop - |
                 *  | -------- extent -------- |
                 */
                if (start > key.offset && end < extent_end) {
                        BUG_ON(del_nr > 0);
                        BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

                        memcpy(&new_key, &key, sizeof(new_key));
                        new_key.offset = start;
                        ret = btrfs_duplicate_item(trans, root, path,
                                                   &new_key);
                        if (ret == -EAGAIN) {
                                btrfs_release_path(root, path);
                                continue;
                        }
                        if (ret < 0)
                                break;

                        leaf = path->nodes[0];
                        fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);

                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);

                        extent_offset += start - key.offset;
                        btrfs_set_file_extent_offset(leaf, fi, extent_offset);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - start);
                        btrfs_mark_buffer_dirty(leaf);

                        if (disk_bytenr > 0) {
                                ret = btrfs_inc_extent_ref(trans, root,
                                                disk_bytenr, num_bytes, 0,
                                                root->root_key.objectid,
                                                new_key.objectid,
                                                start - extent_offset);
                                BUG_ON(ret);
                                *hint_byte = disk_bytenr;
                        }
                        key.offset = start;
                }
                /*
                 *  | ---- range to drop ----- |
                 *      | -------- extent -------- |
                 */
                if (start <= key.offset && end < extent_end) {
                        BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

                        memcpy(&new_key, &key, sizeof(new_key));
                        new_key.offset = end;
                        btrfs_set_item_key_safe(trans, root, path, &new_key);

                        extent_offset += end - key.offset;
                        btrfs_set_file_extent_offset(leaf, fi, extent_offset);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - end);
                        btrfs_mark_buffer_dirty(leaf);
                        if (disk_bytenr > 0) {
                                inode_sub_bytes(inode, end - key.offset);
                                *hint_byte = disk_bytenr;
                        }
                        break;
                }

                search_start = extent_end;
                /*
                 *       | ---- range to drop ----- |
                 *  | -------- extent -------- |
                 */
                if (start > key.offset && end >= extent_end) {
                        BUG_ON(del_nr > 0);
                        BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);
                        btrfs_mark_buffer_dirty(leaf);
                        if (disk_bytenr > 0) {
                                inode_sub_bytes(inode, extent_end - start);
                                *hint_byte = disk_bytenr;
                        }
                        if (end == extent_end)
                                break;

                        path->slots[0]++;
                        goto next_slot;
                }

                /*
                 *  | ---- range to drop ----- |
                 *    | ------ extent ------ |
                 */
                if (start <= key.offset && end >= extent_end) {
                        if (del_nr == 0) {
                                del_slot = path->slots[0];
                                del_nr = 1;
                        } else {
                                BUG_ON(del_slot + del_nr != path->slots[0]);
                                del_nr++;
                        }

                        if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                                inode_sub_bytes(inode,
                                                extent_end - key.offset);
                                extent_end = ALIGN(extent_end,
                                                   root->sectorsize);
                        } else if (disk_bytenr > 0) {
                                ret = btrfs_free_extent(trans, root,
                                                disk_bytenr, num_bytes, 0,
                                                root->root_key.objectid,
                                                key.objectid, key.offset -
                                                extent_offset);
                                BUG_ON(ret);
                                inode_sub_bytes(inode,
                                                extent_end - key.offset);
                                *hint_byte = disk_bytenr;
                        }

                        if (end == extent_end)
                                break;

                        if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
                                path->slots[0]++;
                                goto next_slot;
                        }

                        ret = btrfs_del_items(trans, root, path, del_slot,
                                              del_nr);
                        BUG_ON(ret);

                        del_nr = 0;
                        del_slot = 0;

                        btrfs_release_path(root, path);
                        continue;
                }

                BUG_ON(1);
        }

        if (del_nr > 0) {
                ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
                BUG_ON(ret);
        }

        btrfs_free_path(path);
        return ret;
}

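/*
 * helper for btrfs_mark_extent_written: returns 1 if the file extent
 * item at @slot is an uncompressed, unencoded regular extent backed by
 * @bytenr with a matching @orig_offset, and if its file offset range
 * lines up with any nonzero *start/*end hints.  On success *start and
 * *end are set to the extent's range so the caller can merge with it.
 */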
static int extent_mergeable(struct extent_buffer *leaf, int slot,
                            u64 objectid, u64 bytenr, u64 orig_offset,
                            u64 *start, u64 *end)
{
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        u64 extent_end;

        if (slot < 0 || slot >= btrfs_header_nritems(leaf))
                return 0;

        btrfs_item_key_to_cpu(leaf, &key, slot);
        if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
                return 0;

        fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
        if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
            btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
            btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
            btrfs_file_extent_compression(leaf, fi) ||
            btrfs_file_extent_encryption(leaf, fi) ||
            btrfs_file_extent_other_encoding(leaf, fi))
                return 0;

        extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
        if ((*start && *start != key.offset) || (*end && *end != extent_end))
                return 0;

        *start = key.offset;
        *end = extent_end;
        return 1;
}

/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'. If only
 * part of extent is marked as written, the extent will be split into
 * two or three.
 */
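/*
 * For illustration, writing into the middle of a pre-allocated extent
 * produces a three-way split; writing up to an edge produces two
 * pieces, and neighbours that already match the written piece are
 * merged back in via extent_mergeable():
 *
 *                | ---- written ---- |
 *  | --------------- pre-allocated extent --------------- |
 *                        becomes
 *  | - PREALLOC - | ------ REG ------ | ----- PREALLOC --- |
 */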
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
                              struct inode *inode, u64 start, u64 end)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_buffer *leaf;
        struct btrfs_path *path;
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        struct btrfs_key new_key;
        u64 bytenr;
        u64 num_bytes;
        u64 extent_end;
        u64 orig_offset;
        u64 other_start;
        u64 other_end;
        u64 split;
        int del_nr = 0;
        int del_slot = 0;
        int recow;
        int ret;

        btrfs_drop_extent_cache(inode, start, end - 1, 0);

        path = btrfs_alloc_path();
        BUG_ON(!path);
again:
        recow = 0;
        split = start;
        key.objectid = inode->i_ino;
        key.type = BTRFS_EXTENT_DATA_KEY;
        key.offset = split;

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret > 0 && path->slots[0] > 0)
                path->slots[0]--;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        BUG_ON(key.objectid != inode->i_ino ||
               key.type != BTRFS_EXTENT_DATA_KEY);
        fi = btrfs_item_ptr(leaf, path->slots[0],
                            struct btrfs_file_extent_item);
        BUG_ON(btrfs_file_extent_type(leaf, fi) !=
               BTRFS_FILE_EXTENT_PREALLOC);
        extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
        BUG_ON(key.offset > start || extent_end < end);

        bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
        num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
        orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
        memcpy(&new_key, &key, sizeof(new_key));

        if (start == key.offset && end < extent_end) {
                other_start = 0;
                other_end = start;
                if (extent_mergeable(leaf, path->slots[0] - 1,
                                     inode->i_ino, bytenr, orig_offset,
                                     &other_start, &other_end)) {
                        new_key.offset = end;
                        btrfs_set_item_key_safe(trans, root, path, &new_key);
                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - end);
                        btrfs_set_file_extent_offset(leaf, fi,
                                                     end - orig_offset);
                        fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        end - other_start);
                        btrfs_mark_buffer_dirty(leaf);
                        goto out;
                }
        }

        if (start > key.offset && end == extent_end) {
                other_start = end;
                other_end = 0;
                if (extent_mergeable(leaf, path->slots[0] + 1,
                                     inode->i_ino, bytenr, orig_offset,
                                     &other_start, &other_end)) {
                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);
                        path->slots[0]++;
                        new_key.offset = start;
                        btrfs_set_item_key_safe(trans, root, path, &new_key);

                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        other_end - start);
                        btrfs_set_file_extent_offset(leaf, fi,
                                                     start - orig_offset);
                        btrfs_mark_buffer_dirty(leaf);
                        goto out;
                }
        }

        while (start > key.offset || end < extent_end) {
                if (key.offset == start)
                        split = end;

                new_key.offset = split;
                ret = btrfs_duplicate_item(trans, root, path, &new_key);
                if (ret == -EAGAIN) {
                        btrfs_release_path(root, path);
                        goto again;
                }
                BUG_ON(ret < 0);

                leaf = path->nodes[0];
                fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                    struct btrfs_file_extent_item);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                split - key.offset);

                fi = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_file_extent_item);

                btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                extent_end - split);
                btrfs_mark_buffer_dirty(leaf);

                ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
                                           root->root_key.objectid,
                                           inode->i_ino, orig_offset);
                BUG_ON(ret);

                if (split == start) {
                        key.offset = start;
                } else {
                        BUG_ON(start != key.offset);
                        path->slots[0]--;
                        extent_end = end;
                }
                recow = 1;
        }

        other_start = end;
        other_end = 0;
        if (extent_mergeable(leaf, path->slots[0] + 1,
                             inode->i_ino, bytenr, orig_offset,
                             &other_start, &other_end)) {
                if (recow) {
                        btrfs_release_path(root, path);
                        goto again;
                }
                extent_end = other_end;
                del_slot = path->slots[0] + 1;
                del_nr++;
                ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
                                        0, root->root_key.objectid,
                                        inode->i_ino, orig_offset);
                BUG_ON(ret);
        }
        other_start = 0;
        other_end = start;
        if (extent_mergeable(leaf, path->slots[0] - 1,
                             inode->i_ino, bytenr, orig_offset,
                             &other_start, &other_end)) {
                if (recow) {
                        btrfs_release_path(root, path);
                        goto again;
                }
                key.offset = other_start;
                del_slot = path->slots[0];
                del_nr++;
                ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
                                        0, root->root_key.objectid,
                                        inode->i_ino, orig_offset);
                BUG_ON(ret);
        }
        if (del_nr == 0) {
                fi = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_file_extent_item);
                btrfs_set_file_extent_type(leaf, fi,
                                           BTRFS_FILE_EXTENT_REG);
                btrfs_mark_buffer_dirty(leaf);
        } else {
                fi = btrfs_item_ptr(leaf, del_slot - 1,
                                    struct btrfs_file_extent_item);
                btrfs_set_file_extent_type(leaf, fi,
                                           BTRFS_FILE_EXTENT_REG);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                extent_end - key.offset);
                btrfs_mark_buffer_dirty(leaf);

                ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
                BUG_ON(ret);
        }
out:
        btrfs_free_path(path);
        return 0;
}

/*
 * this gets pages into the page cache and locks them down, it also properly
 * waits for data=ordered extents to finish before allowing the pages to be
 * modified.
 */
static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
                                  struct page **pages, size_t num_pages,
                                  loff_t pos, unsigned long first_index,
                                  unsigned long last_index, size_t write_bytes)
{
        struct extent_state *cached_state = NULL;
        int i;
        unsigned long index = pos >> PAGE_CACHE_SHIFT;
        struct inode *inode = fdentry(file)->d_inode;
        int err = 0;
        u64 start_pos;
        u64 last_pos;

        start_pos = pos & ~((u64)root->sectorsize - 1);
        last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;

        if (start_pos > inode->i_size) {
                err = btrfs_cont_expand(inode, start_pos);
                if (err)
                        return err;
        }

        memset(pages, 0, num_pages * sizeof(struct page *));
again:
        for (i = 0; i < num_pages; i++) {
                pages[i] = grab_cache_page(inode->i_mapping, index + i);
                if (!pages[i]) {
                        err = -ENOMEM;
                        BUG_ON(1);
                }
                wait_on_page_writeback(pages[i]);
        }
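        /*
         * If the write lands inside i_size, lock the range and make sure
         * no ordered extent (pending data=ordered IO completion) overlaps
         * it.  If one does, drop the locks and pages, wait for it to
         * finish and retry; then clear any stale dirty/delalloc state on
         * the range before we redirty it ourselves.
         */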
        if (start_pos < inode->i_size) {
                struct btrfs_ordered_extent *ordered;
                lock_extent_bits(&BTRFS_I(inode)->io_tree,
                                 start_pos, last_pos - 1, 0, &cached_state,
                                 GFP_NOFS);
                ordered = btrfs_lookup_first_ordered_extent(inode,
                                                            last_pos - 1);
                if (ordered &&
                    ordered->file_offset + ordered->len > start_pos &&
                    ordered->file_offset < last_pos) {
                        btrfs_put_ordered_extent(ordered);
                        unlock_extent_cached(&BTRFS_I(inode)->io_tree,
                                             start_pos, last_pos - 1,
                                             &cached_state, GFP_NOFS);
                        for (i = 0; i < num_pages; i++) {
                                unlock_page(pages[i]);
                                page_cache_release(pages[i]);
                        }
                        btrfs_wait_ordered_range(inode, start_pos,
                                                 last_pos - start_pos);
                        goto again;
                }
                if (ordered)
                        btrfs_put_ordered_extent(ordered);

                clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
                                 last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
                                 EXTENT_DO_ACCOUNTING, 0, 0, &cached_state,
                                 GFP_NOFS);
                unlock_extent_cached(&BTRFS_I(inode)->io_tree,
                                     start_pos, last_pos - 1, &cached_state,
                                     GFP_NOFS);
        }
        for (i = 0; i < num_pages; i++) {
                clear_page_dirty_for_io(pages[i]);
                set_page_extent_mapped(pages[i]);
                WARN_ON(!PageLocked(pages[i]));
        }
        return 0;
}

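/*
 * buffered and O_DIRECT write entry point.  O_DIRECT goes through the
 * generic direct IO path first and falls back to buffered writes for
 * whatever it could not handle; the buffered path reserves delalloc
 * space, copies the user data into locked, prepared pages one chunk at
 * a time, then dirties and releases them.
 */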
static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
                                    const struct iovec *iov,
                                    unsigned long nr_segs, loff_t pos)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = fdentry(file)->d_inode;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct page *pinned[2];
        struct page **pages = NULL;
        struct iov_iter i;
        loff_t *ppos = &iocb->ki_pos;
        loff_t start_pos;
        ssize_t num_written = 0;
        ssize_t err = 0;
        size_t count;
        size_t ocount;
        int ret = 0;
        int nrptrs;
        unsigned long first_index;
        unsigned long last_index;
        int will_write;
        int buffered = 0;
        int copied = 0;
        int dirty_pages = 0;

        will_write = ((file->f_flags & O_DSYNC) || IS_SYNC(inode) ||
                      (file->f_flags & O_DIRECT));

        pinned[0] = NULL;
        pinned[1] = NULL;

        start_pos = pos;

        vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

        mutex_lock(&inode->i_mutex);

        err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
        if (err)
                goto out;
        count = ocount;

        current->backing_dev_info = inode->i_mapping->backing_dev_info;
        err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
        if (err)
                goto out;

        if (count == 0)
                goto out;

        err = file_remove_suid(file);
        if (err)
                goto out;

        /*
         * If BTRFS flips readonly due to some impossible error
         * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
         * although we have opened a file as writable, we have
         * to stop this write operation to ensure FS consistency.
         */
        if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
                err = -EROFS;
                goto out;
        }

        file_update_time(file);
        BTRFS_I(inode)->sequence++;

        if (unlikely(file->f_flags & O_DIRECT)) {
                num_written = generic_file_direct_write(iocb, iov, &nr_segs,
                                                        pos, ppos, count,
                                                        ocount);
                /*
                 * the generic O_DIRECT will update in-memory i_size after the
                 * DIOs are done.  But our endio handlers that update the on
                 * disk i_size never update past the in memory i_size.  So we
                 * need one more update here to catch any additions to the
                 * file
                 */
                if (inode->i_size != BTRFS_I(inode)->disk_i_size) {
                        btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
                        mark_inode_dirty(inode);
                }

                if (num_written < 0) {
                        ret = num_written;
                        num_written = 0;
                        goto out;
                } else if (num_written == count) {
                        /* pick up pos changes done by the generic code */
                        pos = *ppos;
                        goto out;
                }
                /*
                 * We are going to do buffered for the rest of the range, so we
                 * need to make sure to invalidate the buffered pages when we're
                 * done.
                 */
                buffered = 1;
                pos += num_written;
        }

        iov_iter_init(&i, iov, nr_segs, count, num_written);
        nrptrs = min((iov_iter_count(&i) + PAGE_CACHE_SIZE - 1) /
                     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
                     (sizeof(struct page *)));
        pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
        if (!pages) {
                ret = -ENOMEM;
                goto out;
        }

        /* generic_write_checks can change our pos */
        start_pos = pos;

        first_index = pos >> PAGE_CACHE_SHIFT;
        last_index = (pos + iov_iter_count(&i)) >> PAGE_CACHE_SHIFT;

        /*
         * there are lots of better ways to do this, but this code
         * makes sure the first and last page in the file range are
         * up to date and ready for cow
         */
        if ((pos & (PAGE_CACHE_SIZE - 1))) {
                pinned[0] = grab_cache_page(inode->i_mapping, first_index);
                if (!PageUptodate(pinned[0])) {
                        ret = btrfs_readpage(NULL, pinned[0]);
                        BUG_ON(ret);
                        wait_on_page_locked(pinned[0]);
                } else {
                        unlock_page(pinned[0]);
                }
        }
        if ((pos + iov_iter_count(&i)) & (PAGE_CACHE_SIZE - 1)) {
                pinned[1] = grab_cache_page(inode->i_mapping, last_index);
                if (!PageUptodate(pinned[1])) {
                        ret = btrfs_readpage(NULL, pinned[1]);
                        BUG_ON(ret);
                        wait_on_page_locked(pinned[1]);
                } else {
                        unlock_page(pinned[1]);
                }
        }

        while (iov_iter_count(&i) > 0) {
                size_t offset = pos & (PAGE_CACHE_SIZE - 1);
                size_t write_bytes = min(iov_iter_count(&i),
                                         nrptrs * (size_t)PAGE_CACHE_SIZE -
                                         offset);
                size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
                                        PAGE_CACHE_SHIFT;

                WARN_ON(num_pages > nrptrs);
                memset(pages, 0, sizeof(struct page *) * nrptrs);

                /*
                 * Fault pages before locking them in prepare_pages
                 * to avoid recursive lock
                 */
                if (unlikely(iov_iter_fault_in_readable(&i, write_bytes))) {
                        ret = -EFAULT;
                        goto out;
                }

                ret = btrfs_delalloc_reserve_space(inode,
                                        num_pages << PAGE_CACHE_SHIFT);
                if (ret)
                        goto out;

                ret = prepare_pages(root, file, pages, num_pages,
                                    pos, first_index, last_index,
                                    write_bytes);
                if (ret) {
                        btrfs_delalloc_release_space(inode,
                                        num_pages << PAGE_CACHE_SHIFT);
                        goto out;
                }

                copied = btrfs_copy_from_user(pos, num_pages,
                                              write_bytes, pages, &i);
                dirty_pages = (copied + PAGE_CACHE_SIZE - 1) >>
                                PAGE_CACHE_SHIFT;

                if (num_pages > dirty_pages) {
                        if (copied > 0)
                                atomic_inc(
                                        &BTRFS_I(inode)->outstanding_extents);
                        btrfs_delalloc_release_space(inode,
                                        (num_pages - dirty_pages) <<
                                        PAGE_CACHE_SHIFT);
                }

                if (copied > 0) {
                        dirty_and_release_pages(NULL, root, file, pages,
                                                dirty_pages, pos, copied);
                }

                btrfs_drop_pages(pages, num_pages);

                if (copied > 0) {
                        if (will_write) {
                                filemap_fdatawrite_range(inode->i_mapping, pos,
                                                         pos + copied - 1);
                        } else {
                                balance_dirty_pages_ratelimited_nr(
                                                        inode->i_mapping,
                                                        dirty_pages);
                                if (dirty_pages <
                                    (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
                                        btrfs_btree_balance_dirty(root, 1);
                                btrfs_throttle(root);
                        }
                }

                pos += copied;
                num_written += copied;

                cond_resched();
        }
out:
        mutex_unlock(&inode->i_mutex);
        if (ret)
                err = ret;

        kfree(pages);
        if (pinned[0])
                page_cache_release(pinned[0]);
        if (pinned[1])
                page_cache_release(pinned[1]);
        *ppos = pos;

        /*
         * we want to make sure fsync finds this change
         * but we haven't joined a transaction running right now.
         *
         * Later on, someone is sure to update the inode and get the
         * real transid recorded.
         *
         * We set last_trans now to the fs_info generation + 1,
         * this will either be one more than the running transaction
         * or the generation used for the next transaction if there isn't
         * one running right now.
         */
        BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;

        if (num_written > 0 && will_write) {
                struct btrfs_trans_handle *trans;

                err = btrfs_wait_ordered_range(inode, start_pos, num_written);
                if (err)
                        num_written = err;

                if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) {
                        trans = btrfs_start_transaction(root, 0);
                        if (IS_ERR(trans)) {
                                num_written = PTR_ERR(trans);
                                goto done;
                        }
                        mutex_lock(&inode->i_mutex);
                        ret = btrfs_log_dentry_safe(trans, root,
                                                    file->f_dentry);
                        mutex_unlock(&inode->i_mutex);
                        if (ret == 0) {
                                ret = btrfs_sync_log(trans, root);
                                if (ret == 0)
                                        btrfs_end_transaction(trans, root);
                                else
                                        btrfs_commit_transaction(trans, root);
                        } else if (ret != BTRFS_NO_LOG_SYNC) {
                                btrfs_commit_transaction(trans, root);
                        } else {
                                btrfs_end_transaction(trans, root);
                        }
                }
                if (file->f_flags & O_DIRECT && buffered) {
                        invalidate_mapping_pages(inode->i_mapping,
                              start_pos >> PAGE_CACHE_SHIFT,
                             (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT);
                }
        }
done:
        current->backing_dev_info = NULL;
        return num_written ? num_written : err;
}

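/*
 * called when the last reference to the open file is dropped; kicks off
 * any deferred ordered work queued up by a truncate-to-zero, and ends a
 * transaction held open through the ioctl interface if there is one.
 */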
int btrfs_release_file(struct inode *inode, struct file *filp)
{
        /*
         * ordered_data_close is set by setattr when we are about to truncate
         * a file from a non-zero size to a zero size.  This tries to
         * flush down new bytes that may have been written if the
         * application were using truncate to replace a file in place.
         */
        if (BTRFS_I(inode)->ordered_data_close) {
                BTRFS_I(inode)->ordered_data_close = 0;
                btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
                if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
                        filemap_flush(inode->i_mapping);
        }
        if (filp->private_data)
                btrfs_ioctl_trans_end(filp);
        return 0;
}

/*
 * fsync call for both files and directories.  This logs the inode into
 * the tree log instead of forcing full commits whenever possible.
 *
 * It needs to call filemap_fdatawait so that all ordered extent updates
 * in the metadata btree are up to date for copying to the log.
 *
 * It drops the inode mutex before doing the tree log commit.  This is an
 * important optimization for directories because holding the mutex prevents
 * new operations on the dir while we write to disk.
 */
int btrfs_sync_file(struct file *file, int datasync)
{
        struct dentry *dentry = file->f_path.dentry;
        struct inode *inode = dentry->d_inode;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        int ret = 0;
        struct btrfs_trans_handle *trans;

        /* we wait first, since the writeback may change the inode */
        root->log_batch++;
        /* the VFS called filemap_fdatawrite for us */
        btrfs_wait_ordered_range(inode, 0, (u64)-1);
        root->log_batch++;

        /*
         * check the transaction that last modified this inode
         * and see if its already been committed
         */
        if (!BTRFS_I(inode)->last_trans)
                goto out;

        /*
         * if the last transaction that changed this file was before
         * the current transaction, we can bail out now without any
         * syncing
         */
        mutex_lock(&root->fs_info->trans_mutex);
        if (BTRFS_I(inode)->last_trans <=
            root->fs_info->last_trans_committed) {
                BTRFS_I(inode)->last_trans = 0;
                mutex_unlock(&root->fs_info->trans_mutex);
                goto out;
        }
        mutex_unlock(&root->fs_info->trans_mutex);

        /*
         * ok we haven't committed the transaction yet, lets do a commit
         */
        if (file->private_data)
                btrfs_ioctl_trans_end(file);

        trans = btrfs_start_transaction(root, 0);
        if (IS_ERR(trans)) {
                ret = PTR_ERR(trans);
                goto out;
        }

        ret = btrfs_log_dentry_safe(trans, root, dentry);
        if (ret < 0)
                goto out;

        /* we've logged all the items and now have a consistent
         * version of the file in the log.  It is possible that
         * someone will come in and modify the file, but that's
         * fine because the log is consistent on disk, and we
         * have references to all of the file's extents
         *
         * It is possible that someone will come in and log the
         * file again, but that will end up using the synchronization
         * inside btrfs_sync_log to keep things safe.
         */
        mutex_unlock(&dentry->d_inode->i_mutex);

        if (ret != BTRFS_NO_LOG_SYNC) {
                if (ret > 0) {
                        ret = btrfs_commit_transaction(trans, root);
                } else {
                        ret = btrfs_sync_log(trans, root);
                        if (ret == 0)
                                ret = btrfs_end_transaction(trans, root);
                        else
                                ret = btrfs_commit_transaction(trans, root);
                }
        } else {
                ret = btrfs_end_transaction(trans, root);
        }
        mutex_lock(&dentry->d_inode->i_mutex);
out:
        return ret > 0 ? -EIO : ret;
}

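/*
 * read faults on an mmap go through the generic filemap fault path;
 * writable faults go through btrfs_page_mkwrite so the filesystem can
 * reserve space and set up delalloc accounting before the page is
 * allowed to go dirty.
 */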
static const struct vm_operations_struct btrfs_file_vm_ops = {
        .fault          = filemap_fault,
        .page_mkwrite   = btrfs_page_mkwrite,
};

static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct address_space *mapping = filp->f_mapping;

        if (!mapping->a_ops->readpage)
                return -ENOEXEC;

        file_accessed(filp);
        vma->vm_ops = &btrfs_file_vm_ops;
        vma->vm_flags |= VM_CAN_NONLINEAR;

        return 0;
}

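/* file operations for regular btrfs files; directories use their own table */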
const struct file_operations btrfs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
        .aio_read       = generic_file_aio_read,
        .splice_read    = generic_file_splice_read,
        .aio_write      = btrfs_file_aio_write,
        .mmap           = btrfs_file_mmap,
        .open           = generic_file_open,
        .release        = btrfs_release_file,
        .fsync          = btrfs_sync_file,
        .unlocked_ioctl = btrfs_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = btrfs_ioctl,
#endif
};