/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/aio.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>
static struct kmem_cache *extent_tree_slab;
static struct kmem_cache *extent_node_slab;
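
/*
 * Completion callback for read bios: each page is marked up-to-date on
 * success, or has its uptodate flag cleared (and an error flagged) on
 * failure, and is then unlocked so that waiting readers can proceed.
 */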
static void f2fs_read_end_io(struct bio *bio, int err)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!err) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}
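
/*
 * Completion callback for write bios. A failed write re-dirties the page
 * and flags the mapping with AS_EIO; checkpointing is stopped as well,
 * since the on-disk state can no longer be trusted. Checkpoint waiters
 * sleeping on cp_wait are woken once no writeback remains in flight.
 */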
static void f2fs_write_end_io(struct bio *bio, int err)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (unlikely(err)) {
			set_page_dirty(page);
			set_bit(AS_EIO, &page->mapping->flags);
			f2fs_stop_checkpoint(sbi);
		}
		end_page_writeback(page);
		dec_page_count(sbi, F2FS_WRITEBACK);
	}

	if (!get_pages(sbi, F2FS_WRITEBACK) &&
			!list_empty(&sbi->cp_wait.task_list))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}
/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	/* No failure on bio allocation */
	bio = bio_alloc(GFP_NOIO, npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = sbi;

	return bio;
}
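
/*
 * Flush the bio currently cached in an f2fs_bio_info, if any, tracing it
 * as a read or a write before handing it to the block layer.
 */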
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	if (is_read_io(fio->rw))
		trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
	else
		trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);

	submit_bio(fio->rw, io->bio);
	io->bio = NULL;
}
void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		if (test_opt(sbi, NOBARRIER))
			io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO;
		else
			io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
	}
	__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}
/*
 * Fill the locked page with data located at the given block address.
 * Returns an unlocked page.
 */
int f2fs_submit_page_bio(struct f2fs_sb_info *sbi, struct page *page,
					struct f2fs_io_info *fio)
{
	struct bio *bio;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(page, fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(sbi, fio->blk_addr, 1, is_read_io(fio->rw));

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio_put(bio);
		f2fs_put_page(page, 1);
		return -EFAULT;
	}

	submit_bio(fio->rw, bio);
	return 0;
}
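
/*
 * Merged page submission: consecutive blocks issued with the same rw flags
 * are accumulated into one large bio, which is only flushed when a
 * discontiguous block or a conflicting request type arrives, or when the
 * bio is full and bio_add_page() refuses the page.
 */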
void f2fs_submit_page_mbio(struct f2fs_sb_info *sbi, struct page *page,
					struct f2fs_io_info *fio)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->rw);

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	verify_block_addr(sbi, fio->blk_addr);

	down_write(&io->io_rwsem);

	if (!is_read)
		inc_page_count(sbi, F2FS_WRITEBACK);

	if (io->bio && (io->last_block_in_bio != fio->blk_addr - 1 ||
						io->fio.rw != fio->rw))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(sbi);

		io->bio = __bio_alloc(sbi, fio->blk_addr, bio_blocks, is_read);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = fio->blk_addr;
	f2fs_trace_ios(page, fio, 0);

	up_write(&io->io_rwsem);
	trace_f2fs_submit_page_mbio(page, fio);
}
/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
static void __set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	f2fs_wait_on_page_writeback(node_page, NODE);

	rn = F2FS_NODE(node_page);

	/* Get the physical address of the data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
	set_page_dirty(node_page);
}
int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	dn->data_blkaddr = NEW_ADDR;
	__set_data_blkaddr(dn);
	mark_inode_dirty(dn->inode);
	sync_inode_page(dn);
	return 0;
}
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}
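
/*
 * Map a buffer_head directly from cached extent information: the block
 * address is the extent's start block plus the offset of pgofs within the
 * extent, and b_size is clamped so the mapped byte count still fits in the
 * unsigned b_size field.
 */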
static void f2fs_map_bh(struct super_block *sb, pgoff_t pgofs,
			struct extent_info *ei, struct buffer_head *bh_result)
{
	unsigned int blkbits = sb->s_blocksize_bits;
	size_t count;

	clear_buffer_new(bh_result);
	map_bh(bh_result, sb, ei->blk + pgofs - ei->fofs);
	count = ei->fofs + ei->len - pgofs;
	if (count < (UINT_MAX >> blkbits))
		bh_result->b_size = (count << blkbits);
	else
		bh_result->b_size = UINT_MAX;
}
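
/*
 * Legacy single-extent cache: each inode caches only the one extent kept
 * in fi->ext, so a lookup is a simple range test under ext_lock.
 */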
static bool lookup_extent_info(struct inode *inode, pgoff_t pgofs,
							struct extent_info *ei)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr;

	read_lock(&fi->ext_lock);
	if (fi->ext.len == 0) {
		read_unlock(&fi->ext_lock);
		return false;
	}

	stat_inc_total_hit(inode->i_sb);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk;

	if (pgofs >= start_fofs && pgofs <= end_fofs) {
		*ei = fi->ext;
		stat_inc_read_hit(inode->i_sb);
		read_unlock(&fi->ext_lock);
		return true;
	}
	read_unlock(&fi->ext_lock);
	return false;
}
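
/*
 * Update the single cached extent for a block that was just (re)mapped.
 * The extent is grown when the new block extends it at either end, split
 * when the block lands inside it, and dropped entirely once it shrinks
 * below F2FS_MIN_EXTENT_LEN. Returns true if the inode needs to be synced.
 */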
static bool update_extent_info(struct inode *inode, pgoff_t fofs,
							block_t blkaddr)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr, end_blkaddr;
	int need_update = true;

	write_lock(&fi->ext_lock);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk;
	end_blkaddr = fi->ext.blk + fi->ext.len - 1;

	/* Drop and initialize the matched extent */
	if (fi->ext.len == 1 && fofs == start_fofs)
		fi->ext.len = 0;

	/* Initial extent */
	if (fi->ext.len == 0) {
		if (blkaddr != NULL_ADDR) {
			fi->ext.fofs = fofs;
			fi->ext.blk = blkaddr;
			fi->ext.len = 1;
		}
		goto end_update;
	}

	/* Front merge */
	if (fofs == start_fofs - 1 && blkaddr == start_blkaddr - 1) {
		fi->ext.fofs--;
		fi->ext.blk--;
		fi->ext.len++;
		goto end_update;
	}

	/* Back merge */
	if (fofs == end_fofs + 1 && blkaddr == end_blkaddr + 1) {
		fi->ext.len++;
		goto end_update;
	}

	/* Split the existing extent */
	if (fi->ext.len > 1 &&
		fofs >= start_fofs && fofs <= end_fofs) {
		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
			fi->ext.len = fofs - start_fofs;
		} else {
			fi->ext.fofs = fofs + 1;
			fi->ext.blk = start_blkaddr + fofs - start_fofs + 1;
			fi->ext.len -= fofs - start_fofs + 1;
		}
	} else {
		need_update = false;
	}

	/* Finally, if the extent is very fragmented, let's drop the cache. */
	if (fi->ext.len < F2FS_MIN_EXTENT_LEN) {
		fi->ext.len = 0;
		set_inode_flag(fi, FI_NO_EXTENT);
		need_update = true;
	}
end_update:
	write_unlock(&fi->ext_lock);
	return need_update;
}
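
/*
 * What follows is the rb-tree based extent cache: per-inode extent trees
 * indexed by file offset, with every cached extent node also strung on a
 * global LRU list (sbi->extent_list) so that shrinking can reclaim the
 * coldest nodes first.
 */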
static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node *parent, struct rb_node **p)
{
	struct extent_node *en;

	en = kmem_cache_alloc(extent_node_slab, GFP_ATOMIC);
	if (!en)
		return NULL;

	en->ei = *ei;
	INIT_LIST_HEAD(&en->list);

	rb_link_node(&en->rb_node, parent, p);
	rb_insert_color(&en->rb_node, &et->root);
	et->count++;
	atomic_inc(&sbi->total_ext_node);
	return en;
}
static void __detach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	rb_erase(&en->rb_node, &et->root);
	et->count--;
	atomic_dec(&sbi->total_ext_node);

	if (et->cached_en == en)
		et->cached_en = NULL;
}
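
/*
 * Look up the extent node covering fofs. The most recently hit node is
 * kept in et->cached_en and checked first, so repeated lookups within the
 * same extent avoid walking the rb-tree.
 */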
static struct extent_node *__lookup_extent_tree(struct extent_tree *et,
							unsigned int fofs)
{
	struct rb_node *node = et->root.rb_node;
	struct extent_node *en;

	if (et->cached_en) {
		struct extent_info *cei = &et->cached_en->ei;

		if (cei->fofs <= fofs && cei->fofs + cei->len > fofs)
			return et->cached_en;
	}

	while (node) {
		en = rb_entry(node, struct extent_node, rb_node);

		if (fofs < en->ei.fofs) {
			node = node->rb_left;
		} else if (fofs >= en->ei.fofs + en->ei.len) {
			node = node->rb_right;
		} else {
			et->cached_en = en;
			return en;
		}
	}
	return NULL;
}
static struct extent_node *__try_back_merge(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	struct extent_node *prev;
	struct rb_node *node;

	node = rb_prev(&en->rb_node);
	if (!node)
		return NULL;

	prev = rb_entry(node, struct extent_node, rb_node);
	if (__is_back_mergeable(&en->ei, &prev->ei)) {
		en->ei.fofs = prev->ei.fofs;
		en->ei.blk = prev->ei.blk;
		en->ei.len += prev->ei.len;
		__detach_extent_node(sbi, et, prev);
		return prev;
	}
	return NULL;
}
static struct extent_node *__try_front_merge(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	struct extent_node *next;
	struct rb_node *node;

	node = rb_next(&en->rb_node);
	if (!node)
		return NULL;

	next = rb_entry(node, struct extent_node, rb_node);
	if (__is_front_mergeable(&en->ei, &next->ei)) {
		en->ei.len += next->ei.len;
		__detach_extent_node(sbi, et, next);
		return next;
	}
	return NULL;
}
static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct extent_node **den)
{
	struct rb_node **p = &et->root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_node *en;

	while (*p) {
		parent = *p;
		en = rb_entry(parent, struct extent_node, rb_node);

		if (ei->fofs < en->ei.fofs) {
			if (__is_front_mergeable(ei, &en->ei)) {
				f2fs_bug_on(sbi, !den);
				en->ei.fofs = ei->fofs;
				en->ei.blk = ei->blk;
				en->ei.len += ei->len;
				*den = __try_back_merge(sbi, et, en);
				return en;
			}
			p = &(*p)->rb_left;
		} else if (ei->fofs >= en->ei.fofs + en->ei.len) {
			if (__is_back_mergeable(ei, &en->ei)) {
				f2fs_bug_on(sbi, !den);
				en->ei.len += ei->len;
				*den = __try_front_merge(sbi, et, en);
				return en;
			}
			p = &(*p)->rb_right;
		} else {
			f2fs_bug_on(sbi, 1);
		}
	}

	return __attach_extent_node(sbi, et, ei, parent, p);
}
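
/*
 * Release extent nodes of a tree. With free_all, every node is detached
 * and freed; otherwise only nodes already removed from the global LRU list
 * are reclaimed. Returns the number of nodes freed.
 */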
static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
					struct extent_tree *et, bool free_all)
{
	struct rb_node *node, *next;
	struct extent_node *en;
	unsigned int count = et->count;

	node = rb_first(&et->root);
	while (node) {
		next = rb_next(node);
		en = rb_entry(node, struct extent_node, rb_node);

		if (free_all) {
			spin_lock(&sbi->extent_lock);
			if (!list_empty(&en->list))
				list_del_init(&en->list);
			spin_unlock(&sbi->extent_lock);
		}

		if (free_all || list_empty(&en->list)) {
			__detach_extent_node(sbi, et, en);
			kmem_cache_free(extent_node_slab, en);
		}
		node = next;
	}

	return count - et->count;
}
static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
							struct extent_info *ei)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	struct extent_node *en;

	trace_f2fs_lookup_extent_tree_start(inode, pgofs);

	down_read(&sbi->extent_tree_lock);
	et = radix_tree_lookup(&sbi->extent_tree_root, inode->i_ino);
	if (!et) {
		up_read(&sbi->extent_tree_lock);
		return false;
	}
	atomic_inc(&et->refcount);
	up_read(&sbi->extent_tree_lock);

	read_lock(&et->lock);
	en = __lookup_extent_tree(et, pgofs);
	if (en) {
		*ei = en->ei;
		spin_lock(&sbi->extent_lock);
		if (!list_empty(&en->list))
			list_move_tail(&en->list, &sbi->extent_list);
		spin_unlock(&sbi->extent_lock);
		stat_inc_read_hit(sbi->sb);
	}
	stat_inc_total_hit(sbi->sb);
	read_unlock(&et->lock);

	trace_f2fs_lookup_extent_tree_end(inode, pgofs, en);

	atomic_dec(&et->refcount);
	return en ? true : false;
}
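
/*
 * Insert or update the one-block extent [fofs, blkaddr] in the inode's
 * extent tree, creating the tree on first use. An overlapping extent is
 * detached and, when large enough, re-inserted as left and right remnants
 * around the updated block.
 */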
static void f2fs_update_extent_tree(struct inode *inode, pgoff_t fofs,
							block_t blkaddr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	struct extent_tree *et;
	struct extent_node *en = NULL, *en1 = NULL, *en2 = NULL, *en3 = NULL;
	struct extent_node *den = NULL;
	struct extent_info ei, dei;
	unsigned int endofs;

	trace_f2fs_update_extent_tree(inode, fofs, blkaddr);

	down_write(&sbi->extent_tree_lock);
	et = radix_tree_lookup(&sbi->extent_tree_root, ino);
	if (!et) {
		et = f2fs_kmem_cache_alloc(extent_tree_slab, GFP_NOFS);
		f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
		memset(et, 0, sizeof(struct extent_tree));
		et->ino = ino;
		et->root = RB_ROOT;
		et->cached_en = NULL;
		rwlock_init(&et->lock);
		atomic_set(&et->refcount, 0);
		et->count = 0;
		sbi->total_ext_tree++;
	}
	atomic_inc(&et->refcount);
	up_write(&sbi->extent_tree_lock);

	write_lock(&et->lock);

	/* 1. lookup and remove existing extent info in cache */
	en = __lookup_extent_tree(et, fofs);
	if (!en)
		goto update_extent;

	dei = en->ei;
	__detach_extent_node(sbi, et, en);

	/* 2. if extent can be split more, split and insert the left part */
	if (dei.len > 1) {
		/* insert left part of split extent into cache */
		if (fofs - dei.fofs >= F2FS_MIN_EXTENT_LEN) {
			set_extent_info(&ei, dei.fofs, dei.blk,
							fofs - dei.fofs);
			en1 = __insert_extent_tree(sbi, et, &ei, NULL);
		}

		/* insert right part of split extent into cache */
		endofs = dei.fofs + dei.len - 1;
		if (endofs - fofs >= F2FS_MIN_EXTENT_LEN) {
			set_extent_info(&ei, fofs + 1,
				fofs - dei.fofs + dei.blk, endofs - fofs);
			en2 = __insert_extent_tree(sbi, et, &ei, NULL);
		}
	}

update_extent:
	/* 3. update extent in extent cache */
	if (blkaddr) {
		set_extent_info(&ei, fofs, blkaddr, 1);
		en3 = __insert_extent_tree(sbi, et, &ei, &den);
	}

	/* 4. update in global extent list */
	spin_lock(&sbi->extent_lock);
	if (en && !list_empty(&en->list))
		list_del(&en->list);
	/*
	 * en1 and en2 were split from en; after several splits they become
	 * smaller and smaller fragments. So if the length is smaller than
	 * F2FS_MIN_EXTENT_LEN, we do not add them into the extent tree.
	 */
	if (en1)
		list_add_tail(&en1->list, &sbi->extent_list);
	if (en2)
		list_add_tail(&en2->list, &sbi->extent_list);
	if (en3) {
		if (list_empty(&en3->list))
			list_add_tail(&en3->list, &sbi->extent_list);
		else
			list_move_tail(&en3->list, &sbi->extent_list);
	}
	if (den && !list_empty(&den->list))
		list_del(&den->list);
	spin_unlock(&sbi->extent_lock);

	/* 5. release extent node */
	if (en)
		kmem_cache_free(extent_node_slab, en);
	if (den)
		kmem_cache_free(extent_node_slab, den);

	write_unlock(&et->lock);
	atomic_dec(&et->refcount);
}
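
/*
 * Shrinker entry point: when free memory runs low, drop up to nr_shrink
 * extent nodes from the head of the global LRU list, then sweep the radix
 * tree and free any extent tree that has become empty and unreferenced.
 */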
void f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct extent_tree *treevec[EXT_TREE_VEC_SIZE];
	struct extent_node *en, *tmp;
	unsigned long ino = F2FS_ROOT_INO(sbi);
	struct radix_tree_iter iter;
	void **slot;
	unsigned int found;
	unsigned int node_cnt = 0, tree_cnt = 0;

	if (!test_opt(sbi, EXTENT_CACHE))
		return;

	if (available_free_memory(sbi, EXTENT_CACHE))
		return;

	spin_lock(&sbi->extent_lock);
	list_for_each_entry_safe(en, tmp, &sbi->extent_list, list) {
		if (!nr_shrink--)
			break;
		list_del_init(&en->list);
	}
	spin_unlock(&sbi->extent_lock);

	down_read(&sbi->extent_tree_lock);
	while ((found = radix_tree_gang_lookup(&sbi->extent_tree_root,
				(void **)treevec, ino, EXT_TREE_VEC_SIZE))) {
		unsigned i;

		ino = treevec[found - 1]->ino + 1;
		for (i = 0; i < found; i++) {
			struct extent_tree *et = treevec[i];

			atomic_inc(&et->refcount);
			write_lock(&et->lock);
			node_cnt += __free_extent_tree(sbi, et, false);
			write_unlock(&et->lock);
			atomic_dec(&et->refcount);
		}
	}
	up_read(&sbi->extent_tree_lock);

	down_write(&sbi->extent_tree_lock);
	radix_tree_for_each_slot(slot, &sbi->extent_tree_root, &iter,
							F2FS_ROOT_INO(sbi)) {
		struct extent_tree *et = (struct extent_tree *)*slot;

		if (!atomic_read(&et->refcount) && !et->count) {
			radix_tree_delete(&sbi->extent_tree_root, et->ino);
			kmem_cache_free(extent_tree_slab, et);
			sbi->total_ext_tree--;
			tree_cnt++;
		}
	}
	up_write(&sbi->extent_tree_lock);

	trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt);
}
void f2fs_destroy_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	unsigned int node_cnt = 0;

	if (!test_opt(sbi, EXTENT_CACHE))
		return;

	down_read(&sbi->extent_tree_lock);
	et = radix_tree_lookup(&sbi->extent_tree_root, inode->i_ino);
	if (!et) {
		up_read(&sbi->extent_tree_lock);
		goto out;
	}
	atomic_inc(&et->refcount);
	up_read(&sbi->extent_tree_lock);

	/* free all extent info belonging to this extent tree */
	write_lock(&et->lock);
	node_cnt = __free_extent_tree(sbi, et, true);
	write_unlock(&et->lock);

	atomic_dec(&et->refcount);

	/* try to find and delete extent tree entry in radix tree */
	down_write(&sbi->extent_tree_lock);
	et = radix_tree_lookup(&sbi->extent_tree_root, inode->i_ino);
	if (!et) {
		up_write(&sbi->extent_tree_lock);
		goto out;
	}
	f2fs_bug_on(sbi, atomic_read(&et->refcount) || et->count);
	radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
	kmem_cache_free(extent_tree_slab, et);
	sbi->total_ext_tree--;
	up_write(&sbi->extent_tree_lock);
out:
	trace_f2fs_destroy_extent_tree(inode, node_cnt);
}
static bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
							struct extent_info *ei)
{
	if (is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT))
		return false;

	if (test_opt(F2FS_I_SB(inode), EXTENT_CACHE))
		return f2fs_lookup_extent_tree(inode, pgofs, ei);

	return lookup_extent_info(inode, pgofs, ei);
}
void f2fs_update_extent_cache(struct dnode_of_data *dn)
{
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	pgoff_t fofs;

	f2fs_bug_on(F2FS_I_SB(dn->inode), dn->data_blkaddr == NEW_ADDR);

	/* Update the page address in the parent node */
	__set_data_blkaddr(dn);

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return;

	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;

	if (test_opt(F2FS_I_SB(dn->inode), EXTENT_CACHE))
		return f2fs_update_extent_tree(dn->inode, fofs,
							dn->data_blkaddr);

	if (update_extent_info(dn->inode, fofs, dn->data_blkaddr))
		sync_inode_page(dn);
}
struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei;
	int err;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = sync ? READ_SYNC : READA,
	};

	/*
	 * If sync is false, it needs to check its block allocation.
	 * This is needed and is triggered by two flows:
	 *   gc and truncate_partial_data_page.
	 */
	if (!sync)
		goto search;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);
search:
	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR)
		return ERR_PTR(-ENOENT);

	/* By fallocate(), there is no cached page, but with NEW_ADDR */
	if (unlikely(dn.data_blkaddr == NEW_ADDR))
		return ERR_PTR(-EINVAL);
got_it:
	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	fio.blk_addr = dn.data_blkaddr;
	err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, &fio);
	if (err)
		return ERR_PTR(err);

	if (sync) {
		wait_on_page_locked(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 0);
			return ERR_PTR(-EIO);
		}
	}
	return page;
}
/*
 * If it tries to access a hole, return an error.
 * The callers, functions in dir.c and GC, should be able to know
 * whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei;
	int err;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = READ_SYNC,
	};
repeat:
	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-ENOENT);
	}
got_it:
	if (PageUptodate(page))
		return page;

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In that case, its blkaddr remains NEW_ADDR.
	 * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		return page;
	}

	fio.blk_addr = dn.data_blkaddr;
	err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, &fio);
	if (err)
		return ERR_PTR(err);

	lock_page(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}
/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err)
		return ERR_PTR(err);
repeat:
	page = grab_cache_page(mapping, index);
	if (!page) {
		err = -ENOMEM;
		goto put_err;
	}

	if (PageUptodate(page))
		return page;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		struct f2fs_io_info fio = {
			.type = DATA,
			.rw = READ_SYNC,
			.blk_addr = dn.data_blkaddr,
		};
		err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, &fio);
		if (err)
			goto put_err;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			err = -EIO;
			goto put_err;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	if (new_i_size &&
		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
	}
	return page;

put_err:
	f2fs_put_dnode(&dn);
	return ERR_PTR(err);
}
static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	int seg = CURSEG_WARM_DATA;
	pgoff_t fofs;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
		seg = CURSEG_DIRECT_IO;

	allocate_data_block(sbi, NULL, NULL_ADDR, &dn->data_blkaddr, &sum, seg);

	/* direct IO doesn't use the extent cache, to maximize performance */
	__set_data_blkaddr(dn);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((fofs + 1) << PAGE_CACHE_SHIFT))
		i_size_write(dn->inode, ((fofs + 1) << PAGE_CACHE_SHIFT));

	return 0;
}
static void __allocate_data_blocks(struct inode *inode, loff_t offset,
							size_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	u64 start = F2FS_BYTES_TO_BLK(offset);
	u64 len = F2FS_BYTES_TO_BLK(count);
	bool allocated;
	u64 end_offset;

	while (len) {
		f2fs_balance_fs(sbi);
		f2fs_lock_op(sbi);

		/* When reading holes, we need its node page */
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		if (get_dnode_of_data(&dn, start, ALLOC_NODE))
			goto out;

		allocated = false;
		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));

		while (dn.ofs_in_node < end_offset && len) {
			block_t blkaddr;

			blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
			if (blkaddr == NULL_ADDR) {
				if (__allocate_data_block(&dn))
					goto sync_out;
				allocated = true;
			}
			len--;
			start++;
			dn.ofs_in_node++;
		}

		if (allocated)
			sync_inode_page(&dn);

		f2fs_put_dnode(&dn);
		f2fs_unlock_op(sbi);
	}
	return;

sync_out:
	if (allocated)
		sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
out:
	f2fs_unlock_op(sbi);
	return;
}
/*
 * get_data_block() now supports readahead/bmap/rw direct_IO with a mapped bh.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create, bool fiemap)
{
	unsigned int blkbits = inode->i_sb->s_blocksize_bits;
	unsigned maxblocks = bh_result->b_size >> blkbits;
	struct dnode_of_data dn;
	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
	pgoff_t pgofs, end_offset;
	int err = 0, ofs = 1;
	struct extent_info ei;
	bool allocated = false;

	/* Get the page offset from the block offset(iblock) */
	pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));

	if (f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		f2fs_map_bh(inode->i_sb, pgofs, &ei, bh_result);
		goto out;
	}

	if (create)
		f2fs_lock_op(F2FS_I_SB(inode));

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (err == -ENOENT)
			err = 0;
		goto unlock_out;
	}
	if (dn.data_blkaddr == NEW_ADDR && !fiemap)
		goto put_out;

	if (dn.data_blkaddr != NULL_ADDR) {
		clear_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
	} else if (create) {
		err = __allocate_data_block(&dn);
		if (err)
			goto put_out;
		allocated = true;
		set_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
	} else {
		goto put_out;
	}

	end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	bh_result->b_size = (((size_t)1) << blkbits);
	dn.ofs_in_node++;
	pgofs++;

get_next:
	if (dn.ofs_in_node >= end_offset) {
		if (allocated)
			sync_inode_page(&dn);
		allocated = false;
		f2fs_put_dnode(&dn);

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, mode);
		if (err) {
			if (err == -ENOENT)
				err = 0;
			goto unlock_out;
		}
		if (dn.data_blkaddr == NEW_ADDR && !fiemap)
			goto put_out;

		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	}

	if (maxblocks > (bh_result->b_size >> blkbits)) {
		block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
		if (blkaddr == NULL_ADDR && create) {
			err = __allocate_data_block(&dn);
			if (err)
				goto sync_out;
			allocated = true;
			set_buffer_new(bh_result);
			blkaddr = dn.data_blkaddr;
		}
		/* Give more consecutive addresses for the readahead */
		if (blkaddr == (bh_result->b_blocknr + ofs)) {
			ofs++;
			dn.ofs_in_node++;
			pgofs++;
			bh_result->b_size += (((size_t)1) << blkbits);
			goto get_next;
		}
	}
sync_out:
	if (allocated)
		sync_inode_page(&dn);
put_out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (create)
		f2fs_unlock_op(F2FS_I_SB(inode));
out:
	trace_f2fs_get_data_block(inode, iblock, bh_result, err);
	return err;
}
static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create, false);
}
static int get_data_block_fiemap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create, true);
}
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	return generic_block_fiemap(inode, fieinfo,
				start, len, get_data_block_fiemap);
}
static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = -EAGAIN;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = mpage_readpage(page, get_data_block);

	return ret;
}
static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return mpage_readpages(mapping, pages, nr_pages, get_data_block);
}
int do_write_data_page(struct page *page, struct f2fs_io_info *fio)
{
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	fio->blk_addr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->blk_addr == NULL_ADDR) {
		ClearPageUptodate(page);
		goto out_writepage;
	}

	set_page_writeback(page);

	/*
	 * If the current allocation needs SSR,
	 * it is better to do in-place writes for updated data.
	 */
	if (unlikely(fio->blk_addr != NEW_ADDR &&
			!is_cold_data(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(page, fio);
		set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
	} else {
		write_data_page(page, &dn, fio);
		f2fs_update_extent_cache(&dn);
		set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
		if (page->index == 0)
			set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}
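
/*
 * Write one dirty data page. Pages fully beyond i_size are skipped, the
 * partial page at EOF is zeroed past the file tail, dentry pages are
 * written under checkpoint control, and everything else goes through
 * do_write_data_page() under f2fs_lock_op().
 */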
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
	};

	trace_f2fs_writepage(page, DATA);

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (f2fs_is_drop_cache(inode))
		goto out;
	if (f2fs_is_volatile_file(inode) && !wbc->for_reclaim &&
			available_free_memory(sbi, BASE_CHECK))
		goto redirty_out;

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		if (unlikely(f2fs_cp_error(sbi)))
			goto redirty_out;
		err = do_write_data_page(page, &fio);
		goto done;
	}

	/* we should bypass data pages to proceed the kworker jobs */
	if (unlikely(f2fs_cp_error(sbi))) {
		SetPageError(page);
		goto out;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0))
		goto redirty_out;

	err = -EAGAIN;
	f2fs_lock_op(sbi);
	if (f2fs_has_inline_data(inode))
		err = f2fs_write_inline_data(inode, page);
	if (err == -EAGAIN)
		err = do_write_data_page(page, &fio);
	f2fs_unlock_op(sbi);
done:
	if (err && err != -ENOENT)
		goto redirty_out;

	clear_cold_data(page);
out:
	inode_dec_dirty_pages(inode);
	if (err)
		ClearPageUptodate(page);
	unlock_page(page);
	if (need_balance_fs)
		f2fs_balance_fs(sbi);
	if (wbc->for_reclaim)
		f2fs_submit_merged_bio(sbi, DATA, WRITE);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}
static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}
static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;
	long diff;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	diff = nr_pages_to_write(sbi, DATA, wbc);

	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);

	f2fs_submit_merged_bio(sbi, DATA, WRITE);

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	return 0;
}
static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		truncate_blocks(inode, inode->i_size, true);
	}
}
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page, *ipage;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	f2fs_balance_fs(sbi);

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	f2fs_lock_op(sbi);

	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_fail;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA) {
			read_inline_data(page, ipage);
			set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
			sync_inode_page(&dn);
			goto put_next;
		}
		err = f2fs_convert_inline_page(&dn, page);
		if (err)
			goto put_fail;
	}
	err = f2fs_reserve_block(&dn, index);
	if (err)
		goto put_fail;
put_next:
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
		return 0;

	f2fs_wait_on_page_writeback(page, DATA);

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		struct f2fs_io_info fio = {
			.type = DATA,
			.rw = READ_SYNC,
			.blk_addr = dn.data_blkaddr,
		};
		err = f2fs_submit_page_bio(sbi, page, &fio);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			err = -EIO;
			goto fail;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}
out:
	SetPageUptodate(page);
	clear_cold_data(page);
	return 0;

put_fail:
	f2fs_put_dnode(&dn);
unlock_fail:
	f2fs_unlock_op(sbi);
	f2fs_put_page(page, 1);
fail:
	f2fs_write_failed(mapping, pos + len);
	return err;
}
static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	f2fs_put_page(page, 1);
	return copied;
}
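
/*
 * Direct I/O helpers. check_direct_IO() enforces block alignment only for
 * writes; an unaligned request makes f2fs_direct_IO() return 0, so the
 * generic layer falls back to buffered I/O.
 */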
static int check_direct_IO(struct inode *inode, int rw,
		struct iov_iter *iter, loff_t offset)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (rw == READ)
		return 0;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}
static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
		struct iov_iter *iter, loff_t offset)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	int err;

	/* we don't need to use inline_data strictly */
	if (f2fs_has_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	if (check_direct_IO(inode, rw, iter, offset))
		return 0;

	trace_f2fs_direct_IO_enter(inode, offset, count, rw);

	if (rw & WRITE)
		__allocate_data_blocks(inode, offset, count);

	err = blockdev_direct_IO(rw, iocb, inode, iter, offset, get_data_block);
	if (err < 0 && (rw & WRITE))
		f2fs_write_failed(mapping, offset + count);

	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);

	return err;
}
void f2fs_invalidate_page(struct page *page, unsigned int offset,
							unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_META);
		else if (inode->i_ino == F2FS_NODE_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		else
			inode_dec_dirty_pages(inode);
	}
	ClearPagePrivate(page);
}
int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is a dirty page, keep PagePrivate */
	if (PageDirty(page))
		return 0;

	ClearPagePrivate(page);
	return 1;
}
static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);

	if (f2fs_is_atomic_file(inode)) {
		register_inmem_page(inode, page);
		return 1;
	}

	mark_inode_dirty(inode);

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}
static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	/* we don't need to use inline_data strictly */
	if (f2fs_has_inline_data(inode)) {
		int err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}
	return generic_block_bmap(mapping, block, get_data_block);
}
void init_extent_cache_info(struct f2fs_sb_info *sbi)
{
	INIT_RADIX_TREE(&sbi->extent_tree_root, GFP_NOIO);
	init_rwsem(&sbi->extent_tree_lock);
	INIT_LIST_HEAD(&sbi->extent_list);
	spin_lock_init(&sbi->extent_lock);
	sbi->total_ext_tree = 0;
	atomic_set(&sbi->total_ext_node, 0);
}
int __init create_extent_cache(void)
{
	extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
			sizeof(struct extent_tree));
	if (!extent_tree_slab)
		return -ENOMEM;
	extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
			sizeof(struct extent_node));
	if (!extent_node_slab) {
		kmem_cache_destroy(extent_tree_slab);
		return -ENOMEM;
	}
	return 0;
}
void destroy_extent_cache(void)
{
	kmem_cache_destroy(extent_node_slab);
	kmem_cache_destroy(extent_tree_slab);
}
const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
};