/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/aio.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>
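/*
 * Read completion: walk the bio's page vector in reverse, marking each
 * page uptodate (or clearing it on error) and unlocking it.
 */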
static void f2fs_read_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (unlikely(!uptodate)) {
			ClearPageUptodate(page);
			SetPageError(page);
		} else {
			SetPageUptodate(page);
		}
		unlock_page(page);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
}
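/*
 * Write completion: end writeback on every page in the bio. On error the
 * checkpoint is flagged and the filesystem is forced read-only. Once no
 * pages remain under writeback, waiters on cp_wait are woken up.
 */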
static void f2fs_write_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct f2fs_sb_info *sbi = F2FS_SB(bvec->bv_page->mapping->host->i_sb);

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (unlikely(!uptodate)) {
			SetPageError(page);
			set_bit(AS_EIO, &page->mapping->flags);
			set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
			sbi->sb->s_flags |= MS_RDONLY;
		}
		end_page_writeback(page);
		dec_page_count(sbi, F2FS_WRITEBACK);
	} while (bvec >= bio->bi_io_vec);

	if (bio->bi_private)
		complete(bio->bi_private);

	if (!get_pages(sbi, F2FS_WRITEBACK) &&
			!list_empty(&sbi->cp_wait.task_list))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}
/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	/* No failure on bio allocation */
	bio = bio_alloc(GFP_NOIO, npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;

	return bio;
}
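/*
 * Submit the bio accumulated in @io, if any. A write bio of type
 * META_FLUSH is waited on via an on-stack completion, since the
 * checkpoint depends on it reaching the disk.
 */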
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;
	int rw;

	if (!io->bio)
		return;

	rw = fio->rw;

	if (is_read_io(rw)) {
		trace_f2fs_submit_read_bio(io->sbi->sb, rw,
						fio->type, io->bio);
		submit_bio(rw, io->bio);
	} else {
		trace_f2fs_submit_write_bio(io->sbi->sb, rw,
						fio->type, io->bio);
		/*
		 * META_FLUSH is only from the checkpoint procedure, and we
		 * should wait for this metadata bio for FS consistency.
		 */
		if (fio->type == META_FLUSH) {
			DECLARE_COMPLETION_ONSTACK(wait);
			io->bio->bi_private = &wait;
			submit_bio(rw, io->bio);
			wait_for_completion(&wait);
		} else {
			submit_bio(rw, io->bio);
		}
	}

	io->bio = NULL;
}
void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	mutex_lock(&io->io_mutex);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		io->fio.rw = WRITE_FLUSH_FUA;
	}
	__submit_merged_bio(io);
	mutex_unlock(&io->io_mutex);
}
/*
 * Fill the locked page with data located in the block address.
 * Return unlocked page.
 */
int f2fs_submit_page_bio(struct f2fs_sb_info *sbi, struct page *page,
					block_t blk_addr, int rw)
{
	struct bio *bio;

	trace_f2fs_submit_page_bio(page, blk_addr, rw);

	/* Allocate a new bio */
	bio = __bio_alloc(sbi, blk_addr, 1, is_read_io(rw));

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio_put(bio);
		f2fs_put_page(page, 1);
		return -EFAULT;
	}

	submit_bio(rw, bio);
	return 0;
}
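/*
 * Merge-aware submission: pages at consecutive block addresses with the
 * same rw hint are gathered into one bio, which is flushed whenever the
 * pattern breaks or the bio becomes full.
 */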
void f2fs_submit_page_mbio(struct f2fs_sb_info *sbi, struct page *page,
			block_t blk_addr, struct f2fs_io_info *fio)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->rw);

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	verify_block_addr(sbi, blk_addr);

	mutex_lock(&io->io_mutex);

	if (!is_read)
		inc_page_count(sbi, F2FS_WRITEBACK);

	if (io->bio && (io->last_block_in_bio != blk_addr - 1 ||
						io->fio.rw != fio->rw))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));

		io->bio = __bio_alloc(sbi, blk_addr, bio_blocks, is_read);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = blk_addr;

	mutex_unlock(&io->io_mutex);
	trace_f2fs_submit_page_mbio(page, fio->rw, fio->type, blk_addr);
}
/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
static void __set_data_blkaddr(struct dnode_of_data *dn, block_t new_addr)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	f2fs_wait_on_page_writeback(node_page, NODE);

	rn = F2FS_NODE(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(new_addr);
	set_page_dirty(node_page);
}
int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;
	mark_inode_dirty(dn->inode);
	sync_inode_page(dn);
	return 0;
}
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	/* if inode_page exists, index should be zero */
	f2fs_bug_on(!need_put && index);

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}
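/*
 * Look up @pgofs in the inode's one-extent cache. On a hit, map
 * @bh_result to the cached block address and extend b_size over as many
 * consecutive blocks as the extent still covers.
 */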
static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct buffer_head *bh_result)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr;

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return 0;

	read_lock(&fi->ext.ext_lock);
	if (fi->ext.len == 0) {
		read_unlock(&fi->ext.ext_lock);
		return 0;
	}

	stat_inc_total_hit(inode->i_sb);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;

	if (pgofs >= start_fofs && pgofs <= end_fofs) {
		unsigned int blkbits = inode->i_sb->s_blocksize_bits;
		size_t count;

		clear_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb,
				start_blkaddr + pgofs - start_fofs);
		count = end_fofs - pgofs + 1;
		if (count < (UINT_MAX >> blkbits))
			bh_result->b_size = (count << blkbits);
		else
			bh_result->b_size = UINT_MAX;

		stat_inc_read_hit(inode->i_sb);
		read_unlock(&fi->ext.ext_lock);
		return 1;
	}
	read_unlock(&fi->ext.ext_lock);
	return 0;
}
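/*
 * Fold the new (fofs, blk_addr) pair into the one-extent cache: start a
 * fresh extent, merge at the front or back, or split the existing one,
 * and disable the cache once the extent gets too fragmented to help.
 */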
void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
{
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	pgoff_t fofs, start_fofs, end_fofs;
	block_t start_blkaddr, end_blkaddr;
	int need_update = true;

	f2fs_bug_on(blk_addr == NEW_ADDR);
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;

	/* Update the page address in the parent node */
	__set_data_blkaddr(dn, blk_addr);

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return;

	write_lock(&fi->ext.ext_lock);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;
	end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1;

	/* Drop and initialize the matched extent */
	if (fi->ext.len == 1 && fofs == start_fofs)
		fi->ext.len = 0;

	/* Initial extent */
	if (fi->ext.len == 0) {
		if (blk_addr != NULL_ADDR) {
			fi->ext.fofs = fofs;
			fi->ext.blk_addr = blk_addr;
			fi->ext.len = 1;
		}
		goto end_update;
	}

	/* Front merge */
	if (fofs == start_fofs - 1 && blk_addr == start_blkaddr - 1) {
		fi->ext.fofs--;
		fi->ext.blk_addr--;
		fi->ext.len++;
		goto end_update;
	}

	/* Back merge */
	if (fofs == end_fofs + 1 && blk_addr == end_blkaddr + 1) {
		fi->ext.len++;
		goto end_update;
	}

	/* Split the existing extent */
	if (fi->ext.len > 1 &&
		fofs >= start_fofs && fofs <= end_fofs) {
		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
			fi->ext.len = fofs - start_fofs;
		} else {
			fi->ext.fofs = fofs + 1;
			fi->ext.blk_addr = start_blkaddr +
					fofs - start_fofs + 1;
			fi->ext.len -= fofs - start_fofs + 1;
		}
	} else {
		need_update = false;
	}

	/* Finally, if the extent is very fragmented, let's drop the cache. */
	if (fi->ext.len < F2FS_MIN_EXTENT_LEN) {
		fi->ext.len = 0;
		set_inode_flag(fi, FI_NO_EXTENT);
		need_update = true;
	}
end_update:
	write_unlock(&fi->ext.ext_lock);
	if (need_update)
		sync_inode_page(dn);
}
struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR)
		return ERR_PTR(-ENOENT);

	/* By fallocate(), there is no cached page, but with NEW_ADDR */
	if (unlikely(dn.data_blkaddr == NEW_ADDR))
		return ERR_PTR(-EINVAL);

	page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
					sync ? READ_SYNC : READA);
	if (err)
		return ERR_PTR(err);

	if (sync) {
		wait_on_page_locked(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 0);
			return ERR_PTR(-EIO);
		}
	}
	return page;
}
/*
 * If it tries to access a hole, return an error: the callers, functions
 * in dir.c and GC, should be able to know whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;
repeat:
	page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
	if (!page)
		return ERR_PTR(-ENOMEM);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-ENOENT);
	}

	if (PageUptodate(page))
		return page;

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr can remain NEW_ADDR.
	 * see, f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		return page;
	}

	err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr, READ_SYNC);
	if (err)
		return ERR_PTR(err);

	lock_page(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}
/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that, ipage is set only by make_empty_dir.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err)
		return ERR_PTR(err);
repeat:
	page = grab_cache_page(mapping, index);
	if (!page) {
		err = -ENOMEM;
		goto put_err;
	}

	if (PageUptodate(page))
		return page;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
								READ_SYNC);
		if (err)
			goto put_err;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			err = -EIO;
			goto put_err;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	if (new_i_size &&
		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
		mark_inode_dirty_sync(inode);
	}
	return page;

put_err:
	f2fs_put_dnode(&dn);
	return ERR_PTR(err);
}
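/*
 * Allocate one new data block for @dn: charge the block count, pick a
 * new address from the warm data log, and record it in the node page.
 */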
static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct f2fs_summary sum;
	block_t new_blkaddr;
	struct node_info ni;
	int type;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;

	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	type = CURSEG_WARM_DATA;

	allocate_data_block(sbi, NULL, NULL_ADDR, &new_blkaddr, &sum, type);

	/* direct IO doesn't use extent cache to maximize the performance */
	set_inode_flag(F2FS_I(dn->inode), FI_NO_EXTENT);
	update_extent_cache(new_blkaddr, dn);
	clear_inode_flag(F2FS_I(dn->inode), FI_NO_EXTENT);

	dn->data_blkaddr = new_blkaddr;
	return 0;
}
/*
 * get_data_block() supports readahead, bmap and rw direct_IO with a mapped bh.
 * If original data blocks are allocated, then give them to the blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to the blockdev
 */
static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	unsigned int blkbits = inode->i_sb->s_blocksize_bits;
	unsigned maxblocks = bh_result->b_size >> blkbits;
	struct dnode_of_data dn;
	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
	pgoff_t pgofs, end_offset;
	int err = 0, ofs = 1;
	bool allocated = false;

	/* Get the page offset from the block offset(iblock) */
	pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));

	if (check_extent_cache(inode, pgofs, bh_result))
		goto out;

	if (create)
		f2fs_lock_op(sbi);

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (err == -ENOENT)
			err = 0;
		goto unlock_out;
	}
	if (dn.data_blkaddr == NEW_ADDR)
		goto put_out;

	if (dn.data_blkaddr != NULL_ADDR) {
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
	} else if (create) {
		err = __allocate_data_block(&dn);
		if (err)
			goto put_out;
		allocated = true;
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
	} else {
		goto put_out;
	}

	end_offset = IS_INODE(dn.node_page) ?
			ADDRS_PER_INODE(F2FS_I(inode)) : ADDRS_PER_BLOCK;
	bh_result->b_size = (((size_t)1) << blkbits);
	dn.ofs_in_node++;
	pgofs++;

get_next:
	if (dn.ofs_in_node >= end_offset) {
		if (allocated)
			sync_inode_page(&dn);
		allocated = false;
		f2fs_put_dnode(&dn);

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, mode);
		if (err) {
			if (err == -ENOENT)
				err = 0;
			goto unlock_out;
		}
		if (dn.data_blkaddr == NEW_ADDR)
			goto put_out;

		end_offset = IS_INODE(dn.node_page) ?
			ADDRS_PER_INODE(F2FS_I(inode)) : ADDRS_PER_BLOCK;
	}

	if (maxblocks > (bh_result->b_size >> blkbits)) {
		block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
		if (blkaddr == NULL_ADDR && create) {
			err = __allocate_data_block(&dn);
			if (err)
				goto sync_out;
			allocated = true;
			blkaddr = dn.data_blkaddr;
		}
		/* Give more consecutive addresses for the read ahead */
		if (blkaddr == (bh_result->b_blocknr + ofs)) {
			ofs++;
			dn.ofs_in_node++;
			pgofs++;
			bh_result->b_size += (((size_t)1) << blkbits);
			goto get_next;
		}
	}
sync_out:
	if (allocated)
		sync_inode_page(&dn);
put_out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (create)
		f2fs_unlock_op(sbi);
out:
	trace_f2fs_get_data_block(inode, iblock, bh_result, err);
	return err;
}
static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret;

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	else
		ret = mpage_readpage(page, get_data_block);

	return ret;
}
static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return mpage_readpages(mapping, pages, nr_pages, get_data_block);
}
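/*
 * Write one data page: rewrite it in place when SSR would prefer an
 * in-place update, otherwise allocate a new block address and update the
 * extent cache accordingly.
 */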
int do_write_data_page(struct page *page, struct f2fs_io_info *fio)
{
	struct inode *inode = page->mapping->host;
	block_t old_blkaddr, new_blkaddr;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	old_blkaddr = dn.data_blkaddr;

	/* This page is already truncated */
	if (old_blkaddr == NULL_ADDR)
		goto out_writepage;

	set_page_writeback(page);

	/*
	 * If current allocation needs SSR,
	 * it had better in-place writes for updated data.
	 */
	if (unlikely(old_blkaddr != NEW_ADDR &&
			!is_cold_data(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(page, old_blkaddr, fio);
	} else {
		write_data_page(page, &dn, &new_blkaddr, fio);
		update_extent_cache(new_blkaddr, &dn);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
	};

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset) {
		if (S_ISDIR(inode->i_mode)) {
			dec_page_count(sbi, F2FS_DIRTY_DENTS);
			inode_dec_dirty_dents(inode);
		}
		goto out;
	}

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (unlikely(sbi->por_doing)) {
		err = AOP_WRITEPAGE_ACTIVATE;
		goto redirty_out;
	}

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(inode);
		err = do_write_data_page(page, &fio);
	} else {
		f2fs_lock_op(sbi);

		if (f2fs_has_inline_data(inode) || f2fs_may_inline(inode)) {
			err = f2fs_write_inline_data(inode, page, offset);
			f2fs_unlock_op(sbi);
			goto out;
		} else {
			err = do_write_data_page(page, &fio);
		}

		f2fs_unlock_op(sbi);
		need_balance_fs = true;
	}
	if (err == -ENOENT)
		goto out;
	else if (err)
		goto redirty_out;

	if (wbc->for_reclaim) {
		f2fs_submit_merged_bio(sbi, DATA, WRITE);
		need_balance_fs = false;
	}

	clear_cold_data(page);
out:
	unlock_page(page);
	if (need_balance_fs)
		f2fs_balance_fs(sbi);
	return 0;

redirty_out:
	wbc->pages_skipped++;
	set_page_dirty(page);
	return err;
}
#define MAX_DESIRED_PAGES_WP	4096
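/*
 * Callback for write_cache_pages(): invoke ->writepage and record any
 * error on the mapping.
 */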
static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
							void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}
static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	bool locked = false;
	int ret;
	long excess_nrtw = 0, desired_nrtw;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	if (wbc->nr_to_write < MAX_DESIRED_PAGES_WP) {
		desired_nrtw = MAX_DESIRED_PAGES_WP;
		excess_nrtw = desired_nrtw - wbc->nr_to_write;
		wbc->nr_to_write = desired_nrtw;
	}

	if (!S_ISDIR(inode->i_mode)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	if (locked)
		mutex_unlock(&sbi->writepages);

	f2fs_submit_merged_bio(sbi, DATA, WRITE);

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write -= excess_nrtw;
	return ret;
}
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;

	f2fs_balance_fs(sbi);
repeat:
	err = f2fs_convert_inline_data(inode, pos + len);
	if (err)
		return err;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	if (f2fs_has_inline_data(inode) && (pos + len) <= MAX_INLINE_DATA)
		goto inline_data;

	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	f2fs_unlock_op(sbi);

	if (err) {
		f2fs_put_page(page, 1);
		return err;
	}
inline_data:
	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
		return 0;

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		if (f2fs_has_inline_data(inode))
			err = f2fs_read_inline_data(inode, page);
		else
			err = f2fs_submit_page_bio(sbi, page,
					dn.data_blkaddr, READ_SYNC);
		if (err)
			return err;
		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			return -EIO;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}
out:
	SetPageUptodate(page);
	clear_cold_data(page);
	return 0;
}
static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	SetPageUptodate(page);
	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	f2fs_put_page(page, 1);
	return copied;
}
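/*
 * Direct I/O writes must be block-aligned: both the file offset and the
 * length of every iovec segment have to be multiples of the block size.
 */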
static int check_direct_IO(struct inode *inode, int rw,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;
	int i;

	if (rw == READ)
		return 0;

	if (offset & blocksize_mask)
		return -EINVAL;

	for (i = 0; i < nr_segs; i++)
		if (iov[i].iov_len & blocksize_mask)
			return -EINVAL;
	return 0;
}
static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	/* Let buffer I/O handle the inline data case. */
	if (f2fs_has_inline_data(inode))
		return 0;

	if (check_direct_IO(inode, rw, iov, offset, nr_segs))
		return 0;

	return blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
							get_data_block);
}
static void f2fs_invalidate_data_page(struct page *page, unsigned int offset,
				      unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);

	if (S_ISDIR(inode->i_mode) && PageDirty(page)) {
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(inode);
	}
	ClearPagePrivate(page);
}
static int f2fs_release_data_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}
static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		set_dirty_dir_page(inode, page);
		return 1;
	}
	return 0;
}
static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, get_data_block);
}
const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_data_page,
	.releasepage	= f2fs_release_data_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};