/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/aio.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

static void f2fs_read_end_io(struct bio *bio, int err)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!err) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}

static void f2fs_write_end_io(struct bio *bio, int err)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (unlikely(err)) {
			set_page_dirty(page);
			set_bit(AS_EIO, &page->mapping->flags);
			f2fs_stop_checkpoint(sbi);
		}
		end_page_writeback(page);
		dec_page_count(sbi, F2FS_WRITEBACK);
	}

	if (!get_pages(sbi, F2FS_WRITEBACK) &&
			!list_empty(&sbi->cp_wait.task_list))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	/* No failure on bio allocation */
	bio = bio_alloc(GFP_NOIO, npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = sbi;

	return bio;
}

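/*
 * A worked example of the address conversion above (a sketch, not part of
 * the original source): SECTOR_FROM_BLOCK() converts a 4KB block address
 * into 512-byte sectors by shifting left by 3, so blk_addr 100 becomes
 * bi_sector 800.
 */
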
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	if (is_read_io(fio->rw))
		trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
	else
		trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);

	submit_bio(fio->rw, io->bio);
	io->bio = NULL;
}

void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		if (test_opt(sbi, NOBARRIER))
			io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO;
		else
			io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
	}
	__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}

/*
 * Fill the locked page with data located in the block address.
 * Return unlocked page.
 */
int f2fs_submit_page_bio(struct f2fs_sb_info *sbi, struct page *page,
					struct f2fs_io_info *fio)
{
	struct bio *bio;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(page, fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(sbi, fio->blk_addr, 1, is_read_io(fio->rw));

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio_put(bio);
		f2fs_put_page(page, 1);
		return -EFAULT;
	}

	submit_bio(fio->rw, bio);
	return 0;
}

void f2fs_submit_page_mbio(struct f2fs_sb_info *sbi, struct page *page,
					struct f2fs_io_info *fio)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->rw);

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	verify_block_addr(sbi, fio->blk_addr);

	down_write(&io->io_rwsem);

	if (!is_read)
		inc_page_count(sbi, F2FS_WRITEBACK);

	if (io->bio && (io->last_block_in_bio != fio->blk_addr - 1 ||
						io->fio.rw != fio->rw))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(sbi);

		io->bio = __bio_alloc(sbi, fio->blk_addr, bio_blocks, is_read);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = fio->blk_addr;
	f2fs_trace_ios(page, fio, 0);

	up_write(&io->io_rwsem);
	trace_f2fs_submit_page_mbio(page, fio);
}

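/*
 * Illustration of the merge policy above (inferred from the code, not part
 * of the original source): pages written to consecutive blocks 1000, 1001,
 * 1002 with identical rw flags accumulate in one bio; a page at block 2000,
 * or one with different rw flags, first triggers __submit_merged_bio() and
 * then opens a fresh bio starting at 2000.
 */
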
/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
static void __set_data_blkaddr(struct dnode_of_data *dn, block_t new_addr)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	f2fs_wait_on_page_writeback(node_page, NODE);

	rn = F2FS_NODE(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(new_addr);
	set_page_dirty(node_page);
}

int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;
	mark_inode_dirty(dn->inode);
	sync_inode_page(dn);
	return 0;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

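/*
 * Typical caller pattern (a sketch; get_new_data_page() below is a real
 * user). When dn->inode_page is NULL, f2fs_reserve_block() drops the dnode
 * on behalf of the caller:
 *
 *	struct dnode_of_data dn;
 *	int err;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = f2fs_reserve_block(&dn, index);
 */
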
static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct buffer_head *bh_result)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr;

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return 0;

	read_lock(&fi->ext.ext_lock);
	if (fi->ext.len == 0) {
		read_unlock(&fi->ext.ext_lock);
		return 0;
	}

	stat_inc_total_hit(inode->i_sb);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;

	if (pgofs >= start_fofs && pgofs <= end_fofs) {
		unsigned int blkbits = inode->i_sb->s_blocksize_bits;
		size_t count;

		clear_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb,
				start_blkaddr + pgofs - start_fofs);
		count = end_fofs - pgofs + 1;
		if (count < (UINT_MAX >> blkbits))
			bh_result->b_size = (count << blkbits);
		else
			bh_result->b_size = UINT_MAX;

		stat_inc_read_hit(inode->i_sb);
		read_unlock(&fi->ext.ext_lock);
		return 1;
	}
	read_unlock(&fi->ext.ext_lock);
	return 0;
}

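/*
 * Extent-cache hit example (a sketch, assuming a 4KB block size): with
 * ext = {fofs = 10, blk_addr = 5000, len = 8}, looking up pgofs 12 maps
 * bh_result to block 5000 + (12 - 10) = 5002 and sets b_size to
 * count = (17 - 12 + 1) = 6 blocks, i.e. 6 << 12 bytes.
 */
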
void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
{
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	pgoff_t fofs, start_fofs, end_fofs;
	block_t start_blkaddr, end_blkaddr;
	int need_update = true;

	f2fs_bug_on(F2FS_I_SB(dn->inode), blk_addr == NEW_ADDR);
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;

	/* Update the page address in the parent node */
	__set_data_blkaddr(dn, blk_addr);

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return;

	write_lock(&fi->ext.ext_lock);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;
	end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1;

	/* Drop and initialize the matched extent */
	if (fi->ext.len == 1 && fofs == start_fofs)
		fi->ext.len = 0;

	/* Initial extent */
	if (fi->ext.len == 0) {
		if (blk_addr != NULL_ADDR) {
			fi->ext.fofs = fofs;
			fi->ext.blk_addr = blk_addr;
			fi->ext.len = 1;
		}
		goto end_update;
	}

	/* Front merge */
	if (fofs == start_fofs - 1 && blk_addr == start_blkaddr - 1) {
		fi->ext.fofs--;
		fi->ext.blk_addr--;
		fi->ext.len++;
		goto end_update;
	}

	/* Back merge */
	if (fofs == end_fofs + 1 && blk_addr == end_blkaddr + 1) {
		fi->ext.len++;
		goto end_update;
	}

	/* Split the existing extent */
	if (fi->ext.len > 1 &&
		fofs >= start_fofs && fofs <= end_fofs) {
		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
			fi->ext.len = fofs - start_fofs;
		} else {
			fi->ext.fofs = fofs + 1;
			fi->ext.blk_addr = start_blkaddr +
					fofs - start_fofs + 1;
			fi->ext.len -= fofs - start_fofs + 1;
		}
	} else {
		need_update = false;
	}

	/* Finally, if the extent is very fragmented, let's drop the cache. */
	if (fi->ext.len < F2FS_MIN_EXTENT_LEN) {
		fi->ext.len = 0;
		set_inode_flag(fi, FI_NO_EXTENT);
		need_update = true;
	}
end_update:
	write_unlock(&fi->ext.ext_lock);
	if (need_update)
		sync_inode_page(dn);
	return;
}

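/*
 * Worked example for the single-extent cache above (a sketch): with
 * ext = {fofs = 10, blk_addr = 5000, len = 100}, writing fofs 9 to block
 * 4999 front-merges to {9, 4999, 101}, and writing fofs 110 to block 5100
 * back-merges to {10, 5000, 101}. Rewriting fofs 30 to an unrelated block
 * splits the extent and keeps the larger piece, {31, 5021, 79}; once the
 * remainder falls below F2FS_MIN_EXTENT_LEN, the cache is dropped and
 * FI_NO_EXTENT is set.
 */
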
struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = sync ? READ_SYNC : READA,
	};

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR)
		return ERR_PTR(-ENOENT);

	/* By fallocate(), there is no cached page, but with NEW_ADDR */
	if (unlikely(dn.data_blkaddr == NEW_ADDR))
		return ERR_PTR(-EINVAL);

	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	fio.blk_addr = dn.data_blkaddr;
	err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, &fio);
	if (err)
		return ERR_PTR(err);

	if (sync) {
		wait_on_page_locked(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 0);
			return ERR_PTR(-EIO);
		}
	}
	return page;
}

/*
 * If it tries to access a hole, return an error: the callers in dir.c and
 * GC need to know whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = READ_SYNC,
	};
repeat:
	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-ENOENT);
	}

	if (PageUptodate(page))
		return page;

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr remains NEW_ADDR;
	 * see f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		return page;
	}

	fio.blk_addr = dn.data_blkaddr;
	err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, &fio);
	if (err)
		return ERR_PTR(err);

	lock_page(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err)
		return ERR_PTR(err);
repeat:
	page = grab_cache_page(mapping, index);
	if (!page) {
		err = -ENOMEM;
		goto put_err;
	}

	if (PageUptodate(page))
		return page;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		struct f2fs_io_info fio = {
			.type = DATA,
			.rw = READ_SYNC,
			.blk_addr = dn.data_blkaddr,
		};
		err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, &fio);
		if (err)
			goto put_err;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			err = -EIO;
			goto put_err;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	if (new_i_size &&
		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
	}
	return page;

put_err:
	f2fs_put_dnode(&dn);
	return ERR_PTR(err);
}

static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	struct f2fs_summary sum;
	block_t new_blkaddr;
	struct node_info ni;
	pgoff_t fofs;
	int type;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;

	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	type = CURSEG_WARM_DATA;

	allocate_data_block(sbi, NULL, NULL_ADDR, &new_blkaddr, &sum, type);

	/* direct IO doesn't use extent cache to maximize the performance */
	set_inode_flag(F2FS_I(dn->inode), FI_NO_EXTENT);
	update_extent_cache(new_blkaddr, dn);
	clear_inode_flag(F2FS_I(dn->inode), FI_NO_EXTENT);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((fofs + 1) << PAGE_CACHE_SHIFT))
		i_size_write(dn->inode, ((fofs + 1) << PAGE_CACHE_SHIFT));

	dn->data_blkaddr = new_blkaddr;
	return 0;
}

/*
 * get_data_block() now supports readahead/bmap/rw direct_IO with a mapped bh.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create, bool fiemap)
{
	unsigned int blkbits = inode->i_sb->s_blocksize_bits;
	unsigned maxblocks = bh_result->b_size >> blkbits;
	struct dnode_of_data dn;
	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
	pgoff_t pgofs, end_offset;
	int err = 0, ofs = 1;
	bool allocated = false;

	/* Get the page offset from the block offset(iblock) */
	pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));

	if (check_extent_cache(inode, pgofs, bh_result))
		goto out;

	if (create) {
		f2fs_balance_fs(F2FS_I_SB(inode));
		f2fs_lock_op(F2FS_I_SB(inode));
	}

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (err == -ENOENT)
			err = 0;
		goto unlock_out;
	}
	if (dn.data_blkaddr == NEW_ADDR && !fiemap)
		goto put_out;

	if (dn.data_blkaddr != NULL_ADDR) {
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
	} else if (create) {
		err = __allocate_data_block(&dn);
		if (err)
			goto put_out;
		allocated = true;
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
	} else {
		goto put_out;
	}

	end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	bh_result->b_size = (((size_t)1) << blkbits);
	dn.ofs_in_node++;
	pgofs++;

get_next:
	if (dn.ofs_in_node >= end_offset) {
		if (allocated)
			sync_inode_page(&dn);
		allocated = false;
		f2fs_put_dnode(&dn);

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, mode);
		if (err) {
			if (err == -ENOENT)
				err = 0;
			goto unlock_out;
		}
		if (dn.data_blkaddr == NEW_ADDR && !fiemap)
			goto put_out;

		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	}

	if (maxblocks > (bh_result->b_size >> blkbits)) {
		block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
		if (blkaddr == NULL_ADDR && create) {
			err = __allocate_data_block(&dn);
			if (err)
				goto sync_out;
			allocated = true;
			blkaddr = dn.data_blkaddr;
		}
		/* Give more consecutive addresses for the readahead */
		if (blkaddr == (bh_result->b_blocknr + ofs)) {
			ofs++;
			dn.ofs_in_node++;
			pgofs++;
			bh_result->b_size += (((size_t)1) << blkbits);
			goto get_next;
		}
	}
sync_out:
	if (allocated)
		sync_inode_page(&dn);
put_out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (create)
		f2fs_unlock_op(F2FS_I_SB(inode));
out:
	trace_f2fs_get_data_block(inode, iblock, bh_result, err);
	return err;
}

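/*
 * Mapping example for __get_data_block() (a sketch): if pgofs 0..3 map to
 * on-disk blocks 8000..8003, a single call maps bh_result to 8000 and the
 * get_next loop grows b_size one block at a time, so the caller receives
 * one 4-block mapping instead of issuing four separate lookups.
 */
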
static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create, false);
}

static int get_data_block_fiemap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create, true);
}

int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	return generic_block_fiemap(inode, fieinfo,
				start, len, get_data_block_fiemap);
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = -EAGAIN;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = mpage_readpage(page, get_data_block);

	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return mpage_readpages(mapping, pages, nr_pages, get_data_block);
}

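/*
 * Read-path dispatch above in short (a sketch of the logic, not original
 * commentary): an inline file is served straight from the inode page by
 * f2fs_read_inline_data(); -EAGAIN means "no inline copy", and the request
 * falls through to mpage_readpage() with get_data_block() as block mapper.
 */
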
int do_write_data_page(struct page *page, struct f2fs_io_info *fio)
{
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	fio->blk_addr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->blk_addr == NULL_ADDR)
		goto out_writepage;

	set_page_writeback(page);

	/*
	 * If current allocation needs SSR,
	 * it is better to do in-place writes for updated data.
	 */
	if (unlikely(fio->blk_addr != NEW_ADDR &&
			!is_cold_data(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(page, fio);
		set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
	} else {
		write_data_page(page, &dn, fio);
		update_extent_cache(fio->blk_addr, &dn);
		set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

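/*
 * The two write policies above, illustrated (a sketch): under SSR pressure,
 * a page whose block already exists on disk is rewritten at the same
 * address (in-place update, FI_UPDATE_WRITE), avoiding a new allocation;
 * otherwise the page is logged to a freshly allocated block (append write,
 * FI_APPEND_WRITE) and the extent cache is pointed at the new address.
 */
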
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
	};

	trace_f2fs_writepage(page, DATA);

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (unlikely(sbi->por_doing))
		goto redirty_out;
	if (f2fs_is_drop_cache(inode))
		goto out;
	if (f2fs_is_volatile_file(inode) && !wbc->for_reclaim &&
			available_free_memory(sbi, BASE_CHECK))
		goto redirty_out;

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		if (unlikely(f2fs_cp_error(sbi)))
			goto redirty_out;
		err = do_write_data_page(page, &fio);
		goto done;
	}

	/* we should bypass data pages to let the kworker jobs proceed */
	if (unlikely(f2fs_cp_error(sbi))) {
		SetPageError(page);
		unlock_page(page);
		goto out;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0))
		goto redirty_out;

	err = -EAGAIN;
	f2fs_lock_op(sbi);
	if (f2fs_has_inline_data(inode))
		err = f2fs_write_inline_data(inode, page);
	if (err == -EAGAIN)
		err = do_write_data_page(page, &fio);
	f2fs_unlock_op(sbi);
done:
	if (err && err != -ENOENT)
		goto redirty_out;

	clear_cold_data(page);
out:
	inode_dec_dirty_pages(inode);
	unlock_page(page);
	if (need_balance_fs)
		f2fs_balance_fs(sbi);
	if (wbc->for_reclaim)
		f2fs_submit_merged_bio(sbi, DATA, WRITE);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool locked = false;
	int ret;
	long diff;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	diff = nr_pages_to_write(sbi, DATA, wbc);

	if (!S_ISDIR(inode->i_mode)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	if (locked)
		mutex_unlock(&sbi->writepages);

	f2fs_submit_merged_bio(sbi, DATA, WRITE);

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	return 0;
}

static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		truncate_blocks(inode, inode->i_size, true);
	}
}

static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page, *ipage;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	f2fs_balance_fs(sbi);

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	f2fs_lock_op(sbi);

	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_fail;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA) {
			read_inline_data(page, ipage);
			set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
			sync_inode_page(&dn);
			goto put_next;
		}
		err = f2fs_convert_inline_page(&dn, page);
		if (err)
			goto put_fail;
	}
	err = f2fs_reserve_block(&dn, index);
	if (err)
		goto put_fail;
put_next:
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
		return 0;

	f2fs_wait_on_page_writeback(page, DATA);

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		struct f2fs_io_info fio = {
			.type = DATA,
			.rw = READ_SYNC,
			.blk_addr = dn.data_blkaddr,
		};
		err = f2fs_submit_page_bio(sbi, page, &fio);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			err = -EIO;
			goto fail;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}
out:
	SetPageUptodate(page);
	clear_cold_data(page);
	return 0;

put_fail:
	f2fs_put_dnode(&dn);
unlock_fail:
	f2fs_unlock_op(sbi);
	f2fs_put_page(page, 1);
fail:
	f2fs_write_failed(mapping, pos + len);
	return err;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	f2fs_put_page(page, 1);
	return copied;
}

static int check_direct_IO(struct inode *inode, int rw,
		struct iov_iter *iter, loff_t offset)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (rw == READ)
		return 0;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}

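/*
 * Alignment example for check_direct_IO() (a sketch, assuming a 4KB block
 * size): blocksize_mask is 0xfff, so a write at offset 8192 with 4KB-aligned
 * iovecs passes, while offset 4100 fails (4100 & 0xfff != 0) and
 * f2fs_direct_IO() below returns 0, letting the VFS fall back to buffered IO.
 */
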
static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
		struct iov_iter *iter, loff_t offset)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	int err;

	/* we don't need to use inline_data strictly */
	if (f2fs_has_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	if (check_direct_IO(inode, rw, iter, offset))
		return 0;

	trace_f2fs_direct_IO_enter(inode, offset, count, rw);

	err = blockdev_direct_IO(rw, iocb, inode, iter, offset, get_data_block);
	if (err < 0 && (rw & WRITE))
		f2fs_write_failed(mapping, offset + count);

	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);

	return err;
}

static void f2fs_invalidate_data_page(struct page *page, unsigned int offset,
				      unsigned int length)
{
	struct inode *inode = page->mapping->host;

	if (offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE)
		return;

	if (PageDirty(page))
		inode_dec_dirty_pages(inode);
	ClearPagePrivate(page);
}

static int f2fs_release_data_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}

static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);

	if (f2fs_is_atomic_file(inode)) {
		register_inmem_page(inode, page);
		return 1;
	}

	mark_inode_dirty(inode);

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	/* we don't need to use inline_data strictly */
	if (f2fs_has_inline_data(inode)) {
		int err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}
	return generic_block_bmap(mapping, block, get_data_block);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_data_page,
	.releasepage	= f2fs_release_data_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};