/*
 * inode.c - NILFS inode operations.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 */
#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/uio.h>
/* Filesystem-private headers needed for the helpers used below. */
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "ifile.h"
struct nilfs_iget_args {
	u64 ino;
	__u64 cno;
	struct nilfs_root *root;
	int for_gc;
};
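/*
 * nilfs_inode_add_blocks() and nilfs_inode_sub_blocks() adjust the inode's
 * byte count and the per-root block counter by @n filesystem blocks.
 */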
void nilfs_inode_add_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_add_bytes(inode, (1 << inode->i_blkbits) * n);
	if (root)
		atomic_add(n, &root->blocks_count);
}
void nilfs_inode_sub_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_sub_bytes(inode, (1 << inode->i_blkbits) * n);
	if (root)
		atomic_sub(n, &root->blocks_count);
}
/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode - inode struct of the target file
 * @blkoff - file block number
 * @bh_result - buffer head to be mapped on
 * @create - indicate whether allocating the block or not when it has not
 *      been allocated yet.
 *
 * This function does not issue actual read request of the specified data
 * block. It is done by VFS.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
		    struct buffer_head *bh_result, int create)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	__u64 blknum = 0;
	int err = 0, ret;
	struct inode *dat = NILFS_I_NILFS(inode)->ns_dat;
	unsigned maxblocks = bh_result->b_size >> inode->i_blkbits;

	down_read(&NILFS_MDT(dat)->mi_sem);
	ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
	up_read(&NILFS_MDT(dat)->mi_sem);
	if (ret >= 0) {	/* found */
		map_bh(bh_result, inode->i_sb, blknum);
		if (ret > 0)
			bh_result->b_size = (ret << inode->i_blkbits);
		goto out;
	}
	/* data block was not found */
	if (ret == -ENOENT && create) {
		struct nilfs_transaction_info ti;

		bh_result->b_blocknr = 0;
		err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
		if (unlikely(err))
			goto out;
		err = nilfs_bmap_insert(ii->i_bmap, (unsigned long)blkoff,
					(unsigned long)bh_result);
		if (unlikely(err != 0)) {
			if (err == -EEXIST) {
				/*
				 * The get_block() function could be called
				 * from multiple callers for an inode.
				 * However, the page having this block must
				 * be locked in this case.
				 */
				printk(KERN_WARNING
				       "nilfs_get_block: a race condition "
				       "while inserting a data block. "
				       "(inode number=%lu, file block "
				       "offset=%llu)\n",
				       inode->i_ino,
				       (unsigned long long)blkoff);
				err = 0;
			}
			nilfs_transaction_abort(inode->i_sb);
			goto out;
		}
		nilfs_mark_inode_dirty(inode);
		nilfs_transaction_commit(inode->i_sb); /* never fails */
		/* Error handling should be detailed */
		set_buffer_new(bh_result);
		set_buffer_delay(bh_result);
		map_bh(bh_result, inode->i_sb, 0); /* dbn must be changed
						      to proper value */
	} else if (ret == -ENOENT) {
		/* not found is not error (e.g. hole); must return without
		   the mapped state flag. */
		;
	} else {
		err = ret;
	}

 out:
	return err;
}
/**
 * nilfs_readpage() - implement readpage() method of nilfs_aops {}
 * address_space_operations.
 * @file - file struct of the file to be read
 * @page - the page to be read
 */
static int nilfs_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, nilfs_get_block);
}
/**
 * nilfs_readpages() - implement readpages() method of nilfs_aops {}
 * address_space_operations.
 * @file - file struct of the file to be read
 * @mapping - address_space struct used for reading multiple pages
 * @pages - the pages to be read
 * @nr_pages - number of pages to be read
 */
static int nilfs_readpages(struct file *file, struct address_space *mapping,
			   struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block);
}
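/*
 * nilfs_writepages() writes out dirty pages of a file: for WB_SYNC_ALL
 * writeback a data-sync segment is constructed for the inode, otherwise
 * the work is left to the segment constructor.
 */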
static int nilfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	int err = 0;

	if (wbc->sync_mode == WB_SYNC_ALL)
		err = nilfs_construct_dsync_segment(inode->i_sb, inode,
						    wbc->range_start,
						    wbc->range_end);
	return err;
}
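/*
 * nilfs_writepage() redirties the page and defers actual writeback to the
 * log writer; a whole segment is constructed synchronously only when the
 * caller asked for WB_SYNC_ALL.
 */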
static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	int err;

	redirty_page_for_writepage(wbc, page);
	unlock_page(page);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		err = nilfs_construct_segment(inode->i_sb);
		if (unlikely(err))
			return err;
	} else if (wbc->for_reclaim)
		nilfs_flush_segment(inode->i_sb, inode->i_ino);

	return 0;
}
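/*
 * nilfs_set_page_dirty() marks page buffers dirty and, when the page was
 * newly dirtied, accounts its blocks to the file's dirty state so that the
 * segment constructor picks the inode up.
 */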
static int nilfs_set_page_dirty(struct page *page)
{
	int ret = __set_page_dirty_buffers(page);

	if (ret) {
		struct inode *inode = page->mapping->host;
		unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);

		nilfs_set_file_dirty(inode, nr_dirty);
	}
	return ret;
}
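/*
 * nilfs_write_begin() and nilfs_write_end() wrap the generic buffered
 * write helpers in a NILFS transaction so that blocks inserted for the
 * write are included in the next segment construction.
 */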
static int nilfs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

	if (unlikely(err))
		return err;

	err = block_write_begin(mapping, pos, len, flags, pagep,
				nilfs_get_block);
	if (unlikely(err)) {
		loff_t isize = mapping->host->i_size;
		if (pos + len > isize)
			vmtruncate(mapping->host, isize);

		nilfs_transaction_abort(inode->i_sb);
	}
	return err;
}
static int nilfs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	unsigned start = pos & (PAGE_CACHE_SIZE - 1);
	unsigned nr_dirty;
	int err;

	nr_dirty = nilfs_page_count_clean_buffers(page, start,
						  start + copied);
	copied = generic_write_end(file, mapping, pos, len, copied, page,
				   fsdata);
	nilfs_set_file_dirty(inode, nr_dirty);
	err = nilfs_transaction_commit(inode->i_sb);
	return err ? : copied;
}
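/*
 * nilfs_direct_IO() handles direct I/O for regular files.  Direct writes
 * are not supported here and fall back to buffered I/O; direct reads go
 * through blockdev_direct_IO() using nilfs_get_block().
 */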
static ssize_t
nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
		loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	ssize_t size;

	if (rw == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	size = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
				  offset, nr_segs, nilfs_get_block, NULL);

	/*
	 * In case of error extending write may have instantiated a few
	 * blocks outside i_size. Trim these off again.
	 */
	if (unlikely((rw & WRITE) && size < 0)) {
		loff_t isize = i_size_read(inode);
		loff_t end = offset + iov_length(iov, nr_segs);

		if (end > isize)
			vmtruncate(inode, isize);
	}

	return size;
}
const struct address_space_operations nilfs_aops = {
	.writepage		= nilfs_writepage,
	.readpage		= nilfs_readpage,
	.sync_page		= block_sync_page,
	.writepages		= nilfs_writepages,
	.set_page_dirty		= nilfs_set_page_dirty,
	.readpages		= nilfs_readpages,
	.write_begin		= nilfs_write_begin,
	.write_end		= nilfs_write_end,
	/* .releasepage		= nilfs_releasepage, */
	.invalidatepage		= block_invalidatepage,
	.direct_IO		= nilfs_direct_IO,
	.is_partially_uptodate	= block_is_partially_uptodate,
};
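/*
 * nilfs_new_inode() allocates an inode for a new file created under @dir:
 * it creates an entry in the ifile of the directory's nilfs_root, sets up
 * ownership, timestamps, inherited flags and the bmap, and returns the new
 * in-core inode or an ERR_PTR() value on failure.
 */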
struct inode *nilfs_new_inode(struct inode *dir, int mode)
{
	struct super_block *sb = dir->i_sb;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct inode *inode;
	struct nilfs_inode_info *ii;
	struct nilfs_root *root;
	int err = -ENOMEM;
	ino_t ino;

	inode = new_inode(sb);
	if (unlikely(!inode))
		goto failed;

	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);

	root = NILFS_I(dir)->i_root;
	ii = NILFS_I(inode);
	ii->i_state = 1 << NILFS_I_NEW;
	ii->i_root = root;

	err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
	if (unlikely(err))
		goto failed_ifile_create_inode;
	/* reference count of i_bh inherits from nilfs_mdt_read_block() */

	atomic_inc(&root->inodes_count);
	inode_init_owner(inode, dir, mode);
	inode->i_ino = ino;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
		err = nilfs_bmap_read(ii->i_bmap, NULL);
		if (err < 0)
			goto failed_bmap;

		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}

	ii->i_flags = nilfs_mask_flags(
		mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);

	/* ii->i_file_acl = 0; */
	/* ii->i_dir_acl = 0; */
	ii->i_dir_start_lookup = 0;
	nilfs_set_inode_flags(inode);
	spin_lock(&nilfs->ns_next_gen_lock);
	inode->i_generation = nilfs->ns_next_generation++;
	spin_unlock(&nilfs->ns_next_gen_lock);
	insert_inode_hash(inode);

	err = nilfs_init_acl(inode, dir);
	if (unlikely(err))
		goto failed_acl; /* never occur. When supporting
				    nilfs_init_acl(), proper cancellation of
				    above jobs should be considered */

	return inode;

 failed_acl:
 failed_bmap:
	inode->i_nlink = 0;
	iput(inode);  /* raw_inode will be deleted through
			 generic_delete_inode() */
	goto failed;

 failed_ifile_create_inode:
	make_bad_inode(inode);
	iput(inode);  /* if i_nlink == 1, generic_forget_inode() will be
			 called */
 failed:
	return ERR_PTR(err);
}
void nilfs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = NILFS_I(inode)->i_flags;

	inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME |
			    S_DIRSYNC);
	if (flags & FS_SYNC_FL)
		inode->i_flags |= S_SYNC;
	if (flags & FS_APPEND_FL)
		inode->i_flags |= S_APPEND;
	if (flags & FS_IMMUTABLE_FL)
		inode->i_flags |= S_IMMUTABLE;
	if (flags & FS_NOATIME_FL)
		inode->i_flags |= S_NOATIME;
	if (flags & FS_DIRSYNC_FL)
		inode->i_flags |= S_DIRSYNC;
	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
}
int nilfs_read_inode_common(struct inode *inode,
			    struct nilfs_inode *raw_inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	inode->i_uid = (uid_t)le32_to_cpu(raw_inode->i_uid);
	inode->i_gid = (gid_t)le32_to_cpu(raw_inode->i_gid);
	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
	inode->i_size = le64_to_cpu(raw_inode->i_size);
	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	if (inode->i_nlink == 0 && inode->i_mode == 0)
		return -EINVAL; /* this inode is deleted */

	inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
	ii->i_flags = le32_to_cpu(raw_inode->i_flags);
#if 0
	ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	ii->i_dir_acl = S_ISREG(inode->i_mode) ?
		0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
	ii->i_dir_start_lookup = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);

	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)) {
		err = nilfs_bmap_read(ii->i_bmap, raw_inode);
		if (err < 0)
			return err;
		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}
	return 0;
}
static int __nilfs_read_inode(struct super_block *sb,
			      struct nilfs_root *root, unsigned long ino,
			      struct inode *inode)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct buffer_head *bh;
	struct nilfs_inode *raw_inode;
	int err;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
	if (unlikely(err))
		goto bad_inode;

	raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);

	err = nilfs_read_inode_common(inode, raw_inode);
	if (err)
		goto failed_unmap;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &nilfs_file_inode_operations;
		inode->i_fop = &nilfs_file_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &nilfs_dir_inode_operations;
		inode->i_fop = &nilfs_dir_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &nilfs_symlink_inode_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else {
		inode->i_op = &nilfs_special_inode_operations;
		init_special_inode(
			inode, inode->i_mode,
			huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
	}
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	nilfs_set_inode_flags(inode);
	return 0;

 failed_unmap:
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);

 bad_inode:
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	return err;
}
static int nilfs_iget_test(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;
	struct nilfs_inode_info *ii;

	if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
		return 0;

	ii = NILFS_I(inode);
	if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
		return !args->for_gc;

	return args->for_gc && args->cno == ii->i_cno;
}
static int nilfs_iget_set(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;

	inode->i_ino = args->ino;
	if (args->for_gc) {
		NILFS_I(inode)->i_state = 1 << NILFS_I_GCINODE;
		NILFS_I(inode)->i_cno = args->cno;
		NILFS_I(inode)->i_root = NULL;
	} else {
		if (args->root && args->ino == NILFS_ROOT_INO)
			nilfs_get_root(args->root);
		NILFS_I(inode)->i_root = args->root;
	}
	return 0;
}
struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
			    unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return ilookup5(sb, ino, nilfs_iget_test, &args);
}
struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
				unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
}
struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
			 unsigned long ino)
{
	struct inode *inode;
	int err;

	inode = nilfs_iget_locked(sb, root, ino);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = __nilfs_read_inode(sb, root, ino, inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}
struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
				__u64 cno)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = NULL, .cno = cno, .for_gc = 1
	};
	struct inode *inode;
	int err;

	inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = nilfs_init_gcinode(inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}
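/*
 * nilfs_write_inode_common() copies the in-core inode fields into the
 * on-disk nilfs_inode entry @raw_inode; when @has_bmap is set the bmap is
 * written out as well, otherwise the device code is stored for special
 * inodes.
 */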
void nilfs_write_inode_common(struct inode *inode,
			      struct nilfs_inode *raw_inode, int has_bmap)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	raw_inode->i_uid = cpu_to_le32(inode->i_uid);
	raw_inode->i_gid = cpu_to_le32(inode->i_gid);
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le64(inode->i_size);
	raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);

	if (has_bmap)
		nilfs_bmap_write(ii->i_bmap, raw_inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_device_code =
			cpu_to_le64(huge_encode_dev(inode->i_rdev));
	/* When extending inode, nilfs->ns_inode_size should be checked
	   for substitutions of appended fields */
}
void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh)
{
	ino_t ino = inode->i_ino;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *ifile = ii->i_root->ifile;
	struct nilfs_inode *raw_inode;

	raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);

	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
		memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
	set_bit(NILFS_I_INODE_DIRTY, &ii->i_state);

	nilfs_write_inode_common(inode, raw_inode, 0);
		/* XXX: call with has_bmap = 0 is a workaround to avoid
		   deadlock of bmap. This delays update of i_bmap to just
		   before writing */
	nilfs_ifile_unmap_inode(ifile, ino, ibh);
}
#define NILFS_MAX_TRUNCATE_BLOCKS	16384  /* 64MB for 4KB block */
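/*
 * nilfs_truncate_bmap() removes block mappings at and beyond @from,
 * shrinking the bmap in chunks of at most NILFS_MAX_TRUNCATE_BLOCKS so
 * that lock pressure can be relaxed between steps.
 */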
static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
				unsigned long from)
{
	unsigned long b;
	int ret;

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
repeat:
	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
	if (ret == -ENOENT)
		return;
	else if (ret < 0)
		goto failed;

	if (b < from)
		return;

	b -= min_t(unsigned long, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
	ret = nilfs_bmap_truncate(ii->i_bmap, b);
	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
	if (!ret || (ret == -ENOMEM &&
		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
		goto repeat;

failed:
	nilfs_warning(ii->vfs_inode.i_sb, __func__,
		      "failed to truncate bmap (ino=%lu, err=%d)",
		      ii->vfs_inode.i_ino, ret);
}
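/*
 * nilfs_truncate() shrinks a regular file to the current i_size inside a
 * NILFS transaction: the partial last block is zeroed and the bmap is cut
 * back to the new block boundary.
 */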
void nilfs_truncate(struct inode *inode)
{
	unsigned long blkoff;
	unsigned int blocksize;
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	blocksize = sb->s_blocksize;
	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

	nilfs_truncate_bmap(ii, blkoff);

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);

	nilfs_mark_inode_dirty(inode);
	nilfs_set_file_dirty(inode, 0);
	nilfs_transaction_commit(sb);
	/* May construct a logical segment and may fail in sync mode.
	   But truncate has no return value. */
}
static void nilfs_clear_inode(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	/*
	 * Free resources allocated in nilfs_read_inode(), here.
	 */
	BUG_ON(!list_empty(&ii->i_dirty));
	brelse(ii->i_bh);
	ii->i_bh = NULL;

	if (mdi && mdi->mi_palloc_cache)
		nilfs_palloc_destroy_cache(inode);

	if (test_bit(NILFS_I_BMAP, &ii->i_state))
		nilfs_bmap_clear(ii->i_bmap);

	nilfs_btnode_cache_clear(&ii->i_btnode_cache);

	if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
		nilfs_put_root(ii->i_root);
}
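/*
 * nilfs_evict_inode() is called when the last reference to an inode is
 * dropped.  For unlinked inodes it truncates all blocks and deletes the
 * ifile entry inside a transaction; otherwise it only releases the
 * in-core state.
 */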
void nilfs_evict_inode(struct inode *inode)
{
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int ret;

	if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
		if (inode->i_data.nrpages)
			truncate_inode_pages(&inode->i_data, 0);
		end_writeback(inode);
		nilfs_clear_inode(inode);
		return;
	}
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	if (inode->i_data.nrpages)
		truncate_inode_pages(&inode->i_data, 0);

	/* TODO: some of the following operations may fail.  */
	nilfs_truncate_bmap(ii, 0);
	nilfs_mark_inode_dirty(inode);
	end_writeback(inode);

	ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
	if (!ret)
		atomic_dec(&ii->i_root->inodes_count);

	nilfs_clear_inode(inode);

	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);
	nilfs_transaction_commit(sb);
	/* May construct a logical segment and may fail in sync mode.
	   But delete_inode has no return value. */
}
int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct nilfs_transaction_info ti;
	struct inode *inode = dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	int err;

	err = inode_change_ok(inode, iattr);
	if (err)
		return err;

	err = nilfs_transaction_begin(sb, &ti, 0);
	if (unlikely(err))
		return err;

	if ((iattr->ia_valid & ATTR_SIZE) &&
	    iattr->ia_size != i_size_read(inode)) {
		err = vmtruncate(inode, iattr->ia_size);
		if (unlikely(err))
			goto out_err;
	}

	setattr_copy(inode, iattr);
	mark_inode_dirty(inode);

	if (iattr->ia_valid & ATTR_MODE) {
		err = nilfs_acl_chmod(inode);
		if (unlikely(err))
			goto out_err;
	}

	return nilfs_transaction_commit(sb);

out_err:
	nilfs_transaction_abort(sb);
	return err;
}
int nilfs_permission(struct inode *inode, int mask, unsigned int flags)
{
	struct nilfs_root *root;

	if (flags & IPERM_FLAG_RCU)
		return -ECHILD;

	root = NILFS_I(inode)->i_root;
	if ((mask & MAY_WRITE) && root &&
	    root->cno != NILFS_CPTREE_CURRENT_CNO)
		return -EROFS; /* snapshot is not writable */

	return generic_permission(inode, mask, flags, NULL);
}
int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	spin_lock(&nilfs->ns_inode_lock);
	if (ii->i_bh == NULL) {
		spin_unlock(&nilfs->ns_inode_lock);
		err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
						  inode->i_ino, pbh);
		if (unlikely(err))
			return err;
		spin_lock(&nilfs->ns_inode_lock);
		if (ii->i_bh == NULL)
			ii->i_bh = *pbh;
		else {
			brelse(*pbh);
			*pbh = ii->i_bh;
		}
	} else
		*pbh = ii->i_bh;

	get_bh(*pbh);
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}
int nilfs_inode_dirty(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	int ret = 0;

	if (!list_empty(&ii->i_dirty)) {
		spin_lock(&nilfs->ns_inode_lock);
		ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
			test_bit(NILFS_I_BUSY, &ii->i_state);
		spin_unlock(&nilfs->ns_inode_lock);
	}
	return ret;
}
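/*
 * nilfs_set_file_dirty() accounts @nr_dirty newly dirtied blocks and, if
 * the inode was not dirty yet, queues it on the list of dirty files so
 * that the segment constructor will pick it up.
 */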
int nilfs_set_file_dirty(struct inode *inode, unsigned nr_dirty)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

	atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);

	if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
		return 0;

	spin_lock(&nilfs->ns_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		/* Because this routine may race with nilfs_dispose_list(),
		   we have to check NILFS_I_QUEUED here, too. */
		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
			/* This will happen when somebody is freeing
			   this inode. */
			nilfs_warning(inode->i_sb, __func__,
				      "cannot get inode (ino=%lu)\n",
				      inode->i_ino);
			spin_unlock(&nilfs->ns_inode_lock);
			return -EINVAL; /* NILFS_I_DIRTY may remain for
					   freeing inode */
		}
		list_del(&ii->i_dirty);
		list_add_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
		set_bit(NILFS_I_QUEUED, &ii->i_state);
	}
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}
int nilfs_mark_inode_dirty(struct inode *inode)
{
	struct buffer_head *ibh;
	int err;

	err = nilfs_load_inode_block(inode, &ibh);
	if (unlikely(err)) {
		nilfs_warning(inode->i_sb, __func__,
			      "failed to reget inode block.\n");
		return err;
	}
	nilfs_update_inode(inode, ibh);
	nilfs_mdt_mark_buffer_dirty(ibh);
	nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
	brelse(ibh);
	return 0;
}
/**
 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 * @inode: inode of the file to be registered.
 *
 * nilfs_dirty_inode() loads an inode block containing the specified
 * @inode and copies data from a nilfs_inode to a corresponding inode
 * entry in the inode block. This operation is excluded from the segment
 * construction. This function can be called both as a single operation
 * and as a part of indivisible file operations.
 */
void nilfs_dirty_inode(struct inode *inode)
{
	struct nilfs_transaction_info ti;
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	if (is_bad_inode(inode)) {
		nilfs_warning(inode->i_sb, __func__,
			      "tried to mark bad_inode dirty. ignored.\n");
		dump_stack();
		return;
	}
	if (mdi) {
		nilfs_mdt_mark_dirty(inode);
		return;
	}
	nilfs_transaction_begin(inode->i_sb, &ti, 0);
	nilfs_mark_inode_dirty(inode);
	nilfs_transaction_commit(inode->i_sb); /* never fails */
}
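/*
 * nilfs_fiemap() reports the extents of a file in the range
 * [@start, @start + @len).  Delayed-allocation extents found in the page
 * cache are reported as DELALLOC, and on-disk mappings looked up through
 * the bmap are merged into contiguous extents.
 */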
int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 __u64 start, __u64 len)
{
	struct the_nilfs *nilfs = NILFS_I_NILFS(inode);
	__u64 logical = 0, phys = 0, size = 0;
	__u32 flags = 0;
	loff_t isize;
	sector_t blkoff, end_blkoff;
	sector_t delalloc_blkoff;
	unsigned long delalloc_blklen;
	unsigned int blkbits = inode->i_blkbits;
	int ret, n;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);

	blkoff = start >> blkbits;
	end_blkoff = (start + len - 1) >> blkbits;

	delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
							&delalloc_blkoff);

	do {
		__u64 blkphy;
		unsigned int maxblocks;

		if (delalloc_blklen && blkoff == delalloc_blkoff) {
			if (size) {
				/* End of the current extent */
				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
			}
			if (blkoff > end_blkoff)
				break;

			flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
			logical = blkoff << blkbits;
			phys = 0;
			size = delalloc_blklen << blkbits;

			blkoff = delalloc_blkoff + delalloc_blklen;
			delalloc_blklen = nilfs_find_uncommitted_extent(
				inode, blkoff, &delalloc_blkoff);
			continue;
		}

		/*
		 * Limit the number of blocks that we look up so as
		 * not to get into the next delayed allocation extent.
		 */
		maxblocks = INT_MAX;
		if (delalloc_blklen)
			maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
					  maxblocks);
		blkphy = 0;

		down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
		n = nilfs_bmap_lookup_contig(
			NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
		up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);

		if (n < 0) {
			int past_eof;

			if (unlikely(n != -ENOENT))
				break; /* error */

			/* HOLE */
			blkoff++;
			past_eof = ((blkoff << blkbits) >= isize);

			if (size) {
				/* End of the current extent */
				if (past_eof)
					flags |= FIEMAP_EXTENT_LAST;

				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
				size = 0;
			}
			if (blkoff > end_blkoff || past_eof)
				break;
		} else {
			if (size) {
				if (phys && blkphy << blkbits == phys + size) {
					/* The current extent goes on */
					size += n << blkbits;
				} else {
					/* Terminate the current extent */
					ret = fiemap_fill_next_extent(
						fieinfo, logical, phys, size,
						flags);
					if (ret || blkoff > end_blkoff)
						break;

					/* Start another extent */
					flags = FIEMAP_EXTENT_MERGED;
					logical = blkoff << blkbits;
					phys = blkphy << blkbits;
					size = n << blkbits;
				}
			} else {
				/* Start a new extent */
				flags = FIEMAP_EXTENT_MERGED;
				logical = blkoff << blkbits;
				phys = blkphy << blkbits;
				size = n << blkbits;
			}
			blkoff += n;
		}
		cond_resched();
	} while (true);

	/* If ret is 1 then we just hit the end of the extent array */
	if (ret == 1)
		ret = 0;

	mutex_unlock(&inode->i_mutex);
	return ret;
}