/*
 * linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/file.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * ext4 fs regular file handling primitives
 *
 * 64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/aio.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
		ext4_alloc_da_blocks(inode);
		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
	}
	/* if we are the last writer on the inode, drop the block reservation */
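	/*
	 * Added note (a reading of the VFS call ordering, not stated here):
	 * ->release() runs before __fput() drops this opener's write access,
	 * so i_writecount == 1 below should mean no other writer remains.
	 */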
	if ((filp->f_mode & FMODE_WRITE) &&
			(atomic_read(&inode->i_writecount) == 1) &&
			!EXT4_I(inode)->i_reserved_data_blocks)
	{
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode);
		up_write(&EXT4_I(inode)->i_data_sem);
	}
	if (is_dx(inode) && filp->private_data)
		ext4_htree_free_dir_info(filp->private_data);

	return 0;
}

static void ext4_unwritten_wait(struct inode *inode)
{
	wait_queue_head_t *wq = ext4_ioend_wq(inode);

	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete. Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block. If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
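 *
 * Illustration (an added example, assuming a 4 KiB block size): blockmask is
 * then 0xfff, so an AIO write starting at pos = 6144, or one containing a
 * 2048-byte segment, leaves low bits set in (pos | iov_iter_alignment()) and
 * is treated as unaligned, whereas a write starting at or past i_size is
 * always reported as aligned since there is no existing block to corrupt.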
 */
static int
ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
{
	struct super_block *sb = inode->i_sb;
	int blockmask = sb->s_blocksize - 1;

	if (pos >= i_size_read(inode))
		return 0;

	if ((pos | iov_iter_alignment(from)) & blockmask)
		return 1;

	return 0;
}

static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(iocb->ki_filp);
	struct mutex *aio_mutex = NULL;
	struct blk_plug plug;
	int o_direct = file->f_flags & O_DIRECT;
	int overwrite = 0;
	size_t length = iov_iter_count(from);
	ssize_t ret;
	loff_t pos = iocb->ki_pos;

	/*
	 * Unaligned direct AIO must be serialized; see the comment above
	 * ext4_unaligned_aio().  For O_APPEND, assume that we must always
	 * serialize.
	 */
	if (o_direct &&
	    ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
	    !is_sync_kiocb(iocb) &&
	    (file->f_flags & O_APPEND ||
	     ext4_unaligned_aio(inode, from, pos))) {
		aio_mutex = ext4_aio_mutex(inode);
		mutex_lock(aio_mutex);
		ext4_unwritten_wait(inode);
	}

	mutex_lock(&inode->i_mutex);
	if (file->f_flags & O_APPEND)
		iocb->ki_pos = pos = i_size_read(inode);

	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

		if ((pos > sbi->s_bitmap_maxbytes) ||
		    (pos == sbi->s_bitmap_maxbytes && length > 0)) {
			mutex_unlock(&inode->i_mutex);
			ret = -EFBIG;
			goto errout;
		}

		if (pos + length > sbi->s_bitmap_maxbytes)
			iov_iter_truncate(from, sbi->s_bitmap_maxbytes - pos);
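		/*
		 * Added note: a write that merely crosses the bitmap-file
		 * limit is shortened to stop at the limit rather than being
		 * rejected; only writes starting at or beyond the limit fail
		 * with -EFBIG above.
		 */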
	}

	iocb->private = &overwrite;
	if (o_direct) {
		blk_start_plug(&plug);

		/*
		 * Check whether this direct IO only overwrites blocks that
		 * are already allocated and initialized ("DIO overwrite").
		 */
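		/*
		 * Why this matters (added note): for a pure overwrite the
		 * direct IO path can avoid block allocation and
		 * unwritten-extent conversion, and may relax the locking it
		 * would otherwise need; the result is passed down via the
		 * 'overwrite' flag pointed to by iocb->private.
		 */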
		if (ext4_should_dioread_nolock(inode) && !aio_mutex &&
		    !file->f_mapping->nrpages && pos + length <= i_size_read(inode)) {
			struct ext4_map_blocks map;
			unsigned int blkbits = inode->i_blkbits;
			int err, len;

			map.m_lblk = pos >> blkbits;
			map.m_len = (EXT4_BLOCK_ALIGN(pos + length, blkbits) >> blkbits)
				- map.m_lblk;
			len = map.m_len;

			err = ext4_map_blocks(NULL, inode, &map, 0);
			/*
			 * 'err == len' means that all of the blocks have been
			 * preallocated, whether or not they are initialized.
			 * To exclude unwritten extents we must also look at
			 * m_flags: only a mapping reported as EXT4_MAP_MAPPED
			 * (an initialized extent, whether it came from the
			 * extent cache or from a real lookup) may be treated
			 * as an overwrite.
			 */
			if (err == len && (map.m_flags & EXT4_MAP_MAPPED))
				overwrite = 1;
		}
	}

	ret = __generic_file_write_iter(iocb, from);
	mutex_unlock(&inode->i_mutex);

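	/*
	 * Added note: generic_write_sync() issues the flush needed for
	 * O_SYNC/O_DSYNC (or sync-mount) files over the range just written;
	 * for ordinary writes it is a no-op.
	 */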
	if (ret > 0) {
		ssize_t err;

		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
		if (err < 0)
			ret = err;
	}
	if (o_direct)
		blk_finish_plug(&plug);

errout:
	if (aio_mutex)
		mutex_unlock(aio_mutex);
	return ret;
}

static const struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= ext4_page_mkwrite,
	.remap_pages	= generic_file_remap_pages,
};

static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &ext4_file_vm_ops;
	return 0;
}

static int ext4_file_open(struct inode * inode, struct file * filp)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct vfsmount *mnt = filp->f_path.mnt;
	struct path path;
	char buf[64], *cp;

	if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
		     !(sb->s_flags & MS_RDONLY))) {
		sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
		/*
		 * Sample where the filesystem has been mounted and
		 * store it in the superblock for sysadmin convenience
		 * when trying to sort through large numbers of block
		 * devices or filesystem images.
		 */
		memset(buf, 0, sizeof(buf));
		path.mnt = mnt;
		path.dentry = mnt->mnt_root;
		cp = d_path(&path, buf, sizeof(buf));
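		/*
		 * Added note: d_path() builds the path from the end of buf
		 * and returns a pointer into the buffer (or an ERR_PTR on
		 * failure), so cp rather than buf is what gets copied below.
		 */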
		if (!IS_ERR(cp)) {
			handle_t *handle;
			int err;

			handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
			if (IS_ERR(handle))
				return PTR_ERR(handle);
			BUFFER_TRACE(sbi->s_sbh, "get_write_access");
			err = ext4_journal_get_write_access(handle, sbi->s_sbh);
			if (err) {
				ext4_journal_stop(handle);
				return err;
			}
			strlcpy(sbi->s_es->s_last_mounted, cp,
				sizeof(sbi->s_es->s_last_mounted));
			ext4_handle_dirty_super(handle, sb);
			ext4_journal_stop(handle);
		}
	}
	/*
	 * Set up the jbd2_inode if we are opening the inode for
	 * writing and the journal is present
	 */
	if (filp->f_mode & FMODE_WRITE) {
		int ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			return ret;
	}
	return dquot_file_open(inode, filp);
}

/*
 * Here we use ext4_map_blocks() rather than ext4_ext_walk_space() to get the
 * block mapping for an extent-based file, so that SEEK_DATA/SEEK_HOLE can be
 * handled for block-mapped and extent-mapped files by the same function.
 * Once the extent status tree tracks all extent state for a file, it can be
 * used directly to retrieve the offset for SEEK_DATA/SEEK_HOLE.
 */

/*
 * When retrieving the offset for SEEK_DATA/SEEK_HOLE, we need to look at the
 * page cache to check whether there is any data in [startoff, endoff]: if
 * that range is covered by an unwritten extent, the extent counts as data or
 * as a hole depending on whether the page cache holds data for it.
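 *
 * For example (an added illustration): a region preallocated with fallocate()
 * is unwritten; if a page in that region has been dirtied but not yet written
 * out, SEEK_DATA should report that page's offset as data, while untouched
 * parts of the region read back as zeroes and count as a hole for SEEK_HOLE.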
 */
static int ext4_find_unwritten_pgoff(struct inode *inode,
				     int whence,
				     struct ext4_map_blocks *map,
				     loff_t *offset)
{
	struct pagevec pvec;
	unsigned int blkbits;
	pgoff_t index;
	pgoff_t end;
	loff_t endoff;
	loff_t startoff;
	loff_t lastoff;
	int found = 0;

	blkbits = inode->i_sb->s_blocksize_bits;
	startoff = *offset;
	lastoff = startoff;
	endoff = (loff_t)(map->m_lblk + map->m_len) << blkbits;

	index = startoff >> PAGE_CACHE_SHIFT;
	end = endoff >> PAGE_CACHE_SHIFT;
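	/*
	 * Added note: the search window is the byte range of the unwritten
	 * mapping passed in via 'map'; lastoff tracks the first offset that
	 * has not yet been classified as data or hole while the page cache
	 * is scanned below.
	 */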

	pagevec_init(&pvec, 0);
	do {
		int i, num;
		unsigned long nr_pages;

		num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
					  (pgoff_t)num);
		if (nr_pages == 0) {
			if (whence == SEEK_DATA)
				break;

			BUG_ON(whence != SEEK_HOLE);
			/*
			 * No pages were found.  If this is the first pass, or
			 * if we have not yet scanned past the end of the
			 * range, there is a hole at the current offset.
			 */
			if (lastoff == startoff || lastoff < endoff)
				found = 1;
			break;
		}

		/*
		 * On the first pass, if the starting offset lies before the
		 * first page returned, there is a hole at that offset.
		 */
		if (lastoff == startoff && whence == SEEK_HOLE &&
		    lastoff < page_offset(pvec.pages[0])) {
			found = 1;
			break;
		}

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			/*
			 * If this page lies beyond the given range while the
			 * current offset is still inside it, the rest of the
			 * range is a hole.
			 */
			if (lastoff < endoff && whence == SEEK_HOLE &&
			    page->index > end) {
				found = 1;
				*offset = lastoff;
				goto out;
			}

			lock_page(page);

			if (unlikely(page->mapping != inode->i_mapping)) {
				unlock_page(page);
				continue;
			}

			if (!page_has_buffers(page)) {
				unlock_page(page);
				continue;
			}

			lastoff = page_offset(page);
			bh = head = page_buffers(page);
			do {
				if (buffer_uptodate(bh) ||
				    buffer_unwritten(bh)) {
					if (whence == SEEK_DATA)
						found = 1;
				} else {
					if (whence == SEEK_HOLE)
						found = 1;
				}
				if (found) {
					*offset = max_t(loff_t,
							startoff, lastoff);
					unlock_page(page);
					goto out;
				}
				lastoff += bh->b_size;
				bh = bh->b_this_page;
			} while (bh != head);

			lastoff = page_offset(page) + PAGE_SIZE;
			unlock_page(page);
		}

		/*
		 * Fewer pages were returned than requested, so the rest of
		 * the range must be a hole.
		 */
		if (nr_pages < num && whence == SEEK_HOLE) {
			found = 1;
			*offset = lastoff;
			break;
		}

		index = pvec.pages[i - 1]->index + 1;
		pagevec_release(&pvec);
	} while (index <= end);

out:
	pagevec_release(&pvec);
	return found;
}

/*
 * ext4_seek_data() retrieves the offset for SEEK_DATA.
 */
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct ext4_map_blocks map;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t dataoff, isize;
	int blkbits;
	int ret = 0;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);
	if (offset >= isize) {
		mutex_unlock(&inode->i_mutex);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	dataoff = offset;

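	/*
	 * Added summary: walk the file block by block from the requested
	 * offset.  A written (mapped) block or a delayed-allocation extent
	 * counts as data; an unwritten extent defers to the page cache via
	 * ext4_find_unwritten_pgoff().
	 */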
	do {
		map.m_lblk = last;
		map.m_len = end - last + 1;
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
			if (last != start)
				dataoff = (loff_t)last << blkbits;
			break;
		}

		/*
		 * A delayed-allocation extent at this offset counts as data.
		 */
		ext4_es_find_delayed_extent_range(inode, last, last, &es);
		if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
			if (last != start)
				dataoff = (loff_t)last << blkbits;
			break;
		}

		/*
		 * An unwritten extent at this offset counts as data or as a
		 * hole depending on whether the page cache has data for it.
		 */
		if (map.m_flags & EXT4_MAP_UNWRITTEN) {
			int unwritten;
			unwritten = ext4_find_unwritten_pgoff(inode, SEEK_DATA,
							      &map, &dataoff);
			if (unwritten)
				break;
		}

		last++;
		dataoff = (loff_t)last << blkbits;
	} while (last <= end);

	mutex_unlock(&inode->i_mutex);

	if (dataoff > isize)
		return -ENXIO;

	return vfs_setpos(file, dataoff, maxsize);
}

/*
 * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
 */
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct ext4_map_blocks map;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t holeoff, isize;
	int blkbits;
	int ret = 0;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);
	if (offset >= isize) {
		mutex_unlock(&inode->i_mutex);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	holeoff = offset;

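	/*
	 * Added summary: walk forward from the requested offset, skipping
	 * written and delayed extents; an unwritten extent with no cached
	 * data, or the end of the mappings, marks the start of the hole.
	 */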
	do {
		map.m_lblk = last;
		map.m_len = end - last + 1;
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
			last += ret;
			holeoff = (loff_t)last << blkbits;
			continue;
		}

		/*
		 * A delayed-allocation extent at this offset is data, so skip
		 * over it.
		 */
		ext4_es_find_delayed_extent_range(inode, last, last, &es);
		if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
			last = es.es_lblk + es.es_len;
			holeoff = (loff_t)last << blkbits;
			continue;
		}

		/*
		 * An unwritten extent at this offset counts as data or as a
		 * hole depending on whether the page cache has data for it.
		 */
		if (map.m_flags & EXT4_MAP_UNWRITTEN) {
			int unwritten;
			unwritten = ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
							      &map, &holeoff);
			if (!unwritten) {
				last += ret;
				holeoff = (loff_t)last << blkbits;
				continue;
			}
		}

		/* found a hole */
		break;
	} while (last <= end);

	mutex_unlock(&inode->i_mutex);

	if (holeoff > isize)
		holeoff = isize;

	return vfs_setpos(file, holeoff, maxsize);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes;

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
	else
		maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
		return ext4_seek_data(file, offset, maxbytes);
	case SEEK_HOLE:
		return ext4_seek_hole(file, offset, maxbytes);
	}

	return -EINVAL;
}

const struct file_operations ext4_file_operations = {
	.llseek		= ext4_llseek,
	.read		= new_sync_read,
	.write		= new_sync_write,
	.read_iter	= generic_file_read_iter,
	.write_iter	= ext4_file_write_iter,
	.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext4_compat_ioctl,
#endif
	.mmap		= ext4_file_mmap,
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
	.setattr	= ext4_setattr,
	.getattr	= ext4_getattr,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= ext4_listxattr,
	.removexattr	= generic_removexattr,
	.get_acl	= ext4_get_acl,
	.set_acl	= ext4_set_acl,
	.fiemap		= ext4_fiemap,
};