/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 *  linux/fs/minix/file.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * ext4 fs regular file handling primitives
 *
 * 64-bit file support on 64-bit platforms by Jakub Jelinek
 * (jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
		ext4_alloc_da_blocks(inode);
		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
	}
	/* if we are the last writer on the inode, drop the block reservation */
	if ((filp->f_mode & FMODE_WRITE) &&
			(atomic_read(&inode->i_writecount) == 1) &&
			!EXT4_I(inode)->i_reserved_data_blocks)
	{
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode);
		up_write(&EXT4_I(inode)->i_data_sem);
	}
	if (is_dx(inode) && filp->private_data)
		ext4_htree_free_dir_info(filp->private_data);

	return 0;
}

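/*
 * Wait until all pending conversions of unwritten extents on this inode
 * have drained, i.e. until i_unwritten drops to zero.
 */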
static void ext4_unwritten_wait(struct inode *inode)
{
	wait_queue_head_t *wq = ext4_ioend_wq(inode);

	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete. Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block. If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
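/*
 * Illustrative note (assuming a 4096-byte block size): blockmask is then
 * 0xfff, so e.g. a 512-byte write at offset 512 leaves low bits set in
 * (pos | iov_iter_alignment(from)) & blockmask and is treated as unaligned.
 */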
static int
ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
{
	struct super_block *sb = inode->i_sb;
	int blockmask = sb->s_blocksize - 1;

	if (pos >= i_size_read(inode))
		return 0;

	if ((pos | iov_iter_alignment(from)) & blockmask)
		return 1;

	return 0;
}

static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(iocb->ki_filp);
	struct mutex *aio_mutex = NULL;
	struct blk_plug plug;
	int o_direct = iocb->ki_flags & IOCB_DIRECT;
	int overwrite = 0;
	ssize_t ret;

	/*
	 * Unaligned direct AIO must be serialized; see the comment above.
	 * In the case of O_APPEND, assume that we must always serialize.
	 */
	if (o_direct &&
	    ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
	    !is_sync_kiocb(iocb) &&
	    (iocb->ki_flags & IOCB_APPEND ||
	     ext4_unaligned_aio(inode, from, iocb->ki_pos))) {
		aio_mutex = ext4_aio_mutex(inode);
		mutex_lock(aio_mutex);
		ext4_unwritten_wait(inode);
	}

	mutex_lock(&inode->i_mutex);
	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

		if (iocb->ki_pos >= sbi->s_bitmap_maxbytes) {
			ret = -EFBIG;
			goto out;
		}
		iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
	}

	iocb->private = &overwrite;
	if (o_direct) {
		size_t length = iov_iter_count(from);
		loff_t pos = iocb->ki_pos;
		blk_start_plug(&plug);

		/* check whether we do a DIO overwrite or not */
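		/*
		 * An overwrite here means that every block in the range is
		 * already allocated and initialized and the write ends within
		 * i_size, so no block allocation or unwritten-extent
		 * conversion is needed while the direct IO is in flight.
		 */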
		if (ext4_should_dioread_nolock(inode) && !aio_mutex &&
		    !file->f_mapping->nrpages && pos + length <= i_size_read(inode)) {
			struct ext4_map_blocks map;
			unsigned int blkbits = inode->i_blkbits;
			int err, len;

			map.m_lblk = pos >> blkbits;
			map.m_len = (EXT4_BLOCK_ALIGN(pos + length, blkbits) >> blkbits)
				- map.m_lblk;
			len = map.m_len;

			err = ext4_map_blocks(NULL, inode, &map, 0);
			/*
			 * 'err == len' means that all of the blocks have been
			 * preallocated, whether or not they are initialized.
			 * To exclude unwritten extents we also need to check
			 * m_flags: an initialized extent comes back with
			 * EXT4_MAP_MAPPED set (both from the extent cache and
			 * from a real lookup), while an unwritten one does
			 * not. So we check both conditions.
			 */
			if (err == len && (map.m_flags & EXT4_MAP_MAPPED))
				overwrite = 1;
		}
	}

	ret = __generic_file_write_iter(iocb, from);
	mutex_unlock(&inode->i_mutex);

	if (ret > 0) {
		ssize_t err;

		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
		if (err < 0)
			ret = err;
	}
	if (o_direct)
		blk_finish_plug(&plug);

	if (aio_mutex)
		mutex_unlock(aio_mutex);
	return ret;

out:
	mutex_unlock(&inode->i_mutex);
	if (aio_mutex)
		mutex_unlock(aio_mutex);
	return ret;
}

#ifdef CONFIG_FS_DAX
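/*
 * For write faults a transaction handle is started up front because
 * ext4_dax_mmap_get_block() may need to allocate blocks; i_mmap_sem is
 * held across the fault to serialize it against truncate and hole punch.
 */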
static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int result;
	handle_t *handle = NULL;
	struct inode *inode = file_inode(vma->vm_file);
	struct super_block *sb = inode->i_sb;
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	if (write) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
		down_read(&EXT4_I(inode)->i_mmap_sem);
		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
						EXT4_DATA_TRANS_BLOCKS(sb));
	} else
		down_read(&EXT4_I(inode)->i_mmap_sem);

	if (IS_ERR(handle))
		result = VM_FAULT_SIGBUS;
	else
		result = __dax_fault(vma, vmf, ext4_dax_mmap_get_block, NULL);

	if (write) {
		if (!IS_ERR(handle))
			ext4_journal_stop(handle);
		up_read(&EXT4_I(inode)->i_mmap_sem);
		sb_end_pagefault(sb);
	} else
		up_read(&EXT4_I(inode)->i_mmap_sem);

	return result;
}

static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
						pmd_t *pmd, unsigned int flags)
{
	int result;
	handle_t *handle = NULL;
	struct inode *inode = file_inode(vma->vm_file);
	struct super_block *sb = inode->i_sb;
	bool write = flags & FAULT_FLAG_WRITE;

	if (write) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
		down_read(&EXT4_I(inode)->i_mmap_sem);
		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
				ext4_chunk_trans_blocks(inode,
							PMD_SIZE / PAGE_SIZE));
	} else
		down_read(&EXT4_I(inode)->i_mmap_sem);

	if (IS_ERR(handle))
		result = VM_FAULT_SIGBUS;
	else
		result = __dax_pmd_fault(vma, addr, pmd, flags,
				ext4_dax_mmap_get_block, NULL);

	if (write) {
		if (!IS_ERR(handle))
			ext4_journal_stop(handle);
		up_read(&EXT4_I(inode)->i_mmap_sem);
		sb_end_pagefault(sb);
	} else
		up_read(&EXT4_I(inode)->i_mmap_sem);

	return result;
}

static int ext4_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int err;
	struct inode *inode = file_inode(vma->vm_file);

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);
	down_read(&EXT4_I(inode)->i_mmap_sem);
	err = __dax_mkwrite(vma, vmf, ext4_dax_mmap_get_block, NULL);
	up_read(&EXT4_I(inode)->i_mmap_sem);
	sb_end_pagefault(inode->i_sb);

	return err;
}

/*
 * Handle write faults for VM_MIXEDMAP mappings. Similarly to the
 * ext4_dax_mkwrite() handler, we check for races against truncate. Note that
 * since we cycle through i_mmap_sem, we are sure that any hole punching that
 * began before we were called has finished by now, so if it covered part of
 * the file we are working on, our pte will get unmapped and the check for
 * pte_same() in wp_pfn_shared() fails. Thus the fault gets retried and things
 * work out as desired.
 */
static int ext4_dax_pfn_mkwrite(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct super_block *sb = inode->i_sb;
	int ret = VM_FAULT_NOPAGE;
	loff_t size;

	sb_start_pagefault(sb);
	file_update_time(vma->vm_file);
	down_read(&EXT4_I(inode)->i_mmap_sem);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		ret = VM_FAULT_SIGBUS;
	up_read(&EXT4_I(inode)->i_mmap_sem);
	sb_end_pagefault(sb);

	return ret;
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
	.fault		= ext4_dax_fault,
	.pmd_fault	= ext4_dax_pmd_fault,
	.page_mkwrite	= ext4_dax_mkwrite,
	.pfn_mkwrite	= ext4_dax_pfn_mkwrite,
};
#else
#define ext4_dax_vm_ops	ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= ext4_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= ext4_page_mkwrite,
};

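/*
 * Note on the DAX flags set in ext4_file_mmap() below: VM_MIXEDMAP is
 * required because a DAX mapping inserts raw pfns that have no struct
 * page behind them, and VM_HUGEPAGE opts the VMA in to PMD faults, which
 * are served by ext4_dax_pmd_fault() above.
 */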
static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file->f_mapping->host;

	if (ext4_encrypted_inode(inode)) {
		int err = ext4_get_encryption_info(inode);
		if (err)
			return 0;
		if (ext4_encryption_info(inode) == NULL)
			return -ENOKEY;
	}
	file_accessed(file);
	if (IS_DAX(file_inode(file))) {
		vma->vm_ops = &ext4_dax_vm_ops;
		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	} else {
		vma->vm_ops = &ext4_file_vm_ops;
	}
	return 0;
}

static int ext4_file_open(struct inode *inode, struct file *filp)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct vfsmount *mnt = filp->f_path.mnt;
	struct path path;
	char buf[64], *cp;
	int ret;

	if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
		     !(sb->s_flags & MS_RDONLY))) {
		sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
		/*
		 * Sample where the filesystem has been mounted and
		 * store it in the superblock for sysadmin convenience
		 * when trying to sort through large numbers of block
		 * devices or filesystem images.
		 */
		memset(buf, 0, sizeof(buf));
		path.mnt = mnt;
		path.dentry = mnt->mnt_root;
		cp = d_path(&path, buf, sizeof(buf));
		if (!IS_ERR(cp)) {
			handle_t *handle;
			int err;

			handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
			if (IS_ERR(handle))
				return PTR_ERR(handle);
			BUFFER_TRACE(sbi->s_sbh, "get_write_access");
			err = ext4_journal_get_write_access(handle, sbi->s_sbh);
			if (err) {
				ext4_journal_stop(handle);
				return err;
			}
			strlcpy(sbi->s_es->s_last_mounted, cp,
				sizeof(sbi->s_es->s_last_mounted));
			ext4_handle_dirty_super(handle, sb);
			ext4_journal_stop(handle);
		}
	}
	if (ext4_encrypted_inode(inode)) {
		ret = ext4_get_encryption_info(inode);
		if (ret)
			return -EACCES;
		if (ext4_encryption_info(inode) == NULL)
			return -ENOKEY;
	}
	/*
	 * Set up the jbd2_inode if we are opening the inode for
	 * writing and the journal is present
	 */
	if (filp->f_mode & FMODE_WRITE) {
		ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			return ret;
	}
	return dquot_file_open(inode, filp);
}

/*
 * Here we use ext4_map_blocks() to get a block mapping for an extent-based
 * file rather than ext4_ext_walk_space() because we can introduce
 * SEEK_DATA/SEEK_HOLE for block-mapped and extent-mapped files in the same
 * function. When the extent status tree has been fully implemented, it will
 * track all extent status for a file and we can directly use it to
 * retrieve the offset for SEEK_DATA/SEEK_HOLE.
 */

/*
 * When we retrieve the offset for SEEK_DATA/SEEK_HOLE, we have to look up
 * the page cache to check whether there is any data in the range
 * [startoff, endoff], because if this range contains an unwritten extent,
 * we treat the extent as data or a hole according to whether the page
 * cache has data for it or not.
 */
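/*
 * The scan below walks the page cache in PAGEVEC_SIZE batches: a buffer
 * that is uptodate or unwritten counts as data, any other buffer counts
 * as a hole, and gaps in the page cache are holes as well.
 */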
static int ext4_find_unwritten_pgoff(struct inode *inode,
				     int whence,
				     struct ext4_map_blocks *map,
				     loff_t *offset)
{
	struct pagevec pvec;
	unsigned int blkbits;
	pgoff_t index;
	pgoff_t end;
	loff_t endoff;
	loff_t startoff;
	loff_t lastoff;
	int found = 0;

	blkbits = inode->i_sb->s_blocksize_bits;
	startoff = *offset;
	lastoff = startoff;
	endoff = (loff_t)(map->m_lblk + map->m_len) << blkbits;

	index = startoff >> PAGE_CACHE_SHIFT;
	end = endoff >> PAGE_CACHE_SHIFT;

	pagevec_init(&pvec, 0);
	do {
		int i, num;
		unsigned long nr_pages;

		num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
					  (pgoff_t)num);
		if (nr_pages == 0) {
			if (whence == SEEK_DATA)
				break;

			BUG_ON(whence != SEEK_HOLE);
			/*
			 * If this is the first pass through the loop, or the
			 * offset is not yet beyond the end offset, there is
			 * a hole at this offset.
			 */
			if (lastoff == startoff || lastoff < endoff)
				found = 1;
			break;
		}

		/*
		 * If this is the first pass through the loop and the offset is
		 * smaller than the first page offset, there is a hole at this
		 * offset.
		 */
		if (lastoff == startoff && whence == SEEK_HOLE &&
		    lastoff < page_offset(pvec.pages[0])) {
			found = 1;
			break;
		}

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			/*
			 * If the current offset is not beyond the end of the
			 * given range but this page starts past it, the gap
			 * in between is a hole.
			 */
			if (lastoff < endoff && whence == SEEK_HOLE &&
			    page->index > end) {
				found = 1;
				*offset = lastoff;
				goto out;
			}

			lock_page(page);

			if (unlikely(page->mapping != inode->i_mapping)) {
				unlock_page(page);
				continue;
			}

			if (!page_has_buffers(page)) {
				unlock_page(page);
				continue;
			}

			if (page_has_buffers(page)) {
				lastoff = page_offset(page);
				bh = head = page_buffers(page);
				do {
					if (buffer_uptodate(bh) ||
					    buffer_unwritten(bh)) {
						if (whence == SEEK_DATA)
							found = 1;
					} else {
						if (whence == SEEK_HOLE)
							found = 1;
					}
					if (found) {
						*offset = max_t(loff_t,
							startoff, lastoff);
						unlock_page(page);
						goto out;
					}
					lastoff += bh->b_size;
					bh = bh->b_this_page;
				} while (bh != head);
			}

			lastoff = page_offset(page) + PAGE_SIZE;
			unlock_page(page);
		}

		/*
		 * Fewer pages were found than requested, so the rest of
		 * the range must be a hole.
		 */
		if (nr_pages < num && whence == SEEK_HOLE) {
			found = 1;
			*offset = lastoff;
			break;
		}

		index = pvec.pages[i - 1]->index + 1;
		pagevec_release(&pvec);
	} while (index <= end);

out:
	pagevec_release(&pvec);
	return found;
}

/*
 * ext4_seek_data() retrieves the offset for SEEK_DATA.
 */
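/*
 * The loop below walks the file one mapping at a time: a written, mapped
 * extent or a delayed extent ends the search as data, while an unwritten
 * extent defers to ext4_find_unwritten_pgoff() to consult the page cache.
 */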
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct ext4_map_blocks map;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t dataoff, isize;
	int blkbits;
	int ret = 0;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);
	if (offset >= isize) {
		mutex_unlock(&inode->i_mutex);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	dataoff = offset;

	do {
		map.m_lblk = last;
		map.m_len = end - last + 1;
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
			if (last != start)
				dataoff = (loff_t)last << blkbits;
			break;
		}

		/*
		 * If there is a delayed extent at this offset,
		 * it is treated as data.
		 */
		ext4_es_find_delayed_extent_range(inode, last, last, &es);
		if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
			if (last != start)
				dataoff = (loff_t)last << blkbits;
			break;
		}

		/*
		 * If there is an unwritten extent at this offset,
		 * it is treated as data or a hole according to whether
		 * the page cache has data for it.
		 */
		if (map.m_flags & EXT4_MAP_UNWRITTEN) {
			int unwritten;
			unwritten = ext4_find_unwritten_pgoff(inode, SEEK_DATA,
							      &map, &dataoff);
			if (unwritten)
				break;
		}

		last++;
		dataoff = (loff_t)last << blkbits;
	} while (last <= end);

	mutex_unlock(&inode->i_mutex);

	if (dataoff > isize)
		return -ENXIO;

	return vfs_setpos(file, dataoff, maxsize);
}

/*
 * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
 */
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct ext4_map_blocks map;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t holeoff, isize;
	int blkbits;
	int ret = 0;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);
	if (offset >= isize) {
		mutex_unlock(&inode->i_mutex);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	holeoff = offset;

	do {
		map.m_lblk = last;
		map.m_len = end - last + 1;
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
			last += ret;
			holeoff = (loff_t)last << blkbits;
			continue;
		}

		/*
		 * If there is a delayed extent at this offset,
		 * we will skip this extent.
		 */
		ext4_es_find_delayed_extent_range(inode, last, last, &es);
		if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
			last = es.es_lblk + es.es_len;
			holeoff = (loff_t)last << blkbits;
			continue;
		}

		/*
		 * If there is an unwritten extent at this offset,
		 * it is treated as data or a hole according to whether
		 * the page cache has data for it.
		 */
		if (map.m_flags & EXT4_MAP_UNWRITTEN) {
			int unwritten;
			unwritten = ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
							      &map, &holeoff);
			if (!unwritten) {
				last += ret;
				holeoff = (loff_t)last << blkbits;
				continue;
			}
		}

		/* found a hole */
		break;
	} while (last <= end);

	mutex_unlock(&inode->i_mutex);

	if (holeoff > isize)
		holeoff = isize;

	return vfs_setpos(file, holeoff, maxsize);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes;

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
	else
		maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
		return ext4_seek_data(file, offset, maxbytes);
	case SEEK_HOLE:
		return ext4_seek_hole(file, offset, maxbytes);
	}

	return -EINVAL;
}

const struct file_operations ext4_file_operations = {
	.llseek		= ext4_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= ext4_file_write_iter,
	.unlocked_ioctl	= ext4_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext4_compat_ioctl,
#endif
	.mmap		= ext4_file_mmap,
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
	.setattr	= ext4_setattr,
	.getattr	= ext4_getattr,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= ext4_listxattr,
	.removexattr	= generic_removexattr,
	.get_acl	= ext4_get_acl,
	.set_acl	= ext4_set_acl,
	.fiemap		= ext4_fiemap,
};