/*
 * linux/fs/ext2/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/file.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * ext2 fs regular file handling primitives
 *
 * 64-bit file support on 64-bit platforms by Jakub Jelinek
 *      (jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/pagemap.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include "ext2.h"
#include "xattr.h"
#include "acl.h"

#ifdef CONFIG_FS_DAX
/*
 * The lock ordering for ext2 DAX fault paths is:
 *
 * mmap_sem (MM)
 *   sb_start_pagefault (vfs, freeze)
 *     ext2_inode_info->dax_sem
 *       address_space->i_mmap_rwsem or page_lock (mutually exclusive in DAX)
 *         ext2_inode_info->truncate_mutex
 *
 * The default page_lock and i_size verification done by non-DAX fault paths
 * is sufficient because ext2 doesn't support hole punching.
 */
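/*
 * Handle a DAX page fault.  Write faults take freeze protection and update
 * the file times before blocks are allocated; dax_sem is held shared so a
 * concurrent truncate cannot remove blocks under the fault.
 */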
static int ext2_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct inode *inode = file_inode(vma->vm_file);
        struct ext2_inode_info *ei = EXT2_I(inode);
        int ret;

        if (vmf->flags & FAULT_FLAG_WRITE) {
                sb_start_pagefault(inode->i_sb);
                file_update_time(vma->vm_file);
        }
        down_read(&ei->dax_sem);

        ret = dax_fault(vma, vmf, ext2_get_block);

        up_read(&ei->dax_sem);
        if (vmf->flags & FAULT_FLAG_WRITE)
                sb_end_pagefault(inode->i_sb);
        return ret;
}

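/* PMD (huge page) DAX fault handler; locking mirrors ext2_dax_fault(). */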
static int ext2_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
                pmd_t *pmd, unsigned int flags)
{
        struct inode *inode = file_inode(vma->vm_file);
        struct ext2_inode_info *ei = EXT2_I(inode);
        int ret;

        if (flags & FAULT_FLAG_WRITE) {
                sb_start_pagefault(inode->i_sb);
                file_update_time(vma->vm_file);
        }
        down_read(&ei->dax_sem);

        ret = dax_pmd_fault(vma, addr, pmd, flags, ext2_get_block);

        up_read(&ei->dax_sem);
        if (flags & FAULT_FLAG_WRITE)
                sb_end_pagefault(inode->i_sb);
        return ret;
}

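/*
 * Write access to a pfn that is already mapped read-only: no block
 * allocation is needed here, so this only takes freeze protection, updates
 * the file times and rechecks i_size against a racing truncate.
 */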
static int ext2_dax_pfn_mkwrite(struct vm_area_struct *vma,
                struct vm_fault *vmf)
{
        struct inode *inode = file_inode(vma->vm_file);
        struct ext2_inode_info *ei = EXT2_I(inode);
        loff_t size;
        int ret;

        sb_start_pagefault(inode->i_sb);
        file_update_time(vma->vm_file);
        down_read(&ei->dax_sem);

        /* check that the faulting page hasn't raced with truncate */
        size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (vmf->pgoff >= size)
                ret = VM_FAULT_SIGBUS;
        else
                ret = dax_pfn_mkwrite(vma, vmf);

        up_read(&ei->dax_sem);
        sb_end_pagefault(inode->i_sb);
        return ret;
}

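/*
 * Note that .page_mkwrite reuses ext2_dax_fault: the FAULT_FLAG_WRITE path
 * in that handler already covers freeze protection, time update and block
 * allocation for write faults.
 */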
static const struct vm_operations_struct ext2_dax_vm_ops = {
        .fault          = ext2_dax_fault,
        .pmd_fault      = ext2_dax_pmd_fault,
        .page_mkwrite   = ext2_dax_fault,
        .pfn_mkwrite    = ext2_dax_pfn_mkwrite,
};

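/*
 * mmap: DAX mappings install raw pfns rather than struct pages, hence
 * VM_MIXEDMAP; VM_HUGEPAGE lets the VM attempt PMD-sized faults via the
 * .pmd_fault handler above.
 */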
static int ext2_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        if (!IS_DAX(file_inode(file)))
                return generic_file_mmap(file, vma);

        file_accessed(file);
        vma->vm_ops = &ext2_dax_vm_ops;
        vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
        return 0;
}
#else
#define ext2_file_mmap  generic_file_mmap
#endif

/*
 * Called when filp is released. This happens when all file descriptors
 * for a single struct file are closed. Note that different open() calls
 * for the same file yield different struct file structures.
 */
static int ext2_release_file (struct inode * inode, struct file * filp)
{
        if (filp->f_mode & FMODE_WRITE) {
                mutex_lock(&EXT2_I(inode)->truncate_mutex);
                ext2_discard_reservation(inode);
                mutex_unlock(&EXT2_I(inode)->truncate_mutex);
        }
        return 0;
}

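/*
 * fsync: generic_file_fsync() flushes the file's own pages and inode, but
 * ext2 metadata (bitmaps, inode tables, indirect blocks) is written through
 * the block device mapping, so that mapping is also checked for write errors.
 */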
int ext2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
        int ret;
        struct super_block *sb = file->f_mapping->host->i_sb;
        struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping;

        ret = generic_file_fsync(file, start, end, datasync);
        if (ret == -EIO || test_and_clear_bit(AS_EIO, &mapping->flags)) {
                /* We don't really know where the IO error happened... */
                ext2_error(sb, __func__,
                           "detected IO error when writing metadata buffers");
                ret = -EIO;
        }
        return ret;
}

/*
 * We have mostly NULL's here: the current defaults are ok for
 * the ext2 filesystem.
 */
const struct file_operations ext2_file_operations = {
        .llseek         = generic_file_llseek,
        .read_iter      = generic_file_read_iter,
        .write_iter     = generic_file_write_iter,
        .unlocked_ioctl = ext2_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = ext2_compat_ioctl,
#endif
        .mmap           = ext2_file_mmap,
        .open           = dquot_file_open,
        .release        = ext2_release_file,
        .fsync          = ext2_fsync,
        .splice_read    = generic_file_splice_read,
        .splice_write   = iter_file_splice_write,
};

const struct inode_operations ext2_file_inode_operations = {
#ifdef CONFIG_EXT2_FS_XATTR
        .setxattr       = generic_setxattr,
        .getxattr       = generic_getxattr,
        .listxattr      = ext2_listxattr,
        .removexattr    = generic_removexattr,
#endif
        .setattr        = ext2_setattr,
        .get_acl        = ext2_get_acl,
        .set_acl        = ext2_set_acl,
        .fiemap         = ext2_fiemap,
};