/*
 *  linux/fs/file_table.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
#include <linux/capability.h>
#include <linux/cdev.h>
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
#include <linux/lglock.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/ima.h>

#include <asm/atomic.h>

#include "internal.h"

/* sysctl tunables... */
struct files_stat_struct files_stat = {
	.max_files = NR_FILE
};

DECLARE_LGLOCK(files_lglock);
DEFINE_LGLOCK(files_lglock);

/* SLAB cache for file structures */
static struct kmem_cache *filp_cachep __read_mostly;

static struct percpu_counter nr_files __cacheline_aligned_in_smp;

static inline void file_free_rcu(struct rcu_head *head)
{
	struct file *f = container_of(head, struct file, f_u.fu_rcuhead);

	put_cred(f->f_cred);
	kmem_cache_free(filp_cachep, f);
}

static inline void file_free(struct file *f)
{
	percpu_counter_dec(&nr_files);
	file_check_state(f);
	call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
}

/*
 * Return the total number of open files in the system
 */
static long get_nr_files(void)
{
	return percpu_counter_read_positive(&nr_files);
}

/*
 * Return the maximum number of open files in the system
 */
unsigned long get_max_files(void)
{
	return files_stat.max_files;
}
EXPORT_SYMBOL_GPL(get_max_files);

/*
 * Handle nr_files sysctl
 */
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
int proc_nr_files(ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	files_stat.nr_files = get_nr_files();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#else
int proc_nr_files(ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return -ENOSYS;
}
#endif

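/*
 * proc_nr_files() above backs the "file-nr" entry under /proc/sys/fs
 * (wired up in kernel/sysctl.c).  A read reports nr_files, nr_free_files
 * and max_files; the numbers below are purely illustrative:
 *
 *	$ cat /proc/sys/fs/file-nr
 *	3392	0	205732
 *
 * The middle value stays 0 here, since freed files go straight back to
 * the slab cache rather than onto a free list.
 */
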
/* Find an unused file structure and return a pointer to it.
 * Returns NULL if there are no more free file structures or
 * we run out of memory.
 *
 * Be very careful using this.  You are responsible for
 * getting write access to any mount that you might assign
 * to this filp, if it is opened for write.  If this is not
 * done, you will imbalance the mount's writer count
 * and get a warning at __fput() time.
 */
struct file *get_empty_filp(void)
{
	const struct cred *cred = current_cred();
	static long old_max;
	struct file *f;

	/*
	 * Privileged users can go above max_files
	 */
	if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
		/*
		 * percpu_counters are inaccurate.  Do an expensive check before
		 * we go and fail.
		 */
		if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
			goto over;
	}

	f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
	if (f == NULL)
		goto fail;

	percpu_counter_inc(&nr_files);
	f->f_cred = get_cred(cred);
	if (security_file_alloc(f))
		goto fail_sec;

	INIT_LIST_HEAD(&f->f_u.fu_list);
	atomic_long_set(&f->f_count, 1);
	rwlock_init(&f->f_owner.lock);
	spin_lock_init(&f->f_lock);
	eventpoll_init_file(f);
	/* f->f_version: 0 */
	return f;

over:
	/* Ran out of filps - report that */
	if (get_nr_files() > old_max) {
		pr_info("VFS: file-max limit %lu reached\n", get_max_files());
		old_max = get_nr_files();
	}
	goto fail;

fail_sec:
	file_free(f);
fail:
	return NULL;
}

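/*
 * To make the write-access obligation above concrete, a caller that
 * hands the new filp a mount and opens it for write is expected to do
 * roughly the following ("mnt" and the error handling are illustrative,
 * not taken from a real caller):
 *
 *	error = mnt_want_write(mnt);
 *	if (error)
 *		return ERR_PTR(error);
 *	filp = get_empty_filp();
 *	if (!filp) {
 *		mnt_drop_write(mnt);
 *		return ERR_PTR(-ENFILE);
 *	}
 *	filp->f_mode = FMODE_WRITE | FMODE_READ;
 *	filp->f_path.mnt = mntget(mnt);
 *	...
 *
 * so that the mnt_drop_write() done from __fput() is balanced.
 */
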
/**
 * alloc_file - allocate and initialize a 'struct file'
 * @path: the dentry/vfsmount pair the new file will point at
 * @mode: the mode with which the new file will be opened
 * @fop: the 'struct file_operations' for the new file
 *
 * Use this instead of get_empty_filp() to get a new 'struct file':
 * it takes care of the initialization pitfalls described above for
 * get_empty_filp() callers and is the preferred interface.
 */
struct file *alloc_file(struct path *path, fmode_t mode,
		const struct file_operations *fop)
{
	struct file *file;

	file = get_empty_filp();
	if (!file)
		return NULL;

	file->f_path = *path;
	file->f_mapping = path->dentry->d_inode->i_mapping;
	file->f_mode = mode;
	file->f_op = fop;

	/*
	 * These mounts don't really matter in practice
	 * for r/o bind mounts.  They aren't userspace-
	 * visible.  We do this for consistency, and so
	 * that we can do debugging checks at __fput()
	 */
	if ((mode & FMODE_WRITE) && !special_file(path->dentry->d_inode->i_mode)) {
		file_take_write(file);
		WARN_ON(mnt_clone_write(path->mnt));
	}
	if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
		i_readcount_inc(path->dentry->d_inode);
	return file;
}
EXPORT_SYMBOL(alloc_file);

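/*
 * Sketch of a typical alloc_file() call; the fops name and the error
 * value are illustrative, not taken from a real caller.  A filesystem
 * that already holds dentry and vfsmount references can do:
 *
 *	struct path path = { .mnt = mntget(mnt), .dentry = dget(dentry) };
 *	struct file *file;
 *
 *	file = alloc_file(&path, FMODE_READ, &example_fops);
 *	if (!file) {
 *		path_put(&path);
 *		return ERR_PTR(-ENFILE);
 *	}
 *
 * On success the references travel with file->f_path and are dropped by
 * __fput(); on failure the caller drops them itself.  For FMODE_WRITE
 * opens the mount write-access rules above apply, since alloc_file()
 * only clones an existing write access via mnt_clone_write().
 */
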
/**
 * drop_file_write_access - give up ability to write to a file
 * @file: the file to which we will stop writing
 *
 * This is a central place which will give up the ability
 * to write to @file, along with access to write through
 * its vfsmount.
 */
void drop_file_write_access(struct file *file)
{
	struct vfsmount *mnt = file->f_path.mnt;
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;

	put_write_access(inode);

	if (special_file(inode->i_mode))
		return;
	if (file_check_writeable(file) != 0)
		return;
	mnt_drop_write(mnt);
	file_release_write(file);
}
EXPORT_SYMBOL_GPL(drop_file_write_access);

/* the real guts of fput() - releasing the last reference to file
 */
static void __fput(struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;
	struct vfsmount *mnt = file->f_path.mnt;
	struct inode *inode = dentry->d_inode;

	might_sleep();

	fsnotify_close(file);
	/*
	 * The function eventpoll_release() should be the first called
	 * in the file cleanup chain.
	 */
	eventpoll_release(file);
	locks_remove_flock(file);

	if (unlikely(file->f_flags & FASYNC)) {
		if (file->f_op && file->f_op->fasync)
			file->f_op->fasync(-1, file, 0);
	}
	if (file->f_op && file->f_op->release)
		file->f_op->release(inode, file);
	security_file_free(file);
	ima_file_free(file);
	if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL))
		cdev_put(inode->i_cdev);
	fops_put(file->f_op);
	put_pid(file->f_owner.pid);
	file_sb_list_del(file);
	if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
		i_readcount_dec(inode);
	if (file->f_mode & FMODE_WRITE)
		drop_file_write_access(file);
	file->f_path.dentry = NULL;
	file->f_path.mnt = NULL;
	file_free(file);
	dput(dentry);
	mntput(mnt);
}

void fput(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count))
		__fput(file);
}

EXPORT_SYMBOL(fput);

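/*
 * fget() looks the descriptor up in current->files and, under RCU, tries
 * to take a reference on the file.  It returns NULL if the descriptor is
 * not open, if the file is already going away (its count has hit zero),
 * or if the file was opened with O_PATH (FMODE_PATH).  A sketch of the
 * usual pairing:
 *
 *	struct file *file = fget(fd);
 *
 *	if (!file)
 *		return -EBADF;
 *	...
 *	fput(file);
 */
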
struct file *fget(unsigned int fd)
{
	struct file *file;
	struct files_struct *files = current->files;

	rcu_read_lock();
	file = fcheck_files(files, fd);
	if (file) {
		/* File object ref couldn't be taken */
		if (file->f_mode & FMODE_PATH ||
		    !atomic_long_inc_not_zero(&file->f_count))
			file = NULL;
	}
	rcu_read_unlock();

	return file;
}

EXPORT_SYMBOL(fget);

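/*
 * fget_raw() is like fget() except that it will also return files opened
 * with O_PATH (FMODE_PATH), which plain fget() filters out.
 */
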
struct file *fget_raw(unsigned int fd)
{
	struct file *file;
	struct files_struct *files = current->files;

	rcu_read_lock();
	file = fcheck_files(files, fd);
	if (file) {
		/* File object ref couldn't be taken */
		if (!atomic_long_inc_not_zero(&file->f_count))
			file = NULL;
	}
	rcu_read_unlock();

	return file;
}

EXPORT_SYMBOL(fget_raw);

/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 *
 * You can use this instead of fget if you satisfy all of the following
 * conditions:
 * 1) You must call fput_light before exiting the syscall and returning control
 *    to userspace (i.e. you cannot remember the returned struct file * after
 *    returning to userspace).
 * 2) You must not call filp_close on the returned struct file * in between
 *    calls to fget_light and fput_light.
 * 3) You must not clone the current task in between the calls to fget_light
 *    and fput_light.
 *
 * The fput_needed flag returned by fget_light should be passed to the
 * corresponding fput_light.
 */
struct file *fget_light(unsigned int fd, int *fput_needed)
{
	struct file *file;
	struct files_struct *files = current->files;

	*fput_needed = 0;
	if (atomic_read(&files->count) == 1) {
		file = fcheck_files(files, fd);
		if (file && (file->f_mode & FMODE_PATH))
			file = NULL;
	} else {
		rcu_read_lock();
		file = fcheck_files(files, fd);
		if (file) {
			if (!(file->f_mode & FMODE_PATH) &&
			    atomic_long_inc_not_zero(&file->f_count))
				*fput_needed = 1;
			else
				/* Didn't get the reference, someone's freed it */
				file = NULL;
		}
		rcu_read_unlock();
	}

	return file;
}

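/*
 * A typical fget_light()/fput_light() pairing, following the rules above
 * (the syscall body here is illustrative only):
 *
 *	int fput_needed;
 *	struct file *file = fget_light(fd, &fput_needed);
 *
 *	if (!file)
 *		return -EBADF;
 *	ret = do_the_work(file);
 *	fput_light(file, fput_needed);
 *	return ret;
 */
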
struct file *fget_raw_light(unsigned int fd, int *fput_needed)
{
	struct file *file;
	struct files_struct *files = current->files;

	*fput_needed = 0;
	if (atomic_read(&files->count) == 1) {
		file = fcheck_files(files, fd);
	} else {
		rcu_read_lock();
		file = fcheck_files(files, fd);
		if (file) {
			if (atomic_long_inc_not_zero(&file->f_count))
				*fput_needed = 1;
			else
				/* Didn't get the reference, someone's freed it */
				file = NULL;
		}
		rcu_read_unlock();
	}

	return file;
}

void put_filp(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count)) {
		security_file_free(file);
		file_sb_list_del(file);
		file_free(file);
	}
}

static inline int file_list_cpu(struct file *file)
{
#ifdef CONFIG_SMP
	return file->f_sb_list_cpu;
#else
	return smp_processor_id();
#endif
}

/* helper for file_sb_list_add to reduce ifdefs */
static inline void __file_sb_list_add(struct file *file, struct super_block *sb)
{
	struct list_head *list;
#ifdef CONFIG_SMP
	int cpu;
	cpu = smp_processor_id();
	file->f_sb_list_cpu = cpu;
	list = per_cpu_ptr(sb->s_files, cpu);
#else
	list = &sb->s_files;
#endif
	list_add(&file->f_u.fu_list, list);
}

/**
 * file_sb_list_add - add a file to the sb's file list
 * @file: file to add
 * @sb: sb to add it to
 *
 * Use this function to associate a file with the superblock of the inode it
 * refers to.
 */
void file_sb_list_add(struct file *file, struct super_block *sb)
{
	lg_local_lock(files_lglock);
	__file_sb_list_add(file, sb);
	lg_local_unlock(files_lglock);
}

/**
 * file_sb_list_del - remove a file from the sb's file list
 * @file: file to remove
 *
 * Use this function to remove a file from the per-sb file list it was
 * added to with file_sb_list_add().
 */
void file_sb_list_del(struct file *file)
{
	if (!list_empty(&file->f_u.fu_list)) {
		lg_local_lock_cpu(files_lglock, file_list_cpu(file));
		list_del_init(&file->f_u.fu_list);
		lg_local_unlock_cpu(files_lglock, file_list_cpu(file));
	}
}

#ifdef CONFIG_SMP

/*
 * These macros iterate all files on all CPUs for a given superblock.
 * files_lglock must be held globally.
 */
#define do_file_list_for_each_entry(__sb, __file)		\
{								\
	int i;							\
	for_each_possible_cpu(i) {				\
		struct list_head *list;				\
		list = per_cpu_ptr((__sb)->s_files, i);		\
		list_for_each_entry((__file), list, f_u.fu_list)

#define while_file_list_for_each_entry				\
	}							\
}

#else

#define do_file_list_for_each_entry(__sb, __file)		\
{								\
	struct list_head *list;					\
	list = &(__sb)->s_files;				\
	list_for_each_entry((__file), list, f_u.fu_list)

#define while_file_list_for_each_entry				\
}

#endif

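/*
 * The iterators above are used with the lock held globally, in the
 * pattern that fs_may_remount_ro() below follows:
 *
 *	lg_global_lock(files_lglock);
 *	do_file_list_for_each_entry(sb, file) {
 *		... examine file ...
 *	} while_file_list_for_each_entry;
 *	lg_global_unlock(files_lglock);
 */
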
int fs_may_remount_ro(struct super_block *sb)
{
	struct file *file;
	/* Check that no files are currently opened for writing. */
	lg_global_lock(files_lglock);
	do_file_list_for_each_entry(sb, file) {
		struct inode *inode = file->f_path.dentry->d_inode;

		/* File with pending delete? */
		if (inode->i_nlink == 0)
			goto too_bad;

		/* Writeable file? */
		if (S_ISREG(inode->i_mode) && (file->f_mode & FMODE_WRITE))
			goto too_bad;
	} while_file_list_for_each_entry;
	lg_global_unlock(files_lglock);
	return 1; /* Tis' cool bro. */
too_bad:
	lg_global_unlock(files_lglock);
	return 0;
}

/**
 * mark_files_ro - mark all files read-only
 * @sb: superblock in question
 *
 * All files are marked read-only.  We don't care about pending
 * delete files so this should be used in 'force' mode only.
 */
void mark_files_ro(struct super_block *sb)
{
	struct file *f;

retry:
	lg_global_lock(files_lglock);
	do_file_list_for_each_entry(sb, f) {
		struct vfsmount *mnt;
		if (!S_ISREG(f->f_path.dentry->d_inode->i_mode))
			continue;
		if (!file_count(f))
			continue;
		if (!(f->f_mode & FMODE_WRITE))
			continue;
		spin_lock(&f->f_lock);
		f->f_mode &= ~FMODE_WRITE;
		spin_unlock(&f->f_lock);
		if (file_check_writeable(f) != 0)
			continue;
		file_release_write(f);
		mnt = mntget(f->f_path.mnt);
		/* This can sleep, so we can't hold the spinlock. */
		lg_global_unlock(files_lglock);
		mnt_drop_write(mnt);
		mntput(mnt);
		goto retry;
	} while_file_list_for_each_entry;
	lg_global_unlock(files_lglock);
}

void __init files_init(unsigned long mempages)
{
	unsigned long n;

	filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

	/*
	 * One file with associated inode and dcache is very roughly 1K.
	 * Per default don't use more than 10% of our memory for files.
	 */

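	/*
	 * Worked example of the sizing below (illustrative, assuming 4 KiB
	 * pages): with 4 GiB of memory, mempages is 1048576, so
	 * n = (1048576 * 4) / 10 = 419430 files.
	 */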
	n = (mempages * (PAGE_SIZE / 1024)) / 10;
	files_stat.max_files = max_t(unsigned long, n, NR_FILE);
	files_defer_init();
	lg_lock_init(files_lglock);
	percpu_counter_init(&nr_files, 0);
}