/*
 *  linux/fs/file_table.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
#include <linux/capability.h>
#include <linux/cdev.h>
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
#include <linux/lglock.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/task_work.h>
#include <linux/ima.h>

#include <linux/atomic.h>

#include "internal.h"

/* sysctl tunables... */
struct files_stat_struct files_stat = {
	.max_files = NR_FILE
};

DEFINE_LGLOCK(files_lglock);

/* SLAB cache for file structures */
static struct kmem_cache *filp_cachep __read_mostly;

static struct percpu_counter nr_files __cacheline_aligned_in_smp;

static void file_free_rcu(struct rcu_head *head)
{
	struct file *f = container_of(head, struct file, f_u.fu_rcuhead);

	put_cred(f->f_cred);
	kmem_cache_free(filp_cachep, f);
}

static inline void file_free(struct file *f)
{
	percpu_counter_dec(&nr_files);
	file_check_state(f);
	call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
}

60
61 /*
62 * Return the total number of open files in the system
63 */
64 static long get_nr_files(void)
65 {
66 return percpu_counter_read_positive(&nr_files);
67 }
68
69 /*
70 * Return the maximum number of open files in the system
71 */
72 unsigned long get_max_files(void)
73 {
74 return files_stat.max_files;
75 }
76 EXPORT_SYMBOL_GPL(get_max_files);
77
78 /*
79 * Handle nr_files sysctl
80 */
81 #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
82 int proc_nr_files(ctl_table *table, int write,
83 void __user *buffer, size_t *lenp, loff_t *ppos)
84 {
85 files_stat.nr_files = get_nr_files();
86 return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
87 }
88 #else
89 int proc_nr_files(ctl_table *table, int write,
90 void __user *buffer, size_t *lenp, loff_t *ppos)
91 {
92 return -ENOSYS;
93 }
94 #endif
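
/*
 * Note (added for clarity; the sysctl table wiring itself lives in
 * kernel/sysctl.c): proc_nr_files() is the handler behind the fs.file-nr
 * sysctl, so reading /proc/sys/fs/file-nr reports the current number of
 * allocated file objects, a legacy "free" count and max_files.
 */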

/* Find an unused file structure and return a pointer to it.
 * Returns NULL if there are no more free file structures or
 * we run out of memory.
 *
 * Be very careful using this.  You are responsible for
 * getting write access to any mount that you might assign
 * to this filp, if it is opened for write.  If this is not
 * done, you will imbalance the mount's writer count
 * and get a warning at __fput() time.
 */
struct file *get_empty_filp(void)
{
	const struct cred *cred = current_cred();
	static long old_max;
	struct file *f;

	/*
	 * Privileged users can go above max_files
	 */
	if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
		/*
		 * percpu_counters are inaccurate.  Do an expensive check before
		 * we go and fail.
		 */
		if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
			goto over;
	}

	f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
	if (f == NULL)
		goto fail;

	percpu_counter_inc(&nr_files);
	f->f_cred = get_cred(cred);
	if (security_file_alloc(f))
		goto fail_sec;

	INIT_LIST_HEAD(&f->f_u.fu_list);
	atomic_long_set(&f->f_count, 1);
	rwlock_init(&f->f_owner.lock);
	spin_lock_init(&f->f_lock);
	eventpoll_init_file(f);
	/* f->f_version: 0 */
	return f;

over:
	/* Ran out of filps - report that */
	if (get_nr_files() > old_max) {
		pr_info("VFS: file-max limit %lu reached\n", get_max_files());
		old_max = get_nr_files();
	}
	goto fail;

fail_sec:
	file_free(f);
fail:
	return NULL;
}

/**
 * alloc_file - allocate and initialize a 'struct file'
 * @path: the vfsmount and dentry on which the file will reside
 * @mode: the mode with which the new file will be opened
 * @fop: the 'struct file_operations' for the new file
 *
 * Use this instead of get_empty_filp() to get a new
 * 'struct file': it takes care of the initialization
 * pitfalls (mount writer accounting, i_readcount) that
 * get_empty_filp() leaves to the caller.
 */
struct file *alloc_file(struct path *path, fmode_t mode,
		const struct file_operations *fop)
{
	struct file *file;

	file = get_empty_filp();
	if (!file)
		return NULL;

	file->f_path = *path;
	file->f_mapping = path->dentry->d_inode->i_mapping;
	file->f_mode = mode;
	file->f_op = fop;

	/*
	 * These mounts don't really matter in practice
	 * for r/o bind mounts.  They aren't userspace-
	 * visible.  We do this for consistency, and so
	 * that we can do debugging checks at __fput()
	 */
	if ((mode & FMODE_WRITE) && !special_file(path->dentry->d_inode->i_mode)) {
		file_take_write(file);
		WARN_ON(mnt_clone_write(path->mnt));
	}
	if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
		i_readcount_inc(path->dentry->d_inode);
	return file;
}
EXPORT_SYMBOL(alloc_file);
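
/*
 * Minimal usage sketch (hypothetical names; real callers such as the
 * anonymous-inode code do more setup).  The caller supplies a path whose
 * dentry and mount references the resulting file will eventually drop in
 * __fput():
 *
 *	struct path path = { .mnt = my_mnt, .dentry = my_dentry };
 *	struct file *file = alloc_file(&path, FMODE_READ, &my_fops);
 *
 *	if (!file)
 *		return ERR_PTR(-ENFILE);
 */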

/**
 * drop_file_write_access - give up ability to write to a file
 * @file: the file to which we will stop writing
 *
 * This is a central place which will give up the ability
 * to write to @file, along with access to write through
 * its vfsmount.
 */
static void drop_file_write_access(struct file *file)
{
	struct vfsmount *mnt = file->f_path.mnt;
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;

	put_write_access(inode);

	if (special_file(inode->i_mode))
		return;
	if (file_check_writeable(file) != 0)
		return;
	__mnt_drop_write(mnt);
	file_release_write(file);
}

223
224 /* the real guts of fput() - releasing the last reference to file
225 */
226 static void __fput(struct file *file)
227 {
228 struct dentry *dentry = file->f_path.dentry;
229 struct vfsmount *mnt = file->f_path.mnt;
230 struct inode *inode = dentry->d_inode;
231
232 might_sleep();
233
234 fsnotify_close(file);
235 /*
236 * The function eventpoll_release() should be the first called
237 * in the file cleanup chain.
238 */
239 eventpoll_release(file);
240 locks_remove_flock(file);
241
242 if (unlikely(file->f_flags & FASYNC)) {
243 if (file->f_op && file->f_op->fasync)
244 file->f_op->fasync(-1, file, 0);
245 }
246 if (file->f_op && file->f_op->release)
247 file->f_op->release(inode, file);
248 security_file_free(file);
249 ima_file_free(file);
250 if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL &&
251 !(file->f_mode & FMODE_PATH))) {
252 cdev_put(inode->i_cdev);
253 }
254 fops_put(file->f_op);
255 put_pid(file->f_owner.pid);
256 if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
257 i_readcount_dec(inode);
258 if (file->f_mode & FMODE_WRITE)
259 drop_file_write_access(file);
260 file->f_path.dentry = NULL;
261 file->f_path.mnt = NULL;
262 file_free(file);
263 dput(dentry);
264 mntput(mnt);
265 }

static DEFINE_SPINLOCK(delayed_fput_lock);
static LIST_HEAD(delayed_fput_list);
static void delayed_fput(struct work_struct *unused)
{
	LIST_HEAD(head);
	spin_lock_irq(&delayed_fput_lock);
	list_splice_init(&delayed_fput_list, &head);
	spin_unlock_irq(&delayed_fput_lock);
	while (!list_empty(&head)) {
		struct file *f = list_first_entry(&head, struct file, f_u.fu_list);
		list_del_init(&f->f_u.fu_list);
		__fput(f);
	}
}

static void ____fput(struct callback_head *work)
{
	__fput(container_of(work, struct file, f_u.fu_rcuhead));
}

/*
 * If a kernel thread really needs the final fput() it has done to
 * complete, call this.  The only user right now is boot - we *do* need
 * to make sure our writes to binaries on initramfs have not left us
 * with opened struct files waiting for __fput() - execve() won't work
 * without that.  Please don't add more callers without very good
 * reasons; in particular, never call this with locks held and never
 * call it from a thread that might need to do some work on any kind
 * of umount.
 */
void flush_delayed_fput(void)
{
	delayed_fput(NULL);
}

static DECLARE_WORK(delayed_fput_work, delayed_fput);

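/*
 * Dropping the final reference must not run __fput() directly from atomic
 * context: for ordinary tasks the cleanup is queued as task work and runs
 * before the task returns to userspace, while interrupt context and kernel
 * threads punt it to the delayed_fput_list drained by the work item above.
 */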
void fput(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count)) {
		struct task_struct *task = current;
		file_sb_list_del(file);
		if (unlikely(in_interrupt() || task->flags & PF_KTHREAD)) {
			unsigned long flags;
			spin_lock_irqsave(&delayed_fput_lock, flags);
			list_add(&file->f_u.fu_list, &delayed_fput_list);
			schedule_work(&delayed_fput_work);
			spin_unlock_irqrestore(&delayed_fput_lock, flags);
			return;
		}
		init_task_work(&file->f_u.fu_rcuhead, ____fput);
		task_work_add(task, &file->f_u.fu_rcuhead, true);
	}
}

/*
 * Synchronous analog of fput(); for kernel threads that might be needed
 * in some umount() (and thus can't use flush_delayed_fput() without
 * risking deadlocks), yet need to wait for the completion of __fput()
 * and know that for this specific struct file it won't involve anything
 * that would need them.  Use only if you really need it - at the very
 * least, don't blindly convert an fput() done by a kernel thread to this.
 */
void __fput_sync(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count)) {
		struct task_struct *task = current;
		file_sb_list_del(file);
		BUG_ON(!(task->flags & PF_KTHREAD));
		__fput(file);
	}
}

EXPORT_SYMBOL(fput);

struct file *fget(unsigned int fd)
{
	struct file *file;
	struct files_struct *files = current->files;

	rcu_read_lock();
	file = fcheck_files(files, fd);
	if (file) {
		/* File object ref couldn't be taken */
		if (file->f_mode & FMODE_PATH ||
		    !atomic_long_inc_not_zero(&file->f_count))
			file = NULL;
	}
	rcu_read_unlock();

	return file;
}

EXPORT_SYMBOL(fget);
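
/*
 * Typical pattern (sketch, error handling elided): hold the reference for
 * as long as the file is used, then drop it with fput():
 *
 *	struct file *file = fget(fd);
 *
 *	if (!file)
 *		return -EBADF;
 *	... operate on file ...
 *	fput(file);
 */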

struct file *fget_raw(unsigned int fd)
{
	struct file *file;
	struct files_struct *files = current->files;

	rcu_read_lock();
	file = fcheck_files(files, fd);
	if (file) {
		/* File object ref couldn't be taken */
		if (!atomic_long_inc_not_zero(&file->f_count))
			file = NULL;
	}
	rcu_read_unlock();

	return file;
}

EXPORT_SYMBOL(fget_raw);

/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 *
 * You can use this instead of fget if you satisfy all of the following
 * conditions:
 * 1) You must call fput_light before exiting the syscall and returning control
 *    to userspace (i.e. you cannot remember the returned struct file * after
 *    returning to userspace).
 * 2) You must not call filp_close on the returned struct file * in between
 *    calls to fget_light and fput_light.
 * 3) You must not clone the current task in between the calls to fget_light
 *    and fput_light.
 *
 * The fput_needed flag returned by fget_light should be passed to the
 * corresponding fput_light.
 */
struct file *fget_light(unsigned int fd, int *fput_needed)
{
	struct file *file;
	struct files_struct *files = current->files;

	*fput_needed = 0;
	if (atomic_read(&files->count) == 1) {
		file = fcheck_files(files, fd);
		if (file && (file->f_mode & FMODE_PATH))
			file = NULL;
	} else {
		rcu_read_lock();
		file = fcheck_files(files, fd);
		if (file) {
			if (!(file->f_mode & FMODE_PATH) &&
			    atomic_long_inc_not_zero(&file->f_count))
				*fput_needed = 1;
			else
				/* Didn't get the reference, someone's freed */
				file = NULL;
		}
		rcu_read_unlock();
	}

	return file;
}
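
/*
 * Typical pattern (sketch): the result is only valid within the current
 * syscall, must not be stashed, and is paired with fput_light():
 *
 *	int fput_needed;
 *	struct file *file = fget_light(fd, &fput_needed);
 *
 *	if (!file)
 *		return -EBADF;
 *	... use file before returning to userspace ...
 *	fput_light(file, fput_needed);
 */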

struct file *fget_raw_light(unsigned int fd, int *fput_needed)
{
	struct file *file;
	struct files_struct *files = current->files;

	*fput_needed = 0;
	if (atomic_read(&files->count) == 1) {
		file = fcheck_files(files, fd);
	} else {
		rcu_read_lock();
		file = fcheck_files(files, fd);
		if (file) {
			if (atomic_long_inc_not_zero(&file->f_count))
				*fput_needed = 1;
			else
				/* Didn't get the reference, someone's freed */
				file = NULL;
		}
		rcu_read_unlock();
	}

	return file;
}

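/*
 * Release a file obtained from get_empty_filp() that was never fully
 * opened or installed in a descriptor table; unlike fput(), this does
 * not call f_op->release(), dput() or mntput().
 */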
void put_filp(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count)) {
		security_file_free(file);
		file_sb_list_del(file);
		file_free(file);
	}
}

static inline int file_list_cpu(struct file *file)
{
#ifdef CONFIG_SMP
	return file->f_sb_list_cpu;
#else
	return smp_processor_id();
#endif
}

/* helper for file_sb_list_add to reduce ifdefs */
static inline void __file_sb_list_add(struct file *file, struct super_block *sb)
{
	struct list_head *list;
#ifdef CONFIG_SMP
	int cpu;
	cpu = smp_processor_id();
	file->f_sb_list_cpu = cpu;
	list = per_cpu_ptr(sb->s_files, cpu);
#else
	list = &sb->s_files;
#endif
	list_add(&file->f_u.fu_list, list);
}

/**
 * file_sb_list_add - add a file to the sb's file list
 * @file: file to add
 * @sb: sb to add it to
 *
 * Use this function to associate a file with the superblock of the inode it
 * refers to.
 */
void file_sb_list_add(struct file *file, struct super_block *sb)
{
	lg_local_lock(&files_lglock);
	__file_sb_list_add(file, sb);
	lg_local_unlock(&files_lglock);
}

/**
 * file_sb_list_del - remove a file from the sb's file list
 * @file: file to remove
 *
 * Use this function to remove a file from the per-superblock file list
 * that file_sb_list_add() put it on.
 */
void file_sb_list_del(struct file *file)
{
	if (!list_empty(&file->f_u.fu_list)) {
		lg_local_lock_cpu(&files_lglock, file_list_cpu(file));
		list_del_init(&file->f_u.fu_list);
		lg_local_unlock_cpu(&files_lglock, file_list_cpu(file));
	}
}

#ifdef CONFIG_SMP

/*
 * These macros iterate all files on all CPUs for a given superblock.
 * files_lglock must be held globally.
 */
#define do_file_list_for_each_entry(__sb, __file)		\
{								\
	int i;							\
	for_each_possible_cpu(i) {				\
		struct list_head *list;				\
		list = per_cpu_ptr((__sb)->s_files, i);		\
		list_for_each_entry((__file), list, f_u.fu_list)

#define while_file_list_for_each_entry				\
	}							\
}

#else

#define do_file_list_for_each_entry(__sb, __file)		\
{								\
	struct list_head *list;					\
	list = &(__sb)->s_files;				\
	list_for_each_entry((__file), list, f_u.fu_list)

#define while_file_list_for_each_entry				\
}

#endif

/**
 * mark_files_ro - mark all files read-only
 * @sb: superblock in question
 *
 * All files are marked read-only.  We don't care about pending
 * delete files so this should be used in 'force' mode only.
 */
void mark_files_ro(struct super_block *sb)
{
	struct file *f;

	lg_global_lock(&files_lglock);
	do_file_list_for_each_entry(sb, f) {
		if (!S_ISREG(f->f_path.dentry->d_inode->i_mode))
			continue;
		if (!file_count(f))
			continue;
		if (!(f->f_mode & FMODE_WRITE))
			continue;
		spin_lock(&f->f_lock);
		f->f_mode &= ~FMODE_WRITE;
		spin_unlock(&f->f_lock);
		if (file_check_writeable(f) != 0)
			continue;
		file_release_write(f);
		mnt_drop_write_file(f);
	} while_file_list_for_each_entry;
	lg_global_unlock(&files_lglock);
}

void __init files_init(unsigned long mempages)
{
	unsigned long n;

	filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

	/*
	 * One file with associated inode and dcache is very roughly 1K.
	 * Per default don't use more than 10% of our memory for files.
	 */

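	/*
	 * Worked example (assuming 4 KiB pages): with mempages = 262144,
	 * i.e. 1 GiB of memory, n = 262144 * 4 / 10 = 104857 files.
	 */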
	n = (mempages * (PAGE_SIZE / 1024)) / 10;
	files_stat.max_files = max_t(unsigned long, n, NR_FILE);
	files_defer_init();
	lg_lock_init(&files_lglock, "files_lglock");
	percpu_counter_init(&nr_files, 0);
}