/*
 * linux/fs/file_table.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
#include <linux/capability.h>
#include <linux/cdev.h>
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
#include <linux/lglock.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/ima.h>

#include <asm/atomic.h>

#include "internal.h"

/* sysctl tunables... */
struct files_stat_struct files_stat = {
	.max_files = NR_FILE
};

DECLARE_LGLOCK(files_lglock);
DEFINE_LGLOCK(files_lglock);

/* SLAB cache for file structures */
static struct kmem_cache *filp_cachep __read_mostly;

static struct percpu_counter nr_files __cacheline_aligned_in_smp;

static inline void file_free_rcu(struct rcu_head *head)
{
	struct file *f = container_of(head, struct file, f_u.fu_rcuhead);

	put_cred(f->f_cred);
	kmem_cache_free(filp_cachep, f);
}

static inline void file_free(struct file *f)
{
	percpu_counter_dec(&nr_files);
	file_check_state(f);
	call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
}

/*
 * Return the total number of open files in the system
 */
static long get_nr_files(void)
{
	return percpu_counter_read_positive(&nr_files);
}

/*
 * Return the maximum number of open files in the system
 */
unsigned long get_max_files(void)
{
	return files_stat.max_files;
}
EXPORT_SYMBOL_GPL(get_max_files);

/*
 * Handle nr_files sysctl
 */
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
int proc_nr_files(ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	files_stat.nr_files = get_nr_files();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#else
int proc_nr_files(ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return -ENOSYS;
}
#endif
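
/*
 * Illustrative sketch (not part of this file): proc_nr_files() is meant to
 * be referenced from a sysctl table entry, roughly as kernel/sysctl.c does
 * for "file-nr".  The field values below are a from-memory approximation,
 * not a verbatim copy of that table:
 *
 *	{
 *		.procname	= "file-nr",
 *		.data		= &files_stat,
 *		.maxlen		= sizeof(files_stat),
 *		.mode		= 0444,
 *		.proc_handler	= proc_nr_files,
 *	},
 */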

/* Find an unused file structure and return a pointer to it.
 * Returns NULL if there are no more free file structures or
 * we run out of memory.
 *
 * Be very careful using this.  You are responsible for
 * getting write access to any mount that you might assign
 * to this filp, if it is opened for write.  If this is not
 * done, you will imbalance the mount's writer count
 * and get a warning at __fput() time.
 */
struct file *get_empty_filp(void)
{
	const struct cred *cred = current_cred();
	static long old_max;
	struct file *f;

	/*
	 * Privileged users can go above max_files
	 */
	if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
		/*
		 * percpu_counters are inaccurate.  Do an expensive check before
		 * we go and fail.
		 */
		if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
			goto over;
	}

	f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
	if (f == NULL)
		goto fail;

	percpu_counter_inc(&nr_files);
	f->f_cred = get_cred(cred);
	if (security_file_alloc(f))
		goto fail_sec;

	INIT_LIST_HEAD(&f->f_u.fu_list);
	atomic_long_set(&f->f_count, 1);
	rwlock_init(&f->f_owner.lock);
	spin_lock_init(&f->f_lock);
	eventpoll_init_file(f);
	/* f->f_version: 0 */
	return f;

over:
	/* Ran out of filps - report that */
	if (get_nr_files() > old_max) {
		pr_info("VFS: file-max limit %lu reached\n", get_max_files());
		old_max = get_nr_files();
	}
	goto fail;

fail_sec:
	file_free(f);
fail:
	return NULL;
}
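
/*
 * Illustrative sketch (not part of this file) of the caller obligation
 * described above get_empty_filp(): a caller that hands a write-mode filp
 * to a mount must account for the mount writer itself, mirroring what
 * alloc_file() below does.  In this sketch, filp, mode, path and inode are
 * the caller's own locals, and the opener is assumed to already hold write
 * access on the mount (mnt_want_write()):
 *
 *	if ((mode & FMODE_WRITE) && !special_file(inode->i_mode)) {
 *		file_take_write(filp);
 *		WARN_ON(mnt_clone_write(path->mnt));
 *	}
 */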

/**
 * alloc_file - allocate and initialize a 'struct file'
 * @path: the dentry/vfsmount pair the new file will refer to
 * @mode: the mode with which the new file will be opened
 * @fop: the 'struct file_operations' for the new file
 *
 * Use this instead of get_empty_filp() to get a new
 * 'struct file'.  It performs the extra initialization
 * (f_path, f_mapping, f_mode, f_op and mount writer accounting)
 * that every caller would otherwise have to duplicate, and so
 * avoids the initialization pitfalls described above
 * get_empty_filp().
 */
struct file *alloc_file(struct path *path, fmode_t mode,
		const struct file_operations *fop)
{
	struct file *file;

	file = get_empty_filp();
	if (!file)
		return NULL;

	file->f_path = *path;
	file->f_mapping = path->dentry->d_inode->i_mapping;
	file->f_mode = mode;
	file->f_op = fop;

	/*
	 * These mounts don't really matter in practice
	 * for r/o bind mounts.  They aren't userspace-
	 * visible.  We do this for consistency, and so
	 * that we can do debugging checks at __fput()
	 */
	if ((mode & FMODE_WRITE) && !special_file(path->dentry->d_inode->i_mode)) {
		file_take_write(file);
		WARN_ON(mnt_clone_write(path->mnt));
	}
	ima_counts_get(file);
	return file;
}
EXPORT_SYMBOL(alloc_file);
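
/*
 * Illustrative caller sketch (not part of this file), assuming the caller
 * owns references on a dentry and vfsmount; alloc_file() copies @path, so
 * those references are effectively donated to the new file.  "example_fops"
 * is an assumed file_operations instance, not a real symbol:
 *
 *	struct path path;
 *	struct file *file;
 *
 *	path.dentry = dget(dentry);
 *	path.mnt = mntget(mnt);
 *	file = alloc_file(&path, FMODE_READ, &example_fops);
 *	if (!file) {
 *		path_put(&path);
 *		return ERR_PTR(-ENFILE);
 *	}
 */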

/**
 * drop_file_write_access - give up ability to write to a file
 * @file: the file to which we will stop writing
 *
 * This is a central place which will give up the ability
 * to write to @file, along with access to write through
 * its vfsmount.
 */
void drop_file_write_access(struct file *file)
{
	struct vfsmount *mnt = file->f_path.mnt;
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;

	put_write_access(inode);

	if (special_file(inode->i_mode))
		return;
	if (file_check_writeable(file) != 0)
		return;
	mnt_drop_write(mnt);
	file_release_write(file);
}
EXPORT_SYMBOL_GPL(drop_file_write_access);

/* the real guts of fput() - releasing the last reference to file
 */
static void __fput(struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;
	struct vfsmount *mnt = file->f_path.mnt;
	struct inode *inode = dentry->d_inode;

	might_sleep();

	fsnotify_close(file);
	/*
	 * eventpoll_release() must be called first in the
	 * file cleanup chain.
	 */
	eventpoll_release(file);
	locks_remove_flock(file);

	if (unlikely(file->f_flags & FASYNC)) {
		if (file->f_op && file->f_op->fasync)
			file->f_op->fasync(-1, file, 0);
	}
	if (file->f_op && file->f_op->release)
		file->f_op->release(inode, file);
	security_file_free(file);
	ima_file_free(file);
	if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL))
		cdev_put(inode->i_cdev);
	fops_put(file->f_op);
	put_pid(file->f_owner.pid);
	file_sb_list_del(file);
	if (file->f_mode & FMODE_WRITE)
		drop_file_write_access(file);
	file->f_path.dentry = NULL;
	file->f_path.mnt = NULL;
	file_free(file);
	dput(dentry);
	mntput(mnt);
}

void fput(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count))
		__fput(file);
}

EXPORT_SYMBOL(fput);

struct file *fget(unsigned int fd)
{
	struct file *file;
	struct files_struct *files = current->files;

	rcu_read_lock();
	file = fcheck_files(files, fd);
	if (file) {
		/* Skip O_PATH files, and files whose ref can't be taken */
		if (file->f_mode & FMODE_PATH ||
		    !atomic_long_inc_not_zero(&file->f_count))
			file = NULL;
	}
	rcu_read_unlock();

	return file;
}

EXPORT_SYMBOL(fget);
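
/*
 * Illustrative pairing (not part of this file): code that may keep the
 * file beyond the current syscall takes a full reference with fget() and
 * releases it with fput():
 *
 *	struct file *file = fget(fd);
 *
 *	if (!file)
 *		return -EBADF;
 *	...use file, possibly from another context...
 *	fput(file);
 */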

struct file *fget_raw(unsigned int fd)
{
	struct file *file;
	struct files_struct *files = current->files;

	rcu_read_lock();
	file = fcheck_files(files, fd);
	if (file) {
		/* File object ref couldn't be taken */
		if (!atomic_long_inc_not_zero(&file->f_count))
			file = NULL;
	}
	rcu_read_unlock();

	return file;
}

EXPORT_SYMBOL(fget_raw);

/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 *
 * You can use this instead of fget if you satisfy all of the following
 * conditions:
 * 1) You must call fput_light before exiting the syscall and returning control
 *    to userspace (i.e. you cannot remember the returned struct file * after
 *    returning to userspace).
 * 2) You must not call filp_close on the returned struct file * in between
 *    calls to fget_light and fput_light.
 * 3) You must not clone the current task in between the calls to fget_light
 *    and fput_light.
 *
 * The fput_needed flag returned by fget_light should be passed to the
 * corresponding fput_light.
 */
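
/*
 * Illustrative sketch of the contract above (not part of this file);
 * do_io() is a stand-in for whatever the syscall does with the file:
 *
 *	int fput_needed;
 *	struct file *file = fget_light(fd, &fput_needed);
 *
 *	if (!file)
 *		return -EBADF;
 *	ret = do_io(file);
 *	fput_light(file, fput_needed);
 *	return ret;
 */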
struct file *fget_light(unsigned int fd, int *fput_needed)
{
	struct file *file;
	struct files_struct *files = current->files;

	*fput_needed = 0;
	if (atomic_read(&files->count) == 1) {
		file = fcheck_files(files, fd);
		if (file && (file->f_mode & FMODE_PATH))
			file = NULL;
	} else {
		rcu_read_lock();
		file = fcheck_files(files, fd);
		if (file) {
			if (!(file->f_mode & FMODE_PATH) &&
			    atomic_long_inc_not_zero(&file->f_count))
				*fput_needed = 1;
			else
				/* Didn't get the reference; the file is already being freed */
				file = NULL;
		}
		rcu_read_unlock();
	}

	return file;
}

struct file *fget_raw_light(unsigned int fd, int *fput_needed)
{
	struct file *file;
	struct files_struct *files = current->files;

	*fput_needed = 0;
	if (atomic_read(&files->count) == 1) {
		file = fcheck_files(files, fd);
	} else {
		rcu_read_lock();
		file = fcheck_files(files, fd);
		if (file) {
			if (atomic_long_inc_not_zero(&file->f_count))
				*fput_needed = 1;
			else
				/* Didn't get the reference; the file is already being freed */
				file = NULL;
		}
		rcu_read_unlock();
	}

	return file;
}

void put_filp(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count)) {
		security_file_free(file);
		file_sb_list_del(file);
		file_free(file);
	}
}

static inline int file_list_cpu(struct file *file)
{
#ifdef CONFIG_SMP
	return file->f_sb_list_cpu;
#else
	return smp_processor_id();
#endif
}

/* helper for file_sb_list_add to reduce ifdefs */
static inline void __file_sb_list_add(struct file *file, struct super_block *sb)
{
	struct list_head *list;
#ifdef CONFIG_SMP
	int cpu;
	cpu = smp_processor_id();
	file->f_sb_list_cpu = cpu;
	list = per_cpu_ptr(sb->s_files, cpu);
#else
	list = &sb->s_files;
#endif
	list_add(&file->f_u.fu_list, list);
}

/**
 * file_sb_list_add - add a file to the sb's file list
 * @file: file to add
 * @sb: sb to add it to
 *
 * Use this function to associate a file with the superblock of the inode it
 * refers to.
 */
void file_sb_list_add(struct file *file, struct super_block *sb)
{
	lg_local_lock(files_lglock);
	__file_sb_list_add(file, sb);
	lg_local_unlock(files_lglock);
}

/**
 * file_sb_list_del - remove a file from the sb's file list
 * @file: file to remove
 *
 * Use this function to remove a file from the per-superblock file list it
 * was added to with file_sb_list_add().
 */
void file_sb_list_del(struct file *file)
{
	if (!list_empty(&file->f_u.fu_list)) {
		lg_local_lock_cpu(files_lglock, file_list_cpu(file));
		list_del_init(&file->f_u.fu_list);
		lg_local_unlock_cpu(files_lglock, file_list_cpu(file));
	}
}

#ifdef CONFIG_SMP

/*
 * These macros iterate all files on all CPUs for a given superblock.
 * files_lglock must be held globally.
 */
#define do_file_list_for_each_entry(__sb, __file)	\
{							\
	int i;						\
	for_each_possible_cpu(i) {			\
		struct list_head *list;			\
		list = per_cpu_ptr((__sb)->s_files, i);	\
		list_for_each_entry((__file), list, f_u.fu_list)

#define while_file_list_for_each_entry			\
	}						\
}

#else

#define do_file_list_for_each_entry(__sb, __file)	\
{							\
	struct list_head *list;				\
	list = &(__sb)->s_files;			\
	list_for_each_entry((__file), list, f_u.fu_list)

#define while_file_list_for_each_entry			\
}

#endif
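
/*
 * Illustrative use of the iterators above (fs_may_remount_ro() and
 * mark_files_ro() below are the real users).  files_lglock must be held
 * globally around the whole walk:
 *
 *	lg_global_lock(files_lglock);
 *	do_file_list_for_each_entry(sb, file) {
 *		...inspect file...
 *	} while_file_list_for_each_entry;
 *	lg_global_unlock(files_lglock);
 */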

int fs_may_remount_ro(struct super_block *sb)
{
	struct file *file;
	/* Check that no files are currently opened for writing. */
	lg_global_lock(files_lglock);
	do_file_list_for_each_entry(sb, file) {
		struct inode *inode = file->f_path.dentry->d_inode;

		/* File with pending delete? */
		if (inode->i_nlink == 0)
			goto too_bad;

		/* Writeable file? */
		if (S_ISREG(inode->i_mode) && (file->f_mode & FMODE_WRITE))
			goto too_bad;
	} while_file_list_for_each_entry;
	lg_global_unlock(files_lglock);
	return 1; /* Tis' cool bro. */
too_bad:
	lg_global_unlock(files_lglock);
	return 0;
}

/**
 * mark_files_ro - mark all files read-only
 * @sb: superblock in question
 *
 * All files are marked read-only.  We don't care about files with
 * pending deletes, so this should only be used in 'force' mode.
 */
void mark_files_ro(struct super_block *sb)
{
	struct file *f;

retry:
	lg_global_lock(files_lglock);
	do_file_list_for_each_entry(sb, f) {
		struct vfsmount *mnt;
		if (!S_ISREG(f->f_path.dentry->d_inode->i_mode))
			continue;
		if (!file_count(f))
			continue;
		if (!(f->f_mode & FMODE_WRITE))
			continue;
		spin_lock(&f->f_lock);
		f->f_mode &= ~FMODE_WRITE;
		spin_unlock(&f->f_lock);
		if (file_check_writeable(f) != 0)
			continue;
		file_release_write(f);
		mnt = mntget(f->f_path.mnt);
		/* This can sleep, so we can't hold the spinlock. */
		lg_global_unlock(files_lglock);
		mnt_drop_write(mnt);
		mntput(mnt);
		goto retry;
	} while_file_list_for_each_entry;
	lg_global_unlock(files_lglock);
}

void __init files_init(unsigned long mempages)
{
	unsigned long n;

	filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

	/*
	 * One file with associated inode and dcache is very roughly 1K.
	 * By default, don't use more than 10% of our memory for files.
	 */

	n = (mempages * (PAGE_SIZE / 1024)) / 10;
	files_stat.max_files = max_t(unsigned long, n, NR_FILE);
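	/*
	 * Worked example (illustrative, assuming 4 KiB pages): with 4 GiB of
	 * memory, mempages = 1048576, so n = 1048576 * (4096 / 1024) / 10 =
	 * 419430 files; the max_t() above only matters on machines small
	 * enough that n would drop below NR_FILE.
	 */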
	files_defer_init();
	lg_lock_init(files_lglock);
	percpu_counter_init(&nr_files, 0);
}