mm/rmap: share the i_mmap_rwsem
[deliverable/linux.git] / include / linux / fs.h
1 #ifndef _LINUX_FS_H
2 #define _LINUX_FS_H
3
4
5 #include <linux/linkage.h>
6 #include <linux/wait.h>
7 #include <linux/kdev_t.h>
8 #include <linux/dcache.h>
9 #include <linux/path.h>
10 #include <linux/stat.h>
11 #include <linux/cache.h>
12 #include <linux/list.h>
13 #include <linux/list_lru.h>
14 #include <linux/llist.h>
15 #include <linux/radix-tree.h>
16 #include <linux/rbtree.h>
17 #include <linux/init.h>
18 #include <linux/pid.h>
19 #include <linux/bug.h>
20 #include <linux/mutex.h>
21 #include <linux/rwsem.h>
22 #include <linux/capability.h>
23 #include <linux/semaphore.h>
24 #include <linux/fiemap.h>
25 #include <linux/rculist_bl.h>
26 #include <linux/atomic.h>
27 #include <linux/shrinker.h>
28 #include <linux/migrate_mode.h>
29 #include <linux/uidgid.h>
30 #include <linux/lockdep.h>
31 #include <linux/percpu-rwsem.h>
32 #include <linux/blk_types.h>
33
34 #include <asm/byteorder.h>
35 #include <uapi/linux/fs.h>
36
37 struct export_operations;
38 struct hd_geometry;
39 struct iovec;
40 struct nameidata;
41 struct kiocb;
42 struct kobject;
43 struct pipe_inode_info;
44 struct poll_table_struct;
45 struct kstatfs;
46 struct vm_area_struct;
47 struct vfsmount;
48 struct cred;
49 struct swap_info_struct;
50 struct seq_file;
51 struct workqueue_struct;
52 struct iov_iter;
53
54 extern void __init inode_init(void);
55 extern void __init inode_init_early(void);
56 extern void __init files_init(unsigned long);
57
58 extern struct files_stat_struct files_stat;
59 extern unsigned long get_max_files(void);
60 extern int sysctl_nr_open;
61 extern struct inodes_stat_t inodes_stat;
62 extern int leases_enable, lease_break_time;
63 extern int sysctl_protected_symlinks;
64 extern int sysctl_protected_hardlinks;
65
66 struct buffer_head;
67 typedef int (get_block_t)(struct inode *inode, sector_t iblock,
68 struct buffer_head *bh_result, int create);
69 typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
70 ssize_t bytes, void *private);
71
72 #define MAY_EXEC 0x00000001
73 #define MAY_WRITE 0x00000002
74 #define MAY_READ 0x00000004
75 #define MAY_APPEND 0x00000008
76 #define MAY_ACCESS 0x00000010
77 #define MAY_OPEN 0x00000020
78 #define MAY_CHDIR 0x00000040
79 /* called from RCU mode, don't block */
80 #define MAY_NOT_BLOCK 0x00000080
81
82 /*
83 * flags in file.f_mode. Note that FMODE_READ and FMODE_WRITE must correspond
84 * to O_WRONLY and O_RDWR via the strange trick in __dentry_open()
85 */
86
87 /* file is open for reading */
88 #define FMODE_READ ((__force fmode_t)0x1)
89 /* file is open for writing */
90 #define FMODE_WRITE ((__force fmode_t)0x2)
91 /* file is seekable */
92 #define FMODE_LSEEK ((__force fmode_t)0x4)
93 /* file can be accessed using pread */
94 #define FMODE_PREAD ((__force fmode_t)0x8)
95 /* file can be accessed using pwrite */
96 #define FMODE_PWRITE ((__force fmode_t)0x10)
97 /* File is opened for execution with sys_execve / sys_uselib */
98 #define FMODE_EXEC ((__force fmode_t)0x20)
99 /* File is opened with O_NDELAY (only set for block devices) */
100 #define FMODE_NDELAY ((__force fmode_t)0x40)
101 /* File is opened with O_EXCL (only set for block devices) */
102 #define FMODE_EXCL ((__force fmode_t)0x80)
103 /* File is opened using open(.., 3, ..) and is writeable only for ioctls
104 (specialy hack for floppy.c) */
105 #define FMODE_WRITE_IOCTL ((__force fmode_t)0x100)
106 /* 32bit hashes as llseek() offset (for directories) */
107 #define FMODE_32BITHASH ((__force fmode_t)0x200)
108 /* 64bit hashes as llseek() offset (for directories) */
109 #define FMODE_64BITHASH ((__force fmode_t)0x400)
110
111 /*
112 * Don't update ctime and mtime.
113 *
114 * Currently a special hack for the XFS open_by_handle ioctl, but we'll
115 * hopefully graduate it to a proper O_CMTIME flag supported by open(2) soon.
116 */
117 #define FMODE_NOCMTIME ((__force fmode_t)0x800)
118
119 /* Expect random access pattern */
120 #define FMODE_RANDOM ((__force fmode_t)0x1000)
121
122 /* File is huge (eg. /dev/kmem): treat loff_t as unsigned */
123 #define FMODE_UNSIGNED_OFFSET ((__force fmode_t)0x2000)
124
125 /* File is opened with O_PATH; almost nothing can be done with it */
126 #define FMODE_PATH ((__force fmode_t)0x4000)
127
128 /* File needs atomic accesses to f_pos */
129 #define FMODE_ATOMIC_POS ((__force fmode_t)0x8000)
130 /* Write access to underlying fs */
131 #define FMODE_WRITER ((__force fmode_t)0x10000)
132 /* Has read method(s) */
133 #define FMODE_CAN_READ ((__force fmode_t)0x20000)
134 /* Has write method(s) */
135 #define FMODE_CAN_WRITE ((__force fmode_t)0x40000)
136
137 /* File was opened by fanotify and shouldn't generate fanotify events */
138 #define FMODE_NONOTIFY ((__force fmode_t)0x1000000)
139
140 /*
141 * Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector
142 * that indicates that they should check the contents of the iovec are
143 * valid, but not check the memory that the iovec elements
144 * points too.
145 */
146 #define CHECK_IOVEC_ONLY -1
147
148 /*
149 * The below are the various read and write types that we support. Some of
150 * them include behavioral modifiers that send information down to the
151 * block layer and IO scheduler. Terminology:
152 *
153 * The block layer uses device plugging to defer IO a little bit, in
154 * the hope that we will see more IO very shortly. This increases
155 * coalescing of adjacent IO and thus reduces the number of IOs we
156 * have to send to the device. It also allows for better queuing,
157 * if the IO isn't mergeable. If the caller is going to be waiting
158 * for the IO, then he must ensure that the device is unplugged so
159 * that the IO is dispatched to the driver.
160 *
161 * All IO is handled async in Linux. This is fine for background
162 * writes, but for reads or writes that someone waits for completion
163 * on, we want to notify the block layer and IO scheduler so that they
164 * know about it. That allows them to make better scheduling
165 * decisions. So when the below references 'sync' and 'async', it
166 * is referencing this priority hint.
167 *
168 * With that in mind, the available types are:
169 *
170 * READ A normal read operation. Device will be plugged.
171 * READ_SYNC A synchronous read. Device is not plugged, caller can
172 * immediately wait on this read without caring about
173 * unplugging.
174 * READA Used for read-ahead operations. Lower priority, and the
175 * block layer could (in theory) choose to ignore this
176 * request if it runs into resource problems.
177 * WRITE A normal async write. Device will be plugged.
178 * WRITE_SYNC Synchronous write. Identical to WRITE, but passes down
179 * the hint that someone will be waiting on this IO
180 * shortly. The write equivalent of READ_SYNC.
181 * WRITE_ODIRECT Special case write for O_DIRECT only.
182 * WRITE_FLUSH Like WRITE_SYNC but with preceding cache flush.
183 * WRITE_FUA Like WRITE_SYNC but data is guaranteed to be on
184 * non-volatile media on completion.
185 * WRITE_FLUSH_FUA Combination of WRITE_FLUSH and FUA. The IO is preceded
186 * by a cache flush and data is guaranteed to be on
187 * non-volatile media on completion.
188 *
189 */
190 #define RW_MASK REQ_WRITE
191 #define RWA_MASK REQ_RAHEAD
192
193 #define READ 0
194 #define WRITE RW_MASK
195 #define READA RWA_MASK
196
197 #define READ_SYNC (READ | REQ_SYNC)
198 #define WRITE_SYNC (WRITE | REQ_SYNC | REQ_NOIDLE)
199 #define WRITE_ODIRECT (WRITE | REQ_SYNC)
200 #define WRITE_FLUSH (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH)
201 #define WRITE_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FUA)
202 #define WRITE_FLUSH_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH | REQ_FUA)
203
204 /*
205 * Attribute flags. These should be or-ed together to figure out what
206 * has been changed!
207 */
208 #define ATTR_MODE (1 << 0)
209 #define ATTR_UID (1 << 1)
210 #define ATTR_GID (1 << 2)
211 #define ATTR_SIZE (1 << 3)
212 #define ATTR_ATIME (1 << 4)
213 #define ATTR_MTIME (1 << 5)
214 #define ATTR_CTIME (1 << 6)
215 #define ATTR_ATIME_SET (1 << 7)
216 #define ATTR_MTIME_SET (1 << 8)
217 #define ATTR_FORCE (1 << 9) /* Not a change, but a change it */
218 #define ATTR_ATTR_FLAG (1 << 10)
219 #define ATTR_KILL_SUID (1 << 11)
220 #define ATTR_KILL_SGID (1 << 12)
221 #define ATTR_FILE (1 << 13)
222 #define ATTR_KILL_PRIV (1 << 14)
223 #define ATTR_OPEN (1 << 15) /* Truncating from open(O_TRUNC) */
224 #define ATTR_TIMES_SET (1 << 16)
225
226 /*
227 * Whiteout is represented by a char device. The following constants define the
228 * mode and device number to use.
229 */
230 #define WHITEOUT_MODE 0
231 #define WHITEOUT_DEV 0
232
233 /*
234 * This is the Inode Attributes structure, used for notify_change(). It
235 * uses the above definitions as flags, to know which values have changed.
236 * Also, in this manner, a Filesystem can look at only the values it cares
237 * about. Basically, these are the attributes that the VFS layer can
238 * request to change from the FS layer.
239 *
240 * Derek Atkins <warlord@MIT.EDU> 94-10-20
241 */
242 struct iattr {
243 unsigned int ia_valid;
244 umode_t ia_mode;
245 kuid_t ia_uid;
246 kgid_t ia_gid;
247 loff_t ia_size;
248 struct timespec ia_atime;
249 struct timespec ia_mtime;
250 struct timespec ia_ctime;
251
252 /*
253 * Not an attribute, but an auxiliary info for filesystems wanting to
254 * implement an ftruncate() like method. NOTE: filesystem should
255 * check for (ia_valid & ATTR_FILE), and not for (ia_file != NULL).
256 */
257 struct file *ia_file;
258 };
259
260 /*
261 * Includes for diskquotas.
262 */
263 #include <linux/quota.h>
264
265 /*
266 * Maximum number of layers of fs stack. Needs to be limited to
267 * prevent kernel stack overflow
268 */
269 #define FILESYSTEM_MAX_STACK_DEPTH 2
270
271 /**
272 * enum positive_aop_returns - aop return codes with specific semantics
273 *
274 * @AOP_WRITEPAGE_ACTIVATE: Informs the caller that page writeback has
275 * completed, that the page is still locked, and
276 * should be considered active. The VM uses this hint
277 * to return the page to the active list -- it won't
278 * be a candidate for writeback again in the near
279 * future. Other callers must be careful to unlock
280 * the page if they get this return. Returned by
281 * writepage();
282 *
283 * @AOP_TRUNCATED_PAGE: The AOP method that was handed a locked page has
284 * unlocked it and the page might have been truncated.
285 * The caller should back up to acquiring a new page and
286 * trying again. The aop will be taking reasonable
287 * precautions not to livelock. If the caller held a page
288 * reference, it should drop it before retrying. Returned
289 * by readpage().
290 *
291 * address_space_operation functions return these large constants to indicate
292 * special semantics to the caller. These are much larger than the bytes in a
293 * page to allow for functions that return the number of bytes operated on in a
294 * given page.
295 */
296
297 enum positive_aop_returns {
298 AOP_WRITEPAGE_ACTIVATE = 0x80000,
299 AOP_TRUNCATED_PAGE = 0x80001,
300 };
301
302 #define AOP_FLAG_UNINTERRUPTIBLE 0x0001 /* will not do a short write */
303 #define AOP_FLAG_CONT_EXPAND 0x0002 /* called from cont_expand */
304 #define AOP_FLAG_NOFS 0x0004 /* used by filesystem to direct
305 * helper code (eg buffer layer)
306 * to clear GFP_FS from alloc */
307
308 /*
309 * oh the beauties of C type declarations.
310 */
311 struct page;
312 struct address_space;
313 struct writeback_control;
314
315 /*
316 * "descriptor" for what we're up to with a read.
317 * This allows us to use the same read code yet
318 * have multiple different users of the data that
319 * we read from a file.
320 *
321 * The simplest case just copies the data to user
322 * mode.
323 */
324 typedef struct {
325 size_t written;
326 size_t count;
327 union {
328 char __user *buf;
329 void *data;
330 } arg;
331 int error;
332 } read_descriptor_t;
333
334 typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
335 unsigned long, unsigned long);
336
337 struct address_space_operations {
338 int (*writepage)(struct page *page, struct writeback_control *wbc);
339 int (*readpage)(struct file *, struct page *);
340
341 /* Write back some dirty pages from this mapping. */
342 int (*writepages)(struct address_space *, struct writeback_control *);
343
344 /* Set a page dirty. Return true if this dirtied it */
345 int (*set_page_dirty)(struct page *page);
346
347 int (*readpages)(struct file *filp, struct address_space *mapping,
348 struct list_head *pages, unsigned nr_pages);
349
350 int (*write_begin)(struct file *, struct address_space *mapping,
351 loff_t pos, unsigned len, unsigned flags,
352 struct page **pagep, void **fsdata);
353 int (*write_end)(struct file *, struct address_space *mapping,
354 loff_t pos, unsigned len, unsigned copied,
355 struct page *page, void *fsdata);
356
357 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
358 sector_t (*bmap)(struct address_space *, sector_t);
359 void (*invalidatepage) (struct page *, unsigned int, unsigned int);
360 int (*releasepage) (struct page *, gfp_t);
361 void (*freepage)(struct page *);
362 ssize_t (*direct_IO)(int, struct kiocb *, struct iov_iter *iter, loff_t offset);
363 int (*get_xip_mem)(struct address_space *, pgoff_t, int,
364 void **, unsigned long *);
365 /*
366 * migrate the contents of a page to the specified target. If
367 * migrate_mode is MIGRATE_ASYNC, it must not block.
368 */
369 int (*migratepage) (struct address_space *,
370 struct page *, struct page *, enum migrate_mode);
371 int (*launder_page) (struct page *);
372 int (*is_partially_uptodate) (struct page *, unsigned long,
373 unsigned long);
374 void (*is_dirty_writeback) (struct page *, bool *, bool *);
375 int (*error_remove_page)(struct address_space *, struct page *);
376
377 /* swapfile support */
378 int (*swap_activate)(struct swap_info_struct *sis, struct file *file,
379 sector_t *span);
380 void (*swap_deactivate)(struct file *file);
381 };
382
383 extern const struct address_space_operations empty_aops;
384
385 /*
386 * pagecache_write_begin/pagecache_write_end must be used by general code
387 * to write into the pagecache.
388 */
389 int pagecache_write_begin(struct file *, struct address_space *mapping,
390 loff_t pos, unsigned len, unsigned flags,
391 struct page **pagep, void **fsdata);
392
393 int pagecache_write_end(struct file *, struct address_space *mapping,
394 loff_t pos, unsigned len, unsigned copied,
395 struct page *page, void *fsdata);
396
397 struct backing_dev_info;
398 struct address_space {
399 struct inode *host; /* owner: inode, block_device */
400 struct radix_tree_root page_tree; /* radix tree of all pages */
401 spinlock_t tree_lock; /* and lock protecting it */
402 atomic_t i_mmap_writable;/* count VM_SHARED mappings */
403 struct rb_root i_mmap; /* tree of private and shared mappings */
404 struct list_head i_mmap_nonlinear;/*list VM_NONLINEAR mappings */
405 struct rw_semaphore i_mmap_rwsem; /* protect tree, count, list */
406 /* Protected by tree_lock together with the radix tree */
407 unsigned long nrpages; /* number of total pages */
408 unsigned long nrshadows; /* number of shadow entries */
409 pgoff_t writeback_index;/* writeback starts here */
410 const struct address_space_operations *a_ops; /* methods */
411 unsigned long flags; /* error bits/gfp mask */
412 struct backing_dev_info *backing_dev_info; /* device readahead, etc */
413 spinlock_t private_lock; /* for use by the address_space */
414 struct list_head private_list; /* ditto */
415 void *private_data; /* ditto */
416 } __attribute__((aligned(sizeof(long))));
417 /*
418 * On most architectures that alignment is already the case; but
419 * must be enforced here for CRIS, to let the least significant bit
420 * of struct page's "mapping" pointer be used for PAGE_MAPPING_ANON.
421 */
422 struct request_queue;
423
424 struct block_device {
425 dev_t bd_dev; /* not a kdev_t - it's a search key */
426 int bd_openers;
427 struct inode * bd_inode; /* will die */
428 struct super_block * bd_super;
429 struct mutex bd_mutex; /* open/close mutex */
430 struct list_head bd_inodes;
431 void * bd_claiming;
432 void * bd_holder;
433 int bd_holders;
434 bool bd_write_holder;
435 #ifdef CONFIG_SYSFS
436 struct list_head bd_holder_disks;
437 #endif
438 struct block_device * bd_contains;
439 unsigned bd_block_size;
440 struct hd_struct * bd_part;
441 /* number of times partitions within this device have been opened. */
442 unsigned bd_part_count;
443 int bd_invalidated;
444 struct gendisk * bd_disk;
445 struct request_queue * bd_queue;
446 struct list_head bd_list;
447 /*
448 * Private data. You must have bd_claim'ed the block_device
449 * to use this. NOTE: bd_claim allows an owner to claim
450 * the same device multiple times, the owner must take special
451 * care to not mess up bd_private for that case.
452 */
453 unsigned long bd_private;
454
455 /* The counter of freeze processes */
456 int bd_fsfreeze_count;
457 /* Mutex for freeze */
458 struct mutex bd_fsfreeze_mutex;
459 };
460
461 /*
462 * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
463 * radix trees
464 */
465 #define PAGECACHE_TAG_DIRTY 0
466 #define PAGECACHE_TAG_WRITEBACK 1
467 #define PAGECACHE_TAG_TOWRITE 2
468
469 int mapping_tagged(struct address_space *mapping, int tag);
470
471 static inline void i_mmap_lock_write(struct address_space *mapping)
472 {
473 down_write(&mapping->i_mmap_rwsem);
474 }
475
476 static inline void i_mmap_unlock_write(struct address_space *mapping)
477 {
478 up_write(&mapping->i_mmap_rwsem);
479 }
480
481 static inline void i_mmap_lock_read(struct address_space *mapping)
482 {
483 down_read(&mapping->i_mmap_rwsem);
484 }
485
486 static inline void i_mmap_unlock_read(struct address_space *mapping)
487 {
488 up_read(&mapping->i_mmap_rwsem);
489 }
490
491 /*
492 * Might pages of this file be mapped into userspace?
493 */
494 static inline int mapping_mapped(struct address_space *mapping)
495 {
496 return !RB_EMPTY_ROOT(&mapping->i_mmap) ||
497 !list_empty(&mapping->i_mmap_nonlinear);
498 }
499
500 /*
501 * Might pages of this file have been modified in userspace?
502 * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap_pgoff
503 * marks vma as VM_SHARED if it is shared, and the file was opened for
504 * writing i.e. vma may be mprotected writable even if now readonly.
505 *
506 * If i_mmap_writable is negative, no new writable mappings are allowed. You
507 * can only deny writable mappings, if none exists right now.
508 */
509 static inline int mapping_writably_mapped(struct address_space *mapping)
510 {
511 return atomic_read(&mapping->i_mmap_writable) > 0;
512 }
513
514 static inline int mapping_map_writable(struct address_space *mapping)
515 {
516 return atomic_inc_unless_negative(&mapping->i_mmap_writable) ?
517 0 : -EPERM;
518 }
519
520 static inline void mapping_unmap_writable(struct address_space *mapping)
521 {
522 atomic_dec(&mapping->i_mmap_writable);
523 }
524
525 static inline int mapping_deny_writable(struct address_space *mapping)
526 {
527 return atomic_dec_unless_positive(&mapping->i_mmap_writable) ?
528 0 : -EBUSY;
529 }
530
531 static inline void mapping_allow_writable(struct address_space *mapping)
532 {
533 atomic_inc(&mapping->i_mmap_writable);
534 }
535
536 /*
537 * Use sequence counter to get consistent i_size on 32-bit processors.
538 */
539 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
540 #include <linux/seqlock.h>
541 #define __NEED_I_SIZE_ORDERED
542 #define i_size_ordered_init(inode) seqcount_init(&inode->i_size_seqcount)
543 #else
544 #define i_size_ordered_init(inode) do { } while (0)
545 #endif
546
547 struct posix_acl;
548 #define ACL_NOT_CACHED ((void *)(-1))
549
550 #define IOP_FASTPERM 0x0001
551 #define IOP_LOOKUP 0x0002
552 #define IOP_NOFOLLOW 0x0004
553
554 /*
555 * Keep mostly read-only and often accessed (especially for
556 * the RCU path lookup and 'stat' data) fields at the beginning
557 * of the 'struct inode'
558 */
559 struct inode {
560 umode_t i_mode;
561 unsigned short i_opflags;
562 kuid_t i_uid;
563 kgid_t i_gid;
564 unsigned int i_flags;
565
566 #ifdef CONFIG_FS_POSIX_ACL
567 struct posix_acl *i_acl;
568 struct posix_acl *i_default_acl;
569 #endif
570
571 const struct inode_operations *i_op;
572 struct super_block *i_sb;
573 struct address_space *i_mapping;
574
575 #ifdef CONFIG_SECURITY
576 void *i_security;
577 #endif
578
579 /* Stat data, not accessed from path walking */
580 unsigned long i_ino;
581 /*
582 * Filesystems may only read i_nlink directly. They shall use the
583 * following functions for modification:
584 *
585 * (set|clear|inc|drop)_nlink
586 * inode_(inc|dec)_link_count
587 */
588 union {
589 const unsigned int i_nlink;
590 unsigned int __i_nlink;
591 };
592 dev_t i_rdev;
593 loff_t i_size;
594 struct timespec i_atime;
595 struct timespec i_mtime;
596 struct timespec i_ctime;
597 spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
598 unsigned short i_bytes;
599 unsigned int i_blkbits;
600 blkcnt_t i_blocks;
601
602 #ifdef __NEED_I_SIZE_ORDERED
603 seqcount_t i_size_seqcount;
604 #endif
605
606 /* Misc */
607 unsigned long i_state;
608 struct mutex i_mutex;
609
610 unsigned long dirtied_when; /* jiffies of first dirtying */
611
612 struct hlist_node i_hash;
613 struct list_head i_wb_list; /* backing dev IO list */
614 struct list_head i_lru; /* inode LRU list */
615 struct list_head i_sb_list;
616 union {
617 struct hlist_head i_dentry;
618 struct rcu_head i_rcu;
619 };
620 u64 i_version;
621 atomic_t i_count;
622 atomic_t i_dio_count;
623 atomic_t i_writecount;
624 #ifdef CONFIG_IMA
625 atomic_t i_readcount; /* struct files open RO */
626 #endif
627 const struct file_operations *i_fop; /* former ->i_op->default_file_ops */
628 struct file_lock *i_flock;
629 struct address_space i_data;
630 struct list_head i_devices;
631 union {
632 struct pipe_inode_info *i_pipe;
633 struct block_device *i_bdev;
634 struct cdev *i_cdev;
635 };
636
637 __u32 i_generation;
638
639 #ifdef CONFIG_FSNOTIFY
640 __u32 i_fsnotify_mask; /* all events this inode cares about */
641 struct hlist_head i_fsnotify_marks;
642 #endif
643
644 void *i_private; /* fs or device private pointer */
645 };
646
647 static inline int inode_unhashed(struct inode *inode)
648 {
649 return hlist_unhashed(&inode->i_hash);
650 }
651
652 /*
653 * inode->i_mutex nesting subclasses for the lock validator:
654 *
655 * 0: the object of the current VFS operation
656 * 1: parent
657 * 2: child/target
658 * 3: xattr
659 * 4: second non-directory
660 * 5: second parent (when locking independent directories in rename)
661 *
662 * I_MUTEX_NONDIR2 is for certain operations (such as rename) which lock two
663 * non-directories at once.
664 *
665 * The locking order between these classes is
666 * parent[2] -> child -> grandchild -> normal -> xattr -> second non-directory
667 */
668 enum inode_i_mutex_lock_class
669 {
670 I_MUTEX_NORMAL,
671 I_MUTEX_PARENT,
672 I_MUTEX_CHILD,
673 I_MUTEX_XATTR,
674 I_MUTEX_NONDIR2,
675 I_MUTEX_PARENT2,
676 };
677
678 void lock_two_nondirectories(struct inode *, struct inode*);
679 void unlock_two_nondirectories(struct inode *, struct inode*);
680
681 /*
682 * NOTE: in a 32bit arch with a preemptable kernel and
683 * an UP compile the i_size_read/write must be atomic
684 * with respect to the local cpu (unlike with preempt disabled),
685 * but they don't need to be atomic with respect to other cpus like in
686 * true SMP (so they need either to either locally disable irq around
687 * the read or for example on x86 they can be still implemented as a
688 * cmpxchg8b without the need of the lock prefix). For SMP compiles
689 * and 64bit archs it makes no difference if preempt is enabled or not.
690 */
691 static inline loff_t i_size_read(const struct inode *inode)
692 {
693 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
694 loff_t i_size;
695 unsigned int seq;
696
697 do {
698 seq = read_seqcount_begin(&inode->i_size_seqcount);
699 i_size = inode->i_size;
700 } while (read_seqcount_retry(&inode->i_size_seqcount, seq));
701 return i_size;
702 #elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
703 loff_t i_size;
704
705 preempt_disable();
706 i_size = inode->i_size;
707 preempt_enable();
708 return i_size;
709 #else
710 return inode->i_size;
711 #endif
712 }
713
714 /*
715 * NOTE: unlike i_size_read(), i_size_write() does need locking around it
716 * (normally i_mutex), otherwise on 32bit/SMP an update of i_size_seqcount
717 * can be lost, resulting in subsequent i_size_read() calls spinning forever.
718 */
719 static inline void i_size_write(struct inode *inode, loff_t i_size)
720 {
721 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
722 preempt_disable();
723 write_seqcount_begin(&inode->i_size_seqcount);
724 inode->i_size = i_size;
725 write_seqcount_end(&inode->i_size_seqcount);
726 preempt_enable();
727 #elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
728 preempt_disable();
729 inode->i_size = i_size;
730 preempt_enable();
731 #else
732 inode->i_size = i_size;
733 #endif
734 }
735
736 /* Helper functions so that in most cases filesystems will
737 * not need to deal directly with kuid_t and kgid_t and can
738 * instead deal with the raw numeric values that are stored
739 * in the filesystem.
740 */
741 static inline uid_t i_uid_read(const struct inode *inode)
742 {
743 return from_kuid(&init_user_ns, inode->i_uid);
744 }
745
746 static inline gid_t i_gid_read(const struct inode *inode)
747 {
748 return from_kgid(&init_user_ns, inode->i_gid);
749 }
750
751 static inline void i_uid_write(struct inode *inode, uid_t uid)
752 {
753 inode->i_uid = make_kuid(&init_user_ns, uid);
754 }
755
756 static inline void i_gid_write(struct inode *inode, gid_t gid)
757 {
758 inode->i_gid = make_kgid(&init_user_ns, gid);
759 }
760
761 static inline unsigned iminor(const struct inode *inode)
762 {
763 return MINOR(inode->i_rdev);
764 }
765
766 static inline unsigned imajor(const struct inode *inode)
767 {
768 return MAJOR(inode->i_rdev);
769 }
770
771 extern struct block_device *I_BDEV(struct inode *inode);
772
773 struct fown_struct {
774 rwlock_t lock; /* protects pid, uid, euid fields */
775 struct pid *pid; /* pid or -pgrp where SIGIO should be sent */
776 enum pid_type pid_type; /* Kind of process group SIGIO should be sent to */
777 kuid_t uid, euid; /* uid/euid of process setting the owner */
778 int signum; /* posix.1b rt signal to be delivered on IO */
779 };
780
781 /*
782 * Track a single file's readahead state
783 */
784 struct file_ra_state {
785 pgoff_t start; /* where readahead started */
786 unsigned int size; /* # of readahead pages */
787 unsigned int async_size; /* do asynchronous readahead when
788 there are only # of pages ahead */
789
790 unsigned int ra_pages; /* Maximum readahead window */
791 unsigned int mmap_miss; /* Cache miss stat for mmap accesses */
792 loff_t prev_pos; /* Cache last read() position */
793 };
794
795 /*
796 * Check if @index falls in the readahead windows.
797 */
798 static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
799 {
800 return (index >= ra->start &&
801 index < ra->start + ra->size);
802 }
803
804 struct file {
805 union {
806 struct llist_node fu_llist;
807 struct rcu_head fu_rcuhead;
808 } f_u;
809 struct path f_path;
810 struct inode *f_inode; /* cached value */
811 const struct file_operations *f_op;
812
813 /*
814 * Protects f_ep_links, f_flags.
815 * Must not be taken from IRQ context.
816 */
817 spinlock_t f_lock;
818 atomic_long_t f_count;
819 unsigned int f_flags;
820 fmode_t f_mode;
821 struct mutex f_pos_lock;
822 loff_t f_pos;
823 struct fown_struct f_owner;
824 const struct cred *f_cred;
825 struct file_ra_state f_ra;
826
827 u64 f_version;
828 #ifdef CONFIG_SECURITY
829 void *f_security;
830 #endif
831 /* needed for tty driver, and maybe others */
832 void *private_data;
833
834 #ifdef CONFIG_EPOLL
835 /* Used by fs/eventpoll.c to link all the hooks to this file */
836 struct list_head f_ep_links;
837 struct list_head f_tfile_llink;
838 #endif /* #ifdef CONFIG_EPOLL */
839 struct address_space *f_mapping;
840 } __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
841
842 struct file_handle {
843 __u32 handle_bytes;
844 int handle_type;
845 /* file identifier */
846 unsigned char f_handle[0];
847 };
848
849 static inline struct file *get_file(struct file *f)
850 {
851 atomic_long_inc(&f->f_count);
852 return f;
853 }
854 #define fput_atomic(x) atomic_long_add_unless(&(x)->f_count, -1, 1)
855 #define file_count(x) atomic_long_read(&(x)->f_count)
856
857 #define MAX_NON_LFS ((1UL<<31) - 1)
858
859 /* Page cache limit. The filesystems should put that into their s_maxbytes
860 limits, otherwise bad things can happen in VM. */
861 #if BITS_PER_LONG==32
862 #define MAX_LFS_FILESIZE (((loff_t)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
863 #elif BITS_PER_LONG==64
864 #define MAX_LFS_FILESIZE ((loff_t)0x7fffffffffffffffLL)
865 #endif
866
867 #define FL_POSIX 1
868 #define FL_FLOCK 2
869 #define FL_DELEG 4 /* NFSv4 delegation */
870 #define FL_ACCESS 8 /* not trying to lock, just looking */
871 #define FL_EXISTS 16 /* when unlocking, test for existence */
872 #define FL_LEASE 32 /* lease held on this file */
873 #define FL_CLOSE 64 /* unlock on close */
874 #define FL_SLEEP 128 /* A blocking lock */
875 #define FL_DOWNGRADE_PENDING 256 /* Lease is being downgraded */
876 #define FL_UNLOCK_PENDING 512 /* Lease is being broken */
877 #define FL_OFDLCK 1024 /* lock is "owned" by struct file */
878
879 /*
880 * Special return value from posix_lock_file() and vfs_lock_file() for
881 * asynchronous locking.
882 */
883 #define FILE_LOCK_DEFERRED 1
884
885 /* legacy typedef, should eventually be removed */
886 typedef void *fl_owner_t;
887
888 struct file_lock_operations {
889 void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
890 void (*fl_release_private)(struct file_lock *);
891 };
892
893 struct lock_manager_operations {
894 int (*lm_compare_owner)(struct file_lock *, struct file_lock *);
895 unsigned long (*lm_owner_key)(struct file_lock *);
896 void (*lm_get_owner)(struct file_lock *, struct file_lock *);
897 void (*lm_put_owner)(struct file_lock *);
898 void (*lm_notify)(struct file_lock *); /* unblock callback */
899 int (*lm_grant)(struct file_lock *, int);
900 bool (*lm_break)(struct file_lock *);
901 int (*lm_change)(struct file_lock **, int, struct list_head *);
902 void (*lm_setup)(struct file_lock *, void **);
903 };
904
905 struct lock_manager {
906 struct list_head list;
907 };
908
909 struct net;
910 void locks_start_grace(struct net *, struct lock_manager *);
911 void locks_end_grace(struct lock_manager *);
912 int locks_in_grace(struct net *);
913
914 /* that will die - we need it for nfs_lock_info */
915 #include <linux/nfs_fs_i.h>
916
917 /*
918 * struct file_lock represents a generic "file lock". It's used to represent
919 * POSIX byte range locks, BSD (flock) locks, and leases. It's important to
920 * note that the same struct is used to represent both a request for a lock and
921 * the lock itself, but the same object is never used for both.
922 *
923 * FIXME: should we create a separate "struct lock_request" to help distinguish
924 * these two uses?
925 *
926 * The i_flock list is ordered by:
927 *
928 * 1) lock type -- FL_LEASEs first, then FL_FLOCK, and finally FL_POSIX
929 * 2) lock owner
930 * 3) lock range start
931 * 4) lock range end
932 *
933 * Obviously, the last two criteria only matter for POSIX locks.
934 */
935 struct file_lock {
936 struct file_lock *fl_next; /* singly linked list for this inode */
937 struct hlist_node fl_link; /* node in global lists */
938 struct list_head fl_block; /* circular list of blocked processes */
939 fl_owner_t fl_owner;
940 unsigned int fl_flags;
941 unsigned char fl_type;
942 unsigned int fl_pid;
943 int fl_link_cpu; /* what cpu's list is this on? */
944 struct pid *fl_nspid;
945 wait_queue_head_t fl_wait;
946 struct file *fl_file;
947 loff_t fl_start;
948 loff_t fl_end;
949
950 struct fasync_struct * fl_fasync; /* for lease break notifications */
951 /* for lease breaks: */
952 unsigned long fl_break_time;
953 unsigned long fl_downgrade_time;
954
955 const struct file_lock_operations *fl_ops; /* Callbacks for filesystems */
956 const struct lock_manager_operations *fl_lmops; /* Callbacks for lockmanagers */
957 union {
958 struct nfs_lock_info nfs_fl;
959 struct nfs4_lock_info nfs4_fl;
960 struct {
961 struct list_head link; /* link in AFS vnode's pending_locks list */
962 int state; /* state of grant or error if -ve */
963 } afs;
964 } fl_u;
965 };
966
967 /* The following constant reflects the upper bound of the file/locking space */
968 #ifndef OFFSET_MAX
969 #define INT_LIMIT(x) (~((x)1 << (sizeof(x)*8 - 1)))
970 #define OFFSET_MAX INT_LIMIT(loff_t)
971 #define OFFT_OFFSET_MAX INT_LIMIT(off_t)
972 #endif
973
974 #include <linux/fcntl.h>
975
976 extern void send_sigio(struct fown_struct *fown, int fd, int band);
977
978 #ifdef CONFIG_FILE_LOCKING
979 extern int fcntl_getlk(struct file *, unsigned int, struct flock __user *);
980 extern int fcntl_setlk(unsigned int, struct file *, unsigned int,
981 struct flock __user *);
982
983 #if BITS_PER_LONG == 32
984 extern int fcntl_getlk64(struct file *, unsigned int, struct flock64 __user *);
985 extern int fcntl_setlk64(unsigned int, struct file *, unsigned int,
986 struct flock64 __user *);
987 #endif
988
989 extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg);
990 extern int fcntl_getlease(struct file *filp);
991
992 /* fs/locks.c */
993 void locks_free_lock(struct file_lock *fl);
994 extern void locks_init_lock(struct file_lock *);
995 extern struct file_lock * locks_alloc_lock(void);
996 extern void locks_copy_lock(struct file_lock *, struct file_lock *);
997 extern void locks_copy_conflock(struct file_lock *, struct file_lock *);
998 extern void locks_remove_posix(struct file *, fl_owner_t);
999 extern void locks_remove_file(struct file *);
1000 extern void locks_release_private(struct file_lock *);
1001 extern void posix_test_lock(struct file *, struct file_lock *);
1002 extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *);
1003 extern int posix_lock_file_wait(struct file *, struct file_lock *);
1004 extern int posix_unblock_lock(struct file_lock *);
1005 extern int vfs_test_lock(struct file *, struct file_lock *);
1006 extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *);
1007 extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
1008 extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl);
1009 extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type);
1010 extern void lease_get_mtime(struct inode *, struct timespec *time);
1011 extern int generic_setlease(struct file *, long, struct file_lock **, void **priv);
1012 extern int vfs_setlease(struct file *, long, struct file_lock **, void **);
1013 extern int lease_modify(struct file_lock **, int, struct list_head *);
1014 #else /* !CONFIG_FILE_LOCKING */
1015 static inline int fcntl_getlk(struct file *file, unsigned int cmd,
1016 struct flock __user *user)
1017 {
1018 return -EINVAL;
1019 }
1020
1021 static inline int fcntl_setlk(unsigned int fd, struct file *file,
1022 unsigned int cmd, struct flock __user *user)
1023 {
1024 return -EACCES;
1025 }
1026
1027 #if BITS_PER_LONG == 32
1028 static inline int fcntl_getlk64(struct file *file, unsigned int cmd,
1029 struct flock64 __user *user)
1030 {
1031 return -EINVAL;
1032 }
1033
1034 static inline int fcntl_setlk64(unsigned int fd, struct file *file,
1035 unsigned int cmd, struct flock64 __user *user)
1036 {
1037 return -EACCES;
1038 }
1039 #endif
1040 static inline int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
1041 {
1042 return -EINVAL;
1043 }
1044
1045 static inline int fcntl_getlease(struct file *filp)
1046 {
1047 return F_UNLCK;
1048 }
1049
1050 static inline void locks_init_lock(struct file_lock *fl)
1051 {
1052 return;
1053 }
1054
1055 static inline void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
1056 {
1057 return;
1058 }
1059
1060 static inline void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
1061 {
1062 return;
1063 }
1064
1065 static inline void locks_remove_posix(struct file *filp, fl_owner_t owner)
1066 {
1067 return;
1068 }
1069
1070 static inline void locks_remove_file(struct file *filp)
1071 {
1072 return;
1073 }
1074
1075 static inline void posix_test_lock(struct file *filp, struct file_lock *fl)
1076 {
1077 return;
1078 }
1079
1080 static inline int posix_lock_file(struct file *filp, struct file_lock *fl,
1081 struct file_lock *conflock)
1082 {
1083 return -ENOLCK;
1084 }
1085
1086 static inline int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
1087 {
1088 return -ENOLCK;
1089 }
1090
1091 static inline int posix_unblock_lock(struct file_lock *waiter)
1092 {
1093 return -ENOENT;
1094 }
1095
1096 static inline int vfs_test_lock(struct file *filp, struct file_lock *fl)
1097 {
1098 return 0;
1099 }
1100
1101 static inline int vfs_lock_file(struct file *filp, unsigned int cmd,
1102 struct file_lock *fl, struct file_lock *conf)
1103 {
1104 return -ENOLCK;
1105 }
1106
1107 static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
1108 {
1109 return 0;
1110 }
1111
1112 static inline int flock_lock_file_wait(struct file *filp,
1113 struct file_lock *request)
1114 {
1115 return -ENOLCK;
1116 }
1117
1118 static inline int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
1119 {
1120 return 0;
1121 }
1122
1123 static inline void lease_get_mtime(struct inode *inode, struct timespec *time)
1124 {
1125 return;
1126 }
1127
1128 static inline int generic_setlease(struct file *filp, long arg,
1129 struct file_lock **flp, void **priv)
1130 {
1131 return -EINVAL;
1132 }
1133
1134 static inline int vfs_setlease(struct file *filp, long arg,
1135 struct file_lock **lease, void **priv)
1136 {
1137 return -EINVAL;
1138 }
1139
1140 static inline int lease_modify(struct file_lock **before, int arg,
1141 struct list_head *dispose)
1142 {
1143 return -EINVAL;
1144 }
1145 #endif /* !CONFIG_FILE_LOCKING */
1146
1147
1148 struct fasync_struct {
1149 spinlock_t fa_lock;
1150 int magic;
1151 int fa_fd;
1152 struct fasync_struct *fa_next; /* singly linked list */
1153 struct file *fa_file;
1154 struct rcu_head fa_rcu;
1155 };
1156
1157 #define FASYNC_MAGIC 0x4601
1158
1159 /* SMP safe fasync helpers: */
1160 extern int fasync_helper(int, struct file *, int, struct fasync_struct **);
1161 extern struct fasync_struct *fasync_insert_entry(int, struct file *, struct fasync_struct **, struct fasync_struct *);
1162 extern int fasync_remove_entry(struct file *, struct fasync_struct **);
1163 extern struct fasync_struct *fasync_alloc(void);
1164 extern void fasync_free(struct fasync_struct *);
1165
1166 /* can be called from interrupts */
1167 extern void kill_fasync(struct fasync_struct **, int, int);
1168
1169 extern void __f_setown(struct file *filp, struct pid *, enum pid_type, int force);
1170 extern void f_setown(struct file *filp, unsigned long arg, int force);
1171 extern void f_delown(struct file *filp);
1172 extern pid_t f_getown(struct file *filp);
1173 extern int send_sigurg(struct fown_struct *fown);
1174
1175 struct mm_struct;
1176
1177 /*
1178 * Umount options
1179 */
1180
1181 #define MNT_FORCE 0x00000001 /* Attempt to forcibily umount */
1182 #define MNT_DETACH 0x00000002 /* Just detach from the tree */
1183 #define MNT_EXPIRE 0x00000004 /* Mark for expiry */
1184 #define UMOUNT_NOFOLLOW 0x00000008 /* Don't follow symlink on umount */
1185 #define UMOUNT_UNUSED 0x80000000 /* Flag guaranteed to be unused */
1186
1187 extern struct list_head super_blocks;
1188 extern spinlock_t sb_lock;
1189
1190 /* Possible states of 'frozen' field */
1191 enum {
1192 SB_UNFROZEN = 0, /* FS is unfrozen */
1193 SB_FREEZE_WRITE = 1, /* Writes, dir ops, ioctls frozen */
1194 SB_FREEZE_PAGEFAULT = 2, /* Page faults stopped as well */
1195 SB_FREEZE_FS = 3, /* For internal FS use (e.g. to stop
1196 * internal threads if needed) */
1197 SB_FREEZE_COMPLETE = 4, /* ->freeze_fs finished successfully */
1198 };
1199
1200 #define SB_FREEZE_LEVELS (SB_FREEZE_COMPLETE - 1)
1201
1202 struct sb_writers {
1203 /* Counters for counting writers at each level */
1204 struct percpu_counter counter[SB_FREEZE_LEVELS];
1205 wait_queue_head_t wait; /* queue for waiting for
1206 writers / faults to finish */
1207 int frozen; /* Is sb frozen? */
1208 wait_queue_head_t wait_unfrozen; /* queue for waiting for
1209 sb to be thawed */
1210 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1211 struct lockdep_map lock_map[SB_FREEZE_LEVELS];
1212 #endif
1213 };
1214
1215 struct super_block {
1216 struct list_head s_list; /* Keep this first */
1217 dev_t s_dev; /* search index; _not_ kdev_t */
1218 unsigned char s_blocksize_bits;
1219 unsigned long s_blocksize;
1220 loff_t s_maxbytes; /* Max file size */
1221 struct file_system_type *s_type;
1222 const struct super_operations *s_op;
1223 const struct dquot_operations *dq_op;
1224 const struct quotactl_ops *s_qcop;
1225 const struct export_operations *s_export_op;
1226 unsigned long s_flags;
1227 unsigned long s_magic;
1228 struct dentry *s_root;
1229 struct rw_semaphore s_umount;
1230 int s_count;
1231 atomic_t s_active;
1232 #ifdef CONFIG_SECURITY
1233 void *s_security;
1234 #endif
1235 const struct xattr_handler **s_xattr;
1236
1237 struct list_head s_inodes; /* all inodes */
1238 struct hlist_bl_head s_anon; /* anonymous dentries for (nfs) exporting */
1239 struct list_head s_mounts; /* list of mounts; _not_ for fs use */
1240 struct block_device *s_bdev;
1241 struct backing_dev_info *s_bdi;
1242 struct mtd_info *s_mtd;
1243 struct hlist_node s_instances;
1244 unsigned int s_quota_types; /* Bitmask of supported quota types */
1245 struct quota_info s_dquot; /* Diskquota specific options */
1246
1247 struct sb_writers s_writers;
1248
1249 char s_id[32]; /* Informational name */
1250 u8 s_uuid[16]; /* UUID */
1251
1252 void *s_fs_info; /* Filesystem private info */
1253 unsigned int s_max_links;
1254 fmode_t s_mode;
1255
1256 /* Granularity of c/m/atime in ns.
1257 Cannot be worse than a second */
1258 u32 s_time_gran;
1259
1260 /*
1261 * The next field is for VFS *only*. No filesystems have any business
1262 * even looking at it. You had been warned.
1263 */
1264 struct mutex s_vfs_rename_mutex; /* Kludge */
1265
1266 /*
1267 * Filesystem subtype. If non-empty the filesystem type field
1268 * in /proc/mounts will be "type.subtype"
1269 */
1270 char *s_subtype;
1271
1272 /*
1273 * Saved mount options for lazy filesystems using
1274 * generic_show_options()
1275 */
1276 char __rcu *s_options;
1277 const struct dentry_operations *s_d_op; /* default d_op for dentries */
1278
1279 /*
1280 * Saved pool identifier for cleancache (-1 means none)
1281 */
1282 int cleancache_poolid;
1283
1284 struct shrinker s_shrink; /* per-sb shrinker handle */
1285
1286 /* Number of inodes with nlink == 0 but still referenced */
1287 atomic_long_t s_remove_count;
1288
1289 /* Being remounted read-only */
1290 int s_readonly_remount;
1291
1292 /* AIO completions deferred from interrupt context */
1293 struct workqueue_struct *s_dio_done_wq;
1294 struct hlist_head s_pins;
1295
1296 /*
1297 * Keep the lru lists last in the structure so they always sit on their
1298 * own individual cachelines.
1299 */
1300 struct list_lru s_dentry_lru ____cacheline_aligned_in_smp;
1301 struct list_lru s_inode_lru ____cacheline_aligned_in_smp;
1302 struct rcu_head rcu;
1303
1304 /*
1305 * Indicates how deep in a filesystem stack this SB is
1306 */
1307 int s_stack_depth;
1308 };
1309
1310 extern struct timespec current_fs_time(struct super_block *sb);
1311
1312 /*
1313 * Snapshotting support.
1314 */
1315
1316 void __sb_end_write(struct super_block *sb, int level);
1317 int __sb_start_write(struct super_block *sb, int level, bool wait);
1318
1319 /**
1320 * sb_end_write - drop write access to a superblock
1321 * @sb: the super we wrote to
1322 *
1323 * Decrement number of writers to the filesystem. Wake up possible waiters
1324 * wanting to freeze the filesystem.
1325 */
1326 static inline void sb_end_write(struct super_block *sb)
1327 {
1328 __sb_end_write(sb, SB_FREEZE_WRITE);
1329 }
1330
1331 /**
1332 * sb_end_pagefault - drop write access to a superblock from a page fault
1333 * @sb: the super we wrote to
1334 *
1335 * Decrement number of processes handling write page fault to the filesystem.
1336 * Wake up possible waiters wanting to freeze the filesystem.
1337 */
1338 static inline void sb_end_pagefault(struct super_block *sb)
1339 {
1340 __sb_end_write(sb, SB_FREEZE_PAGEFAULT);
1341 }
1342
1343 /**
1344 * sb_end_intwrite - drop write access to a superblock for internal fs purposes
1345 * @sb: the super we wrote to
1346 *
1347 * Decrement fs-internal number of writers to the filesystem. Wake up possible
1348 * waiters wanting to freeze the filesystem.
1349 */
1350 static inline void sb_end_intwrite(struct super_block *sb)
1351 {
1352 __sb_end_write(sb, SB_FREEZE_FS);
1353 }
1354
1355 /**
1356 * sb_start_write - get write access to a superblock
1357 * @sb: the super we write to
1358 *
1359 * When a process wants to write data or metadata to a file system (i.e. dirty
1360 * a page or an inode), it should embed the operation in a sb_start_write() -
1361 * sb_end_write() pair to get exclusion against file system freezing. This
1362 * function increments number of writers preventing freezing. If the file
1363 * system is already frozen, the function waits until the file system is
1364 * thawed.
1365 *
1366 * Since freeze protection behaves as a lock, users have to preserve
1367 * ordering of freeze protection and other filesystem locks. Generally,
1368 * freeze protection should be the outermost lock. In particular, we have:
1369 *
1370 * sb_start_write
1371 * -> i_mutex (write path, truncate, directory ops, ...)
1372 * -> s_umount (freeze_super, thaw_super)
1373 */
1374 static inline void sb_start_write(struct super_block *sb)
1375 {
1376 __sb_start_write(sb, SB_FREEZE_WRITE, true);
1377 }
1378
1379 static inline int sb_start_write_trylock(struct super_block *sb)
1380 {
1381 return __sb_start_write(sb, SB_FREEZE_WRITE, false);
1382 }
1383
1384 /**
1385 * sb_start_pagefault - get write access to a superblock from a page fault
1386 * @sb: the super we write to
1387 *
1388 * When a process starts handling write page fault, it should embed the
1389 * operation into sb_start_pagefault() - sb_end_pagefault() pair to get
1390 * exclusion against file system freezing. This is needed since the page fault
1391 * is going to dirty a page. This function increments number of running page
1392 * faults preventing freezing. If the file system is already frozen, the
1393 * function waits until the file system is thawed.
1394 *
1395 * Since page fault freeze protection behaves as a lock, users have to preserve
1396 * ordering of freeze protection and other filesystem locks. It is advised to
1397 * put sb_start_pagefault() close to mmap_sem in lock ordering. Page fault
1398 * handling code implies lock dependency:
1399 *
1400 * mmap_sem
1401 * -> sb_start_pagefault
1402 */
1403 static inline void sb_start_pagefault(struct super_block *sb)
1404 {
1405 __sb_start_write(sb, SB_FREEZE_PAGEFAULT, true);
1406 }
1407
1408 /*
1409 * sb_start_intwrite - get write access to a superblock for internal fs purposes
1410 * @sb: the super we write to
1411 *
1412 * This is the third level of protection against filesystem freezing. It is
1413 * free for use by a filesystem. The only requirement is that it must rank
1414 * below sb_start_pagefault.
1415 *
1416 * For example filesystem can call sb_start_intwrite() when starting a
1417 * transaction which somewhat eases handling of freezing for internal sources
1418 * of filesystem changes (internal fs threads, discarding preallocation on file
1419 * close, etc.).
1420 */
1421 static inline void sb_start_intwrite(struct super_block *sb)
1422 {
1423 __sb_start_write(sb, SB_FREEZE_FS, true);
1424 }
1425
1426
1427 extern bool inode_owner_or_capable(const struct inode *inode);
1428
1429 /*
1430 * VFS helper functions..
1431 */
1432 extern int vfs_create(struct inode *, struct dentry *, umode_t, bool);
1433 extern int vfs_mkdir(struct inode *, struct dentry *, umode_t);
1434 extern int vfs_mknod(struct inode *, struct dentry *, umode_t, dev_t);
1435 extern int vfs_symlink(struct inode *, struct dentry *, const char *);
1436 extern int vfs_link(struct dentry *, struct inode *, struct dentry *, struct inode **);
1437 extern int vfs_rmdir(struct inode *, struct dentry *);
1438 extern int vfs_unlink(struct inode *, struct dentry *, struct inode **);
1439 extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *, struct inode **, unsigned int);
1440 extern int vfs_whiteout(struct inode *, struct dentry *);
1441
1442 /*
1443 * VFS dentry helper functions.
1444 */
1445 extern void dentry_unhash(struct dentry *dentry);
1446
1447 /*
1448 * VFS file helper functions.
1449 */
1450 extern void inode_init_owner(struct inode *inode, const struct inode *dir,
1451 umode_t mode);
1452 /*
1453 * VFS FS_IOC_FIEMAP helper definitions.
1454 */
1455 struct fiemap_extent_info {
1456 unsigned int fi_flags; /* Flags as passed from user */
1457 unsigned int fi_extents_mapped; /* Number of mapped extents */
1458 unsigned int fi_extents_max; /* Size of fiemap_extent array */
1459 struct fiemap_extent __user *fi_extents_start; /* Start of
1460 fiemap_extent array */
1461 };
1462 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
1463 u64 phys, u64 len, u32 flags);
1464 int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags);
1465
1466 /*
1467 * File types
1468 *
1469 * NOTE! These match bits 12..15 of stat.st_mode
1470 * (ie "(i_mode >> 12) & 15").
1471 */
1472 #define DT_UNKNOWN 0
1473 #define DT_FIFO 1
1474 #define DT_CHR 2
1475 #define DT_DIR 4
1476 #define DT_BLK 6
1477 #define DT_REG 8
1478 #define DT_LNK 10
1479 #define DT_SOCK 12
1480 #define DT_WHT 14
1481
1482 /*
1483 * This is the "filldir" function type, used by readdir() to let
1484 * the kernel specify what kind of dirent layout it wants to have.
1485 * This allows the kernel to read directories into kernel space or
1486 * to have different dirent layouts depending on the binary type.
1487 */
1488 struct dir_context;
1489 typedef int (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64,
1490 unsigned);
1491
1492 struct dir_context {
1493 const filldir_t actor;
1494 loff_t pos;
1495 };
1496
1497 struct block_device_operations;
1498
1499 /* These macros are for out of kernel modules to test that
1500 * the kernel supports the unlocked_ioctl and compat_ioctl
1501 * fields in struct file_operations. */
1502 #define HAVE_COMPAT_IOCTL 1
1503 #define HAVE_UNLOCKED_IOCTL 1
1504
1505 struct iov_iter;
1506
1507 struct file_operations {
1508 struct module *owner;
1509 loff_t (*llseek) (struct file *, loff_t, int);
1510 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
1511 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
1512 ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
1513 ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
1514 ssize_t (*read_iter) (struct kiocb *, struct iov_iter *);
1515 ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
1516 int (*iterate) (struct file *, struct dir_context *);
1517 unsigned int (*poll) (struct file *, struct poll_table_struct *);
1518 long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
1519 long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
1520 int (*mmap) (struct file *, struct vm_area_struct *);
1521 int (*open) (struct inode *, struct file *);
1522 int (*flush) (struct file *, fl_owner_t id);
1523 int (*release) (struct inode *, struct file *);
1524 int (*fsync) (struct file *, loff_t, loff_t, int datasync);
1525 int (*aio_fsync) (struct kiocb *, int datasync);
1526 int (*fasync) (int, struct file *, int);
1527 int (*lock) (struct file *, int, struct file_lock *);
1528 ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
1529 unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
1530 int (*check_flags)(int);
1531 int (*flock) (struct file *, int, struct file_lock *);
1532 ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
1533 ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
1534 int (*setlease)(struct file *, long, struct file_lock **, void **);
1535 long (*fallocate)(struct file *file, int mode, loff_t offset,
1536 loff_t len);
1537 void (*show_fdinfo)(struct seq_file *m, struct file *f);
1538 };
1539
1540 struct inode_operations {
1541 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
1542 void * (*follow_link) (struct dentry *, struct nameidata *);
1543 int (*permission) (struct inode *, int);
1544 struct posix_acl * (*get_acl)(struct inode *, int);
1545
1546 int (*readlink) (struct dentry *, char __user *,int);
1547 void (*put_link) (struct dentry *, struct nameidata *, void *);
1548
1549 int (*create) (struct inode *,struct dentry *, umode_t, bool);
1550 int (*link) (struct dentry *,struct inode *,struct dentry *);
1551 int (*unlink) (struct inode *,struct dentry *);
1552 int (*symlink) (struct inode *,struct dentry *,const char *);
1553 int (*mkdir) (struct inode *,struct dentry *,umode_t);
1554 int (*rmdir) (struct inode *,struct dentry *);
1555 int (*mknod) (struct inode *,struct dentry *,umode_t,dev_t);
1556 int (*rename) (struct inode *, struct dentry *,
1557 struct inode *, struct dentry *);
1558 int (*rename2) (struct inode *, struct dentry *,
1559 struct inode *, struct dentry *, unsigned int);
1560 int (*setattr) (struct dentry *, struct iattr *);
1561 int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *);
1562 int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
1563 ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
1564 ssize_t (*listxattr) (struct dentry *, char *, size_t);
1565 int (*removexattr) (struct dentry *, const char *);
1566 int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
1567 u64 len);
1568 int (*update_time)(struct inode *, struct timespec *, int);
1569 int (*atomic_open)(struct inode *, struct dentry *,
1570 struct file *, unsigned open_flag,
1571 umode_t create_mode, int *opened);
1572 int (*tmpfile) (struct inode *, struct dentry *, umode_t);
1573 int (*set_acl)(struct inode *, struct posix_acl *, int);
1574
1575 /* WARNING: probably going away soon, do not use! */
1576 int (*dentry_open)(struct dentry *, struct file *, const struct cred *);
1577 } ____cacheline_aligned;
1578
1579 ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
1580 unsigned long nr_segs, unsigned long fast_segs,
1581 struct iovec *fast_pointer,
1582 struct iovec **ret_pointer);
1583
1584 extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
1585 extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
1586 extern ssize_t vfs_readv(struct file *, const struct iovec __user *,
1587 unsigned long, loff_t *);
1588 extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
1589 unsigned long, loff_t *);
1590
1591 struct super_operations {
1592 struct inode *(*alloc_inode)(struct super_block *sb);
1593 void (*destroy_inode)(struct inode *);
1594
1595 void (*dirty_inode) (struct inode *, int flags);
1596 int (*write_inode) (struct inode *, struct writeback_control *wbc);
1597 int (*drop_inode) (struct inode *);
1598 void (*evict_inode) (struct inode *);
1599 void (*put_super) (struct super_block *);
1600 int (*sync_fs)(struct super_block *sb, int wait);
1601 int (*freeze_super) (struct super_block *);
1602 int (*freeze_fs) (struct super_block *);
1603 int (*thaw_super) (struct super_block *);
1604 int (*unfreeze_fs) (struct super_block *);
1605 int (*statfs) (struct dentry *, struct kstatfs *);
1606 int (*remount_fs) (struct super_block *, int *, char *);
1607 void (*umount_begin) (struct super_block *);
1608
1609 int (*show_options)(struct seq_file *, struct dentry *);
1610 int (*show_devname)(struct seq_file *, struct dentry *);
1611 int (*show_path)(struct seq_file *, struct dentry *);
1612 int (*show_stats)(struct seq_file *, struct dentry *);
1613 #ifdef CONFIG_QUOTA
1614 ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
1615 ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
1616 struct dquot **(*get_dquots)(struct inode *);
1617 #endif
1618 int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
1619 long (*nr_cached_objects)(struct super_block *, int);
1620 long (*free_cached_objects)(struct super_block *, long, int);
1621 };
1622
1623 /*
1624 * Inode flags - they have no relation to superblock flags now
1625 */
1626 #define S_SYNC 1 /* Writes are synced at once */
1627 #define S_NOATIME 2 /* Do not update access times */
1628 #define S_APPEND 4 /* Append-only file */
1629 #define S_IMMUTABLE 8 /* Immutable file */
1630 #define S_DEAD 16 /* removed, but still open directory */
1631 #define S_NOQUOTA 32 /* Inode is not counted to quota */
1632 #define S_DIRSYNC 64 /* Directory modifications are synchronous */
1633 #define S_NOCMTIME 128 /* Do not update file c/mtime */
1634 #define S_SWAPFILE 256 /* Do not truncate: swapon got its bmaps */
1635 #define S_PRIVATE 512 /* Inode is fs-internal */
1636 #define S_IMA 1024 /* Inode has an associated IMA struct */
1637 #define S_AUTOMOUNT 2048 /* Automount/referral quasi-directory */
1638 #define S_NOSEC 4096 /* no suid or xattr security attributes */
1639
1640 /*
1641 * Note that nosuid etc flags are inode-specific: setting some file-system
1642 * flags just means all the inodes inherit those flags by default. It might be
1643 * possible to override it selectively if you really wanted to with some
1644 * ioctl() that is not currently implemented.
1645 *
1646 * Exception: MS_RDONLY is always applied to the entire file system.
1647 *
1648 * Unfortunately, it is possible to change a filesystems flags with it mounted
1649 * with files in use. This means that all of the inodes will not have their
1650 * i_flags updated. Hence, i_flags no longer inherit the superblock mount
1651 * flags, so these have to be checked separately. -- rmk@arm.uk.linux.org
1652 */
1653 #define __IS_FLG(inode, flg) ((inode)->i_sb->s_flags & (flg))
1654
1655 #define IS_RDONLY(inode) ((inode)->i_sb->s_flags & MS_RDONLY)
1656 #define IS_SYNC(inode) (__IS_FLG(inode, MS_SYNCHRONOUS) || \
1657 ((inode)->i_flags & S_SYNC))
1658 #define IS_DIRSYNC(inode) (__IS_FLG(inode, MS_SYNCHRONOUS|MS_DIRSYNC) || \
1659 ((inode)->i_flags & (S_SYNC|S_DIRSYNC)))
1660 #define IS_MANDLOCK(inode) __IS_FLG(inode, MS_MANDLOCK)
1661 #define IS_NOATIME(inode) __IS_FLG(inode, MS_RDONLY|MS_NOATIME)
1662 #define IS_I_VERSION(inode) __IS_FLG(inode, MS_I_VERSION)
1663
1664 #define IS_NOQUOTA(inode) ((inode)->i_flags & S_NOQUOTA)
1665 #define IS_APPEND(inode) ((inode)->i_flags & S_APPEND)
1666 #define IS_IMMUTABLE(inode) ((inode)->i_flags & S_IMMUTABLE)
1667 #define IS_POSIXACL(inode) __IS_FLG(inode, MS_POSIXACL)
1668
1669 #define IS_DEADDIR(inode) ((inode)->i_flags & S_DEAD)
1670 #define IS_NOCMTIME(inode) ((inode)->i_flags & S_NOCMTIME)
1671 #define IS_SWAPFILE(inode) ((inode)->i_flags & S_SWAPFILE)
1672 #define IS_PRIVATE(inode) ((inode)->i_flags & S_PRIVATE)
1673 #define IS_IMA(inode) ((inode)->i_flags & S_IMA)
1674 #define IS_AUTOMOUNT(inode) ((inode)->i_flags & S_AUTOMOUNT)
1675 #define IS_NOSEC(inode) ((inode)->i_flags & S_NOSEC)
1676
1677 #define IS_WHITEOUT(inode) (S_ISCHR(inode->i_mode) && \
1678 (inode)->i_rdev == WHITEOUT_DEV)
1679
1680 /*
1681 * Inode state bits. Protected by inode->i_lock
1682 *
1683 * Three bits determine the dirty state of the inode, I_DIRTY_SYNC,
1684 * I_DIRTY_DATASYNC and I_DIRTY_PAGES.
1685 *
1686 * Four bits define the lifetime of an inode. Initially, inodes are I_NEW,
1687 * until that flag is cleared. I_WILL_FREE, I_FREEING and I_CLEAR are set at
1688 * various stages of removing an inode.
1689 *
1690 * Two bits are used for locking and completion notification, I_NEW and I_SYNC.
1691 *
1692 * I_DIRTY_SYNC Inode is dirty, but doesn't have to be written on
1693 * fdatasync(). i_atime is the usual cause.
1694 * I_DIRTY_DATASYNC Data-related inode changes pending. We keep track of
1695 * these changes separately from I_DIRTY_SYNC so that we
1696 * don't have to write inode on fdatasync() when only
1697 * mtime has changed in it.
1698 * I_DIRTY_PAGES Inode has dirty pages. Inode itself may be clean.
1699 * I_NEW Serves as both a mutex and completion notification.
1700 * New inodes set I_NEW. If two processes both create
1701 * the same inode, one of them will release its inode and
1702 * wait for I_NEW to be released before returning.
1703 * Inodes in I_WILL_FREE, I_FREEING or I_CLEAR state can
1704 * also cause waiting on I_NEW, without I_NEW actually
1705 * being set. find_inode() uses this to prevent returning
1706 * nearly-dead inodes.
1707 * I_WILL_FREE Must be set when calling write_inode_now() if i_count
1708 * is zero. I_FREEING must be set when I_WILL_FREE is
1709 * cleared.
1710 * I_FREEING Set when inode is about to be freed but still has dirty
1711 * pages or buffers attached or the inode itself is still
1712 * dirty.
1713 * I_CLEAR Added by clear_inode(). In this state the inode is
1714 * clean and can be destroyed. Inode keeps I_FREEING.
1715 *
1716 * Inodes that are I_WILL_FREE, I_FREEING or I_CLEAR are
1717 * prohibited for many purposes. iget() must wait for
1718 * the inode to be completely released, then create it
1719 * anew. Other functions will just ignore such inodes,
1720 * if appropriate. I_NEW is used for waiting.
1721 *
1722 * I_SYNC Writeback of inode is running. The bit is set during
1723 * data writeback, and cleared with a wakeup on the bit
1724 * address once it is done. The bit is also used to pin
1725 * the inode in memory for flusher thread.
1726 *
1727 * I_REFERENCED Marks the inode as recently references on the LRU list.
1728 *
1729 * I_DIO_WAKEUP Never set. Only used as a key for wait_on_bit().
1730 *
1731 * Q: What is the difference between I_WILL_FREE and I_FREEING?
1732 */
1733 #define I_DIRTY_SYNC (1 << 0)
1734 #define I_DIRTY_DATASYNC (1 << 1)
1735 #define I_DIRTY_PAGES (1 << 2)
1736 #define __I_NEW 3
1737 #define I_NEW (1 << __I_NEW)
1738 #define I_WILL_FREE (1 << 4)
1739 #define I_FREEING (1 << 5)
1740 #define I_CLEAR (1 << 6)
1741 #define __I_SYNC 7
1742 #define I_SYNC (1 << __I_SYNC)
1743 #define I_REFERENCED (1 << 8)
1744 #define __I_DIO_WAKEUP 9
1745 #define I_DIO_WAKEUP (1 << I_DIO_WAKEUP)
1746 #define I_LINKABLE (1 << 10)
1747
1748 #define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES)
1749
1750 extern void __mark_inode_dirty(struct inode *, int);
1751 static inline void mark_inode_dirty(struct inode *inode)
1752 {
1753 __mark_inode_dirty(inode, I_DIRTY);
1754 }
1755
1756 static inline void mark_inode_dirty_sync(struct inode *inode)
1757 {
1758 __mark_inode_dirty(inode, I_DIRTY_SYNC);
1759 }
1760
1761 extern void inc_nlink(struct inode *inode);
1762 extern void drop_nlink(struct inode *inode);
1763 extern void clear_nlink(struct inode *inode);
1764 extern void set_nlink(struct inode *inode, unsigned int nlink);
1765
1766 static inline void inode_inc_link_count(struct inode *inode)
1767 {
1768 inc_nlink(inode);
1769 mark_inode_dirty(inode);
1770 }
1771
1772 static inline void inode_dec_link_count(struct inode *inode)
1773 {
1774 drop_nlink(inode);
1775 mark_inode_dirty(inode);
1776 }
1777
1778 /**
1779 * inode_inc_iversion - increments i_version
1780 * @inode: inode that need to be updated
1781 *
1782 * Every time the inode is modified, the i_version field will be incremented.
1783 * The filesystem has to be mounted with i_version flag
1784 */
1785
1786 static inline void inode_inc_iversion(struct inode *inode)
1787 {
1788 spin_lock(&inode->i_lock);
1789 inode->i_version++;
1790 spin_unlock(&inode->i_lock);
1791 }
1792
1793 enum file_time_flags {
1794 S_ATIME = 1,
1795 S_MTIME = 2,
1796 S_CTIME = 4,
1797 S_VERSION = 8,
1798 };
1799
1800 extern void touch_atime(const struct path *);
1801 static inline void file_accessed(struct file *file)
1802 {
1803 if (!(file->f_flags & O_NOATIME))
1804 touch_atime(&file->f_path);
1805 }
1806
1807 int sync_inode(struct inode *inode, struct writeback_control *wbc);
1808 int sync_inode_metadata(struct inode *inode, int wait);
1809
1810 struct file_system_type {
1811 const char *name;
1812 int fs_flags;
1813 #define FS_REQUIRES_DEV 1
1814 #define FS_BINARY_MOUNTDATA 2
1815 #define FS_HAS_SUBTYPE 4
1816 #define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */
1817 #define FS_USERNS_DEV_MOUNT 16 /* A userns mount does not imply MNT_NODEV */
1818 #define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */
1819 struct dentry *(*mount) (struct file_system_type *, int,
1820 const char *, void *);
1821 void (*kill_sb) (struct super_block *);
1822 struct module *owner;
1823 struct file_system_type * next;
1824 struct hlist_head fs_supers;
1825
1826 struct lock_class_key s_lock_key;
1827 struct lock_class_key s_umount_key;
1828 struct lock_class_key s_vfs_rename_key;
1829 struct lock_class_key s_writers_key[SB_FREEZE_LEVELS];
1830
1831 struct lock_class_key i_lock_key;
1832 struct lock_class_key i_mutex_key;
1833 struct lock_class_key i_mutex_dir_key;
1834 };
1835
1836 #define MODULE_ALIAS_FS(NAME) MODULE_ALIAS("fs-" NAME)
1837
1838 extern struct dentry *mount_ns(struct file_system_type *fs_type, int flags,
1839 void *data, int (*fill_super)(struct super_block *, void *, int));
1840 extern struct dentry *mount_bdev(struct file_system_type *fs_type,
1841 int flags, const char *dev_name, void *data,
1842 int (*fill_super)(struct super_block *, void *, int));
1843 extern struct dentry *mount_single(struct file_system_type *fs_type,
1844 int flags, void *data,
1845 int (*fill_super)(struct super_block *, void *, int));
1846 extern struct dentry *mount_nodev(struct file_system_type *fs_type,
1847 int flags, void *data,
1848 int (*fill_super)(struct super_block *, void *, int));
1849 extern struct dentry *mount_subtree(struct vfsmount *mnt, const char *path);
1850 void generic_shutdown_super(struct super_block *sb);
1851 void kill_block_super(struct super_block *sb);
1852 void kill_anon_super(struct super_block *sb);
1853 void kill_litter_super(struct super_block *sb);
1854 void deactivate_super(struct super_block *sb);
1855 void deactivate_locked_super(struct super_block *sb);
1856 int set_anon_super(struct super_block *s, void *data);
1857 int get_anon_bdev(dev_t *);
1858 void free_anon_bdev(dev_t);
1859 struct super_block *sget(struct file_system_type *type,
1860 int (*test)(struct super_block *,void *),
1861 int (*set)(struct super_block *,void *),
1862 int flags, void *data);
1863 extern struct dentry *mount_pseudo(struct file_system_type *, char *,
1864 const struct super_operations *ops,
1865 const struct dentry_operations *dops,
1866 unsigned long);
1867
1868 /* Alas, no aliases. Too much hassle with bringing module.h everywhere */
1869 #define fops_get(fops) \
1870 (((fops) && try_module_get((fops)->owner) ? (fops) : NULL))
1871 #define fops_put(fops) \
1872 do { if (fops) module_put((fops)->owner); } while(0)
1873 /*
1874 * This one is to be used *ONLY* from ->open() instances.
1875 * fops must be non-NULL, pinned down *and* module dependencies
1876 * should be sufficient to pin the caller down as well.
1877 */
1878 #define replace_fops(f, fops) \
1879 do { \
1880 struct file *__file = (f); \
1881 fops_put(__file->f_op); \
1882 BUG_ON(!(__file->f_op = (fops))); \
1883 } while(0)
1884
1885 extern int register_filesystem(struct file_system_type *);
1886 extern int unregister_filesystem(struct file_system_type *);
1887 extern struct vfsmount *kern_mount_data(struct file_system_type *, void *data);
1888 #define kern_mount(type) kern_mount_data(type, NULL)
1889 extern void kern_unmount(struct vfsmount *mnt);
1890 extern int may_umount_tree(struct vfsmount *);
1891 extern int may_umount(struct vfsmount *);
1892 extern long do_mount(const char *, const char __user *,
1893 const char *, unsigned long, void *);
1894 extern struct vfsmount *collect_mounts(struct path *);
1895 extern void drop_collected_mounts(struct vfsmount *);
1896 extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *,
1897 struct vfsmount *);
1898 extern int vfs_statfs(struct path *, struct kstatfs *);
1899 extern int user_statfs(const char __user *, struct kstatfs *);
1900 extern int fd_statfs(int, struct kstatfs *);
1901 extern int vfs_ustat(dev_t, struct kstatfs *);
1902 extern int freeze_super(struct super_block *super);
1903 extern int thaw_super(struct super_block *super);
1904 extern bool our_mnt(struct vfsmount *mnt);
1905 extern bool fs_fully_visible(struct file_system_type *);
1906
1907 extern int current_umask(void);
1908
1909 extern void ihold(struct inode * inode);
1910 extern void iput(struct inode *);
1911
1912 static inline struct inode *file_inode(const struct file *f)
1913 {
1914 return f->f_inode;
1915 }
1916
1917 /* /sys/fs */
1918 extern struct kobject *fs_kobj;
1919
1920 #define MAX_RW_COUNT (INT_MAX & PAGE_CACHE_MASK)
1921
1922 #define FLOCK_VERIFY_READ 1
1923 #define FLOCK_VERIFY_WRITE 2
1924
1925 #ifdef CONFIG_FILE_LOCKING
1926 extern int locks_mandatory_locked(struct file *);
1927 extern int locks_mandatory_area(int, struct inode *, struct file *, loff_t, size_t);
1928
1929 /*
1930 * Candidates for mandatory locking have the setgid bit set
1931 * but no group execute bit - an otherwise meaningless combination.
1932 */
1933
1934 static inline int __mandatory_lock(struct inode *ino)
1935 {
1936 return (ino->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID;
1937 }
1938
1939 /*
1940 * ... and these candidates should be on MS_MANDLOCK mounted fs,
1941 * otherwise these will be advisory locks
1942 */
1943
1944 static inline int mandatory_lock(struct inode *ino)
1945 {
1946 return IS_MANDLOCK(ino) && __mandatory_lock(ino);
1947 }
1948
1949 static inline int locks_verify_locked(struct file *file)
1950 {
1951 if (mandatory_lock(file_inode(file)))
1952 return locks_mandatory_locked(file);
1953 return 0;
1954 }
1955
1956 static inline int locks_verify_truncate(struct inode *inode,
1957 struct file *filp,
1958 loff_t size)
1959 {
1960 if (inode->i_flock && mandatory_lock(inode))
1961 return locks_mandatory_area(
1962 FLOCK_VERIFY_WRITE, inode, filp,
1963 size < inode->i_size ? size : inode->i_size,
1964 (size < inode->i_size ? inode->i_size - size
1965 : size - inode->i_size)
1966 );
1967 return 0;
1968 }
1969
1970 static inline int break_lease(struct inode *inode, unsigned int mode)
1971 {
1972 /*
1973 * Since this check is lockless, we must ensure that any refcounts
1974 * taken are done before checking inode->i_flock. Otherwise, we could
1975 * end up racing with tasks trying to set a new lease on this file.
1976 */
1977 smp_mb();
1978 if (inode->i_flock)
1979 return __break_lease(inode, mode, FL_LEASE);
1980 return 0;
1981 }
1982
1983 static inline int break_deleg(struct inode *inode, unsigned int mode)
1984 {
1985 /*
1986 * Since this check is lockless, we must ensure that any refcounts
1987 * taken are done before checking inode->i_flock. Otherwise, we could
1988 * end up racing with tasks trying to set a new lease on this file.
1989 */
1990 smp_mb();
1991 if (inode->i_flock)
1992 return __break_lease(inode, mode, FL_DELEG);
1993 return 0;
1994 }
1995
1996 static inline int try_break_deleg(struct inode *inode, struct inode **delegated_inode)
1997 {
1998 int ret;
1999
2000 ret = break_deleg(inode, O_WRONLY|O_NONBLOCK);
2001 if (ret == -EWOULDBLOCK && delegated_inode) {
2002 *delegated_inode = inode;
2003 ihold(inode);
2004 }
2005 return ret;
2006 }
2007
2008 static inline int break_deleg_wait(struct inode **delegated_inode)
2009 {
2010 int ret;
2011
2012 ret = break_deleg(*delegated_inode, O_WRONLY);
2013 iput(*delegated_inode);
2014 *delegated_inode = NULL;
2015 return ret;
2016 }
2017
2018 #else /* !CONFIG_FILE_LOCKING */
2019 static inline int locks_mandatory_locked(struct file *file)
2020 {
2021 return 0;
2022 }
2023
2024 static inline int locks_mandatory_area(int rw, struct inode *inode,
2025 struct file *filp, loff_t offset,
2026 size_t count)
2027 {
2028 return 0;
2029 }
2030
2031 static inline int __mandatory_lock(struct inode *inode)
2032 {
2033 return 0;
2034 }
2035
2036 static inline int mandatory_lock(struct inode *inode)
2037 {
2038 return 0;
2039 }
2040
2041 static inline int locks_verify_locked(struct file *file)
2042 {
2043 return 0;
2044 }
2045
2046 static inline int locks_verify_truncate(struct inode *inode, struct file *filp,
2047 size_t size)
2048 {
2049 return 0;
2050 }
2051
2052 static inline int break_lease(struct inode *inode, unsigned int mode)
2053 {
2054 return 0;
2055 }
2056
2057 static inline int break_deleg(struct inode *inode, unsigned int mode)
2058 {
2059 return 0;
2060 }
2061
2062 static inline int try_break_deleg(struct inode *inode, struct inode **delegated_inode)
2063 {
2064 return 0;
2065 }
2066
2067 static inline int break_deleg_wait(struct inode **delegated_inode)
2068 {
2069 BUG();
2070 return 0;
2071 }
2072
2073 #endif /* CONFIG_FILE_LOCKING */
2074
2075 /* fs/open.c */
2076 struct audit_names;
2077 struct filename {
2078 const char *name; /* pointer to actual string */
2079 const __user char *uptr; /* original userland pointer */
2080 struct audit_names *aname;
2081 bool separate; /* should "name" be freed? */
2082 };
2083
2084 extern long vfs_truncate(struct path *, loff_t);
2085 extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs,
2086 struct file *filp);
2087 extern int do_fallocate(struct file *file, int mode, loff_t offset,
2088 loff_t len);
2089 extern long do_sys_open(int dfd, const char __user *filename, int flags,
2090 umode_t mode);
2091 extern struct file *file_open_name(struct filename *, int, umode_t);
2092 extern struct file *filp_open(const char *, int, umode_t);
2093 extern struct file *file_open_root(struct dentry *, struct vfsmount *,
2094 const char *, int);
2095 extern int vfs_open(const struct path *, struct file *, const struct cred *);
2096 extern struct file * dentry_open(const struct path *, int, const struct cred *);
2097 extern int filp_close(struct file *, fl_owner_t id);
2098
2099 extern struct filename *getname(const char __user *);
2100 extern struct filename *getname_kernel(const char *);
2101
2102 enum {
2103 FILE_CREATED = 1,
2104 FILE_OPENED = 2
2105 };
2106 extern int finish_open(struct file *file, struct dentry *dentry,
2107 int (*open)(struct inode *, struct file *),
2108 int *opened);
2109 extern int finish_no_open(struct file *file, struct dentry *dentry);
2110
2111 /* fs/ioctl.c */
2112
2113 extern int ioctl_preallocate(struct file *filp, void __user *argp);
2114
2115 /* fs/dcache.c */
2116 extern void __init vfs_caches_init_early(void);
2117 extern void __init vfs_caches_init(unsigned long);
2118
2119 extern struct kmem_cache *names_cachep;
2120
2121 extern void final_putname(struct filename *name);
2122
2123 #define __getname() kmem_cache_alloc(names_cachep, GFP_KERNEL)
2124 #define __putname(name) kmem_cache_free(names_cachep, (void *)(name))
2125 #ifndef CONFIG_AUDITSYSCALL
2126 #define putname(name) final_putname(name)
2127 #else
2128 extern void putname(struct filename *name);
2129 #endif
2130
2131 #ifdef CONFIG_BLOCK
2132 extern int register_blkdev(unsigned int, const char *);
2133 extern void unregister_blkdev(unsigned int, const char *);
2134 extern struct block_device *bdget(dev_t);
2135 extern struct block_device *bdgrab(struct block_device *bdev);
2136 extern void bd_set_size(struct block_device *, loff_t size);
2137 extern void bd_forget(struct inode *inode);
2138 extern void bdput(struct block_device *);
2139 extern void invalidate_bdev(struct block_device *);
2140 extern void iterate_bdevs(void (*)(struct block_device *, void *), void *);
2141 extern int sync_blockdev(struct block_device *bdev);
2142 extern void kill_bdev(struct block_device *);
2143 extern struct super_block *freeze_bdev(struct block_device *);
2144 extern void emergency_thaw_all(void);
2145 extern int thaw_bdev(struct block_device *bdev, struct super_block *sb);
2146 extern int fsync_bdev(struct block_device *);
2147 extern int sb_is_blkdev_sb(struct super_block *sb);
2148 #else
2149 static inline void bd_forget(struct inode *inode) {}
2150 static inline int sync_blockdev(struct block_device *bdev) { return 0; }
2151 static inline void kill_bdev(struct block_device *bdev) {}
2152 static inline void invalidate_bdev(struct block_device *bdev) {}
2153
2154 static inline struct super_block *freeze_bdev(struct block_device *sb)
2155 {
2156 return NULL;
2157 }
2158
2159 static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb)
2160 {
2161 return 0;
2162 }
2163
2164 static inline void iterate_bdevs(void (*f)(struct block_device *, void *), void *arg)
2165 {
2166 }
2167
2168 static inline int sb_is_blkdev_sb(struct super_block *sb)
2169 {
2170 return 0;
2171 }
2172 #endif
2173 extern int sync_filesystem(struct super_block *);
2174 extern const struct file_operations def_blk_fops;
2175 extern const struct file_operations def_chr_fops;
2176 extern const struct file_operations bad_sock_fops;
2177 #ifdef CONFIG_BLOCK
2178 extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long);
2179 extern int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long);
2180 extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long);
2181 extern int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder);
2182 extern struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
2183 void *holder);
2184 extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode,
2185 void *holder);
2186 extern void blkdev_put(struct block_device *bdev, fmode_t mode);
2187 #ifdef CONFIG_SYSFS
2188 extern int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
2189 extern void bd_unlink_disk_holder(struct block_device *bdev,
2190 struct gendisk *disk);
2191 #else
2192 static inline int bd_link_disk_holder(struct block_device *bdev,
2193 struct gendisk *disk)
2194 {
2195 return 0;
2196 }
2197 static inline void bd_unlink_disk_holder(struct block_device *bdev,
2198 struct gendisk *disk)
2199 {
2200 }
2201 #endif
2202 #endif
2203
2204 /* fs/char_dev.c */
2205 #define CHRDEV_MAJOR_HASH_SIZE 255
2206 extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *);
2207 extern int register_chrdev_region(dev_t, unsigned, const char *);
2208 extern int __register_chrdev(unsigned int major, unsigned int baseminor,
2209 unsigned int count, const char *name,
2210 const struct file_operations *fops);
2211 extern void __unregister_chrdev(unsigned int major, unsigned int baseminor,
2212 unsigned int count, const char *name);
2213 extern void unregister_chrdev_region(dev_t, unsigned);
2214 extern void chrdev_show(struct seq_file *,off_t);
2215
2216 static inline int register_chrdev(unsigned int major, const char *name,
2217 const struct file_operations *fops)
2218 {
2219 return __register_chrdev(major, 0, 256, name, fops);
2220 }
2221
2222 static inline void unregister_chrdev(unsigned int major, const char *name)
2223 {
2224 __unregister_chrdev(major, 0, 256, name);
2225 }
2226
2227 /* fs/block_dev.c */
2228 #define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */
2229 #define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */
2230
2231 #ifdef CONFIG_BLOCK
2232 #define BLKDEV_MAJOR_HASH_SIZE 255
2233 extern const char *__bdevname(dev_t, char *buffer);
2234 extern const char *bdevname(struct block_device *bdev, char *buffer);
2235 extern struct block_device *lookup_bdev(const char *);
2236 extern void blkdev_show(struct seq_file *,off_t);
2237
2238 #else
2239 #define BLKDEV_MAJOR_HASH_SIZE 0
2240 #endif
2241
2242 extern void init_special_inode(struct inode *, umode_t, dev_t);
2243
2244 /* Invalid inode operations -- fs/bad_inode.c */
2245 extern void make_bad_inode(struct inode *);
2246 extern int is_bad_inode(struct inode *);
2247
2248 #ifdef CONFIG_BLOCK
2249 /*
2250 * return READ, READA, or WRITE
2251 */
2252 #define bio_rw(bio) ((bio)->bi_rw & (RW_MASK | RWA_MASK))
2253
2254 /*
2255 * return data direction, READ or WRITE
2256 */
2257 #define bio_data_dir(bio) ((bio)->bi_rw & 1)
2258
2259 extern void check_disk_size_change(struct gendisk *disk,
2260 struct block_device *bdev);
2261 extern int revalidate_disk(struct gendisk *);
2262 extern int check_disk_change(struct block_device *);
2263 extern int __invalidate_device(struct block_device *, bool);
2264 extern int invalidate_partition(struct gendisk *, int);
2265 #endif
2266 unsigned long invalidate_mapping_pages(struct address_space *mapping,
2267 pgoff_t start, pgoff_t end);
2268
2269 static inline void invalidate_remote_inode(struct inode *inode)
2270 {
2271 if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
2272 S_ISLNK(inode->i_mode))
2273 invalidate_mapping_pages(inode->i_mapping, 0, -1);
2274 }
2275 extern int invalidate_inode_pages2(struct address_space *mapping);
2276 extern int invalidate_inode_pages2_range(struct address_space *mapping,
2277 pgoff_t start, pgoff_t end);
2278 extern int write_inode_now(struct inode *, int);
2279 extern int filemap_fdatawrite(struct address_space *);
2280 extern int filemap_flush(struct address_space *);
2281 extern int filemap_fdatawait(struct address_space *);
2282 extern int filemap_fdatawait_range(struct address_space *, loff_t lstart,
2283 loff_t lend);
2284 extern int filemap_write_and_wait(struct address_space *mapping);
2285 extern int filemap_write_and_wait_range(struct address_space *mapping,
2286 loff_t lstart, loff_t lend);
2287 extern int __filemap_fdatawrite_range(struct address_space *mapping,
2288 loff_t start, loff_t end, int sync_mode);
2289 extern int filemap_fdatawrite_range(struct address_space *mapping,
2290 loff_t start, loff_t end);
2291
2292 extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end,
2293 int datasync);
2294 extern int vfs_fsync(struct file *file, int datasync);
2295 static inline int generic_write_sync(struct file *file, loff_t pos, loff_t count)
2296 {
2297 if (!(file->f_flags & O_DSYNC) && !IS_SYNC(file->f_mapping->host))
2298 return 0;
2299 return vfs_fsync_range(file, pos, pos + count - 1,
2300 (file->f_flags & __O_SYNC) ? 0 : 1);
2301 }
2302 extern void emergency_sync(void);
2303 extern void emergency_remount(void);
2304 #ifdef CONFIG_BLOCK
2305 extern sector_t bmap(struct inode *, sector_t);
2306 #endif
2307 extern int notify_change(struct dentry *, struct iattr *, struct inode **);
2308 extern int inode_permission(struct inode *, int);
2309 extern int __inode_permission(struct inode *, int);
2310 extern int generic_permission(struct inode *, int);
2311 extern int __check_sticky(struct inode *dir, struct inode *inode);
2312
2313 static inline bool execute_ok(struct inode *inode)
2314 {
2315 return (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode);
2316 }
2317
2318 static inline void file_start_write(struct file *file)
2319 {
2320 if (!S_ISREG(file_inode(file)->i_mode))
2321 return;
2322 __sb_start_write(file_inode(file)->i_sb, SB_FREEZE_WRITE, true);
2323 }
2324
2325 static inline bool file_start_write_trylock(struct file *file)
2326 {
2327 if (!S_ISREG(file_inode(file)->i_mode))
2328 return true;
2329 return __sb_start_write(file_inode(file)->i_sb, SB_FREEZE_WRITE, false);
2330 }
2331
2332 static inline void file_end_write(struct file *file)
2333 {
2334 if (!S_ISREG(file_inode(file)->i_mode))
2335 return;
2336 __sb_end_write(file_inode(file)->i_sb, SB_FREEZE_WRITE);
2337 }
2338
2339 /*
2340 * get_write_access() gets write permission for a file.
2341 * put_write_access() releases this write permission.
2342 * This is used for regular files.
2343 * We cannot support write (and maybe mmap read-write shared) accesses and
2344 * MAP_DENYWRITE mmappings simultaneously. The i_writecount field of an inode
2345 * can have the following values:
2346 * 0: no writers, no VM_DENYWRITE mappings
2347 * < 0: (-i_writecount) vm_area_structs with VM_DENYWRITE set exist
2348 * > 0: (i_writecount) users are writing to the file.
2349 *
2350 * Normally we operate on that counter with atomic_{inc,dec} and it's safe
2351 * except for the cases where we don't hold i_writecount yet. Then we need to
2352 * use {get,deny}_write_access() - these functions check the sign and refuse
2353 * to do the change if sign is wrong.
2354 */
2355 static inline int get_write_access(struct inode *inode)
2356 {
2357 return atomic_inc_unless_negative(&inode->i_writecount) ? 0 : -ETXTBSY;
2358 }
2359 static inline int deny_write_access(struct file *file)
2360 {
2361 struct inode *inode = file_inode(file);
2362 return atomic_dec_unless_positive(&inode->i_writecount) ? 0 : -ETXTBSY;
2363 }
2364 static inline void put_write_access(struct inode * inode)
2365 {
2366 atomic_dec(&inode->i_writecount);
2367 }
2368 static inline void allow_write_access(struct file *file)
2369 {
2370 if (file)
2371 atomic_inc(&file_inode(file)->i_writecount);
2372 }
2373 static inline bool inode_is_open_for_write(const struct inode *inode)
2374 {
2375 return atomic_read(&inode->i_writecount) > 0;
2376 }
2377
2378 #ifdef CONFIG_IMA
2379 static inline void i_readcount_dec(struct inode *inode)
2380 {
2381 BUG_ON(!atomic_read(&inode->i_readcount));
2382 atomic_dec(&inode->i_readcount);
2383 }
2384 static inline void i_readcount_inc(struct inode *inode)
2385 {
2386 atomic_inc(&inode->i_readcount);
2387 }
2388 #else
2389 static inline void i_readcount_dec(struct inode *inode)
2390 {
2391 return;
2392 }
2393 static inline void i_readcount_inc(struct inode *inode)
2394 {
2395 return;
2396 }
2397 #endif
2398 extern int do_pipe_flags(int *, int);
2399
2400 extern int kernel_read(struct file *, loff_t, char *, unsigned long);
2401 extern ssize_t kernel_write(struct file *, const char *, size_t, loff_t);
2402 extern ssize_t __kernel_write(struct file *, const char *, size_t, loff_t *);
2403 extern struct file * open_exec(const char *);
2404
2405 /* fs/dcache.c -- generic fs support functions */
2406 extern int is_subdir(struct dentry *, struct dentry *);
2407 extern int path_is_under(struct path *, struct path *);
2408
2409 #include <linux/err.h>
2410
2411 /* needed for stackable file system support */
2412 extern loff_t default_llseek(struct file *file, loff_t offset, int whence);
2413
2414 extern loff_t vfs_llseek(struct file *file, loff_t offset, int whence);
2415
2416 extern int inode_init_always(struct super_block *, struct inode *);
2417 extern void inode_init_once(struct inode *);
2418 extern void address_space_init_once(struct address_space *mapping);
2419 extern struct inode * igrab(struct inode *);
2420 extern ino_t iunique(struct super_block *, ino_t);
2421 extern int inode_needs_sync(struct inode *inode);
2422 extern int generic_delete_inode(struct inode *inode);
2423 static inline int generic_drop_inode(struct inode *inode)
2424 {
2425 return !inode->i_nlink || inode_unhashed(inode);
2426 }
2427
2428 extern struct inode *ilookup5_nowait(struct super_block *sb,
2429 unsigned long hashval, int (*test)(struct inode *, void *),
2430 void *data);
2431 extern struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
2432 int (*test)(struct inode *, void *), void *data);
2433 extern struct inode *ilookup(struct super_block *sb, unsigned long ino);
2434
2435 extern struct inode * iget5_locked(struct super_block *, unsigned long, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *);
2436 extern struct inode * iget_locked(struct super_block *, unsigned long);
2437 extern int insert_inode_locked4(struct inode *, unsigned long, int (*test)(struct inode *, void *), void *);
2438 extern int insert_inode_locked(struct inode *);
2439 #ifdef CONFIG_DEBUG_LOCK_ALLOC
2440 extern void lockdep_annotate_inode_mutex_key(struct inode *inode);
2441 #else
2442 static inline void lockdep_annotate_inode_mutex_key(struct inode *inode) { };
2443 #endif
2444 extern void unlock_new_inode(struct inode *);
2445 extern unsigned int get_next_ino(void);
2446
2447 extern void __iget(struct inode * inode);
2448 extern void iget_failed(struct inode *);
2449 extern void clear_inode(struct inode *);
2450 extern void __destroy_inode(struct inode *);
2451 extern struct inode *new_inode_pseudo(struct super_block *sb);
2452 extern struct inode *new_inode(struct super_block *sb);
2453 extern void free_inode_nonrcu(struct inode *inode);
2454 extern int should_remove_suid(struct dentry *);
2455 extern int file_remove_suid(struct file *);
2456
2457 extern void __insert_inode_hash(struct inode *, unsigned long hashval);
2458 static inline void insert_inode_hash(struct inode *inode)
2459 {
2460 __insert_inode_hash(inode, inode->i_ino);
2461 }
2462
2463 extern void __remove_inode_hash(struct inode *);
2464 static inline void remove_inode_hash(struct inode *inode)
2465 {
2466 if (!inode_unhashed(inode))
2467 __remove_inode_hash(inode);
2468 }
2469
2470 extern void inode_sb_list_add(struct inode *inode);
2471
2472 #ifdef CONFIG_BLOCK
2473 extern void submit_bio(int, struct bio *);
2474 extern int bdev_read_only(struct block_device *);
2475 #endif
2476 extern int set_blocksize(struct block_device *, int);
2477 extern int sb_set_blocksize(struct super_block *, int);
2478 extern int sb_min_blocksize(struct super_block *, int);
2479
2480 extern int generic_file_mmap(struct file *, struct vm_area_struct *);
2481 extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
2482 extern int generic_file_remap_pages(struct vm_area_struct *, unsigned long addr,
2483 unsigned long size, pgoff_t pgoff);
2484 int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk);
2485 extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *);
2486 extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *);
2487 extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *);
2488 extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *, loff_t);
2489 extern ssize_t generic_perform_write(struct file *, struct iov_iter *, loff_t);
2490 extern ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos);
2491 extern ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos);
2492 extern ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos);
2493 extern ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos);
2494
2495 /* fs/block_dev.c */
2496 extern ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to);
2497 extern ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from);
2498 extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
2499 int datasync);
2500 extern void block_sync_page(struct page *page);
2501
2502 /* fs/splice.c */
2503 extern ssize_t generic_file_splice_read(struct file *, loff_t *,
2504 struct pipe_inode_info *, size_t, unsigned int);
2505 extern ssize_t default_file_splice_read(struct file *, loff_t *,
2506 struct pipe_inode_info *, size_t, unsigned int);
2507 extern ssize_t iter_file_splice_write(struct pipe_inode_info *,
2508 struct file *, loff_t *, size_t, unsigned int);
2509 extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
2510 struct file *out, loff_t *, size_t len, unsigned int flags);
2511 extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
2512 loff_t *opos, size_t len, unsigned int flags);
2513
2514
2515 extern void
2516 file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
2517 extern loff_t noop_llseek(struct file *file, loff_t offset, int whence);
2518 extern loff_t no_llseek(struct file *file, loff_t offset, int whence);
2519 extern loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize);
2520 extern loff_t generic_file_llseek(struct file *file, loff_t offset, int whence);
2521 extern loff_t generic_file_llseek_size(struct file *file, loff_t offset,
2522 int whence, loff_t maxsize, loff_t eof);
2523 extern loff_t fixed_size_llseek(struct file *file, loff_t offset,
2524 int whence, loff_t size);
2525 extern int generic_file_open(struct inode * inode, struct file * filp);
2526 extern int nonseekable_open(struct inode * inode, struct file * filp);
2527
2528 #ifdef CONFIG_FS_XIP
2529 extern ssize_t xip_file_read(struct file *filp, char __user *buf, size_t len,
2530 loff_t *ppos);
2531 extern int xip_file_mmap(struct file * file, struct vm_area_struct * vma);
2532 extern ssize_t xip_file_write(struct file *filp, const char __user *buf,
2533 size_t len, loff_t *ppos);
2534 extern int xip_truncate_page(struct address_space *mapping, loff_t from);
2535 #else
2536 static inline int xip_truncate_page(struct address_space *mapping, loff_t from)
2537 {
2538 return 0;
2539 }
2540 #endif
2541
2542 #ifdef CONFIG_BLOCK
2543 typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode,
2544 loff_t file_offset);
2545
2546 enum {
2547 /* need locking between buffered and direct access */
2548 DIO_LOCKING = 0x01,
2549
2550 /* filesystem does not support filling holes */
2551 DIO_SKIP_HOLES = 0x02,
2552
2553 /* filesystem can handle aio writes beyond i_size */
2554 DIO_ASYNC_EXTEND = 0x04,
2555 };
2556
2557 void dio_end_io(struct bio *bio, int error);
2558
2559 ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
2560 struct block_device *bdev, struct iov_iter *iter, loff_t offset,
2561 get_block_t get_block, dio_iodone_t end_io,
2562 dio_submit_t submit_io, int flags);
2563
2564 static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
2565 struct inode *inode, struct iov_iter *iter, loff_t offset,
2566 get_block_t get_block)
2567 {
2568 return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iter,
2569 offset, get_block, NULL, NULL,
2570 DIO_LOCKING | DIO_SKIP_HOLES);
2571 }
2572 #endif
2573
2574 void inode_dio_wait(struct inode *inode);
2575 void inode_dio_done(struct inode *inode);
2576
2577 extern void inode_set_flags(struct inode *inode, unsigned int flags,
2578 unsigned int mask);
2579
2580 extern const struct file_operations generic_ro_fops;
2581
2582 #define special_file(m) (S_ISCHR(m)||S_ISBLK(m)||S_ISFIFO(m)||S_ISSOCK(m))
2583
2584 extern int readlink_copy(char __user *, int, const char *);
2585 extern int page_readlink(struct dentry *, char __user *, int);
2586 extern void *page_follow_link_light(struct dentry *, struct nameidata *);
2587 extern void page_put_link(struct dentry *, struct nameidata *, void *);
2588 extern int __page_symlink(struct inode *inode, const char *symname, int len,
2589 int nofs);
2590 extern int page_symlink(struct inode *inode, const char *symname, int len);
2591 extern const struct inode_operations page_symlink_inode_operations;
2592 extern void kfree_put_link(struct dentry *, struct nameidata *, void *);
2593 extern int generic_readlink(struct dentry *, char __user *, int);
2594 extern void generic_fillattr(struct inode *, struct kstat *);
2595 int vfs_getattr_nosec(struct path *path, struct kstat *stat);
2596 extern int vfs_getattr(struct path *, struct kstat *);
2597 void __inode_add_bytes(struct inode *inode, loff_t bytes);
2598 void inode_add_bytes(struct inode *inode, loff_t bytes);
2599 void __inode_sub_bytes(struct inode *inode, loff_t bytes);
2600 void inode_sub_bytes(struct inode *inode, loff_t bytes);
2601 loff_t inode_get_bytes(struct inode *inode);
2602 void inode_set_bytes(struct inode *inode, loff_t bytes);
2603
2604 extern int vfs_readdir(struct file *, filldir_t, void *);
2605 extern int iterate_dir(struct file *, struct dir_context *);
2606
2607 extern int vfs_stat(const char __user *, struct kstat *);
2608 extern int vfs_lstat(const char __user *, struct kstat *);
2609 extern int vfs_fstat(unsigned int, struct kstat *);
2610 extern int vfs_fstatat(int , const char __user *, struct kstat *, int);
2611
2612 extern int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
2613 unsigned long arg);
2614 extern int __generic_block_fiemap(struct inode *inode,
2615 struct fiemap_extent_info *fieinfo,
2616 loff_t start, loff_t len,
2617 get_block_t *get_block);
2618 extern int generic_block_fiemap(struct inode *inode,
2619 struct fiemap_extent_info *fieinfo, u64 start,
2620 u64 len, get_block_t *get_block);
2621
2622 extern void get_filesystem(struct file_system_type *fs);
2623 extern void put_filesystem(struct file_system_type *fs);
2624 extern struct file_system_type *get_fs_type(const char *name);
2625 extern struct super_block *get_super(struct block_device *);
2626 extern struct super_block *get_super_thawed(struct block_device *);
2627 extern struct super_block *get_active_super(struct block_device *bdev);
2628 extern void drop_super(struct super_block *sb);
2629 extern void iterate_supers(void (*)(struct super_block *, void *), void *);
2630 extern void iterate_supers_type(struct file_system_type *,
2631 void (*)(struct super_block *, void *), void *);
2632
2633 extern int dcache_dir_open(struct inode *, struct file *);
2634 extern int dcache_dir_close(struct inode *, struct file *);
2635 extern loff_t dcache_dir_lseek(struct file *, loff_t, int);
2636 extern int dcache_readdir(struct file *, struct dir_context *);
2637 extern int simple_setattr(struct dentry *, struct iattr *);
2638 extern int simple_getattr(struct vfsmount *, struct dentry *, struct kstat *);
2639 extern int simple_statfs(struct dentry *, struct kstatfs *);
2640 extern int simple_open(struct inode *inode, struct file *file);
2641 extern int simple_link(struct dentry *, struct inode *, struct dentry *);
2642 extern int simple_unlink(struct inode *, struct dentry *);
2643 extern int simple_rmdir(struct inode *, struct dentry *);
2644 extern int simple_rename(struct inode *, struct dentry *, struct inode *, struct dentry *);
2645 extern int noop_fsync(struct file *, loff_t, loff_t, int);
2646 extern int simple_empty(struct dentry *);
2647 extern int simple_readpage(struct file *file, struct page *page);
2648 extern int simple_write_begin(struct file *file, struct address_space *mapping,
2649 loff_t pos, unsigned len, unsigned flags,
2650 struct page **pagep, void **fsdata);
2651 extern int simple_write_end(struct file *file, struct address_space *mapping,
2652 loff_t pos, unsigned len, unsigned copied,
2653 struct page *page, void *fsdata);
2654 extern int always_delete_dentry(const struct dentry *);
2655 extern struct inode *alloc_anon_inode(struct super_block *);
2656 extern int simple_nosetlease(struct file *, long, struct file_lock **, void **);
2657 extern const struct dentry_operations simple_dentry_operations;
2658
2659 extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned int flags);
2660 extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *);
2661 extern const struct file_operations simple_dir_operations;
2662 extern const struct inode_operations simple_dir_inode_operations;
2663 struct tree_descr { char *name; const struct file_operations *ops; int mode; };
2664 struct dentry *d_alloc_name(struct dentry *, const char *);
2665 extern int simple_fill_super(struct super_block *, unsigned long, struct tree_descr *);
2666 extern int simple_pin_fs(struct file_system_type *, struct vfsmount **mount, int *count);
2667 extern void simple_release_fs(struct vfsmount **mount, int *count);
2668
2669 extern ssize_t simple_read_from_buffer(void __user *to, size_t count,
2670 loff_t *ppos, const void *from, size_t available);
2671 extern ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
2672 const void __user *from, size_t count);
2673
2674 extern int __generic_file_fsync(struct file *, loff_t, loff_t, int);
2675 extern int generic_file_fsync(struct file *, loff_t, loff_t, int);
2676
2677 extern int generic_check_addressable(unsigned, u64);
2678
2679 #ifdef CONFIG_MIGRATION
2680 extern int buffer_migrate_page(struct address_space *,
2681 struct page *, struct page *,
2682 enum migrate_mode);
2683 #else
2684 #define buffer_migrate_page NULL
2685 #endif
2686
2687 extern int inode_change_ok(const struct inode *, struct iattr *);
2688 extern int inode_newsize_ok(const struct inode *, loff_t offset);
2689 extern void setattr_copy(struct inode *inode, const struct iattr *attr);
2690
2691 extern int file_update_time(struct file *file);
2692
2693 extern int generic_show_options(struct seq_file *m, struct dentry *root);
2694 extern void save_mount_options(struct super_block *sb, char *options);
2695 extern void replace_mount_options(struct super_block *sb, char *options);
2696
2697 static inline ino_t parent_ino(struct dentry *dentry)
2698 {
2699 ino_t res;
2700
2701 /*
2702 * Don't strictly need d_lock here? If the parent ino could change
2703 * then surely we'd have a deeper race in the caller?
2704 */
2705 spin_lock(&dentry->d_lock);
2706 res = dentry->d_parent->d_inode->i_ino;
2707 spin_unlock(&dentry->d_lock);
2708 return res;
2709 }
2710
2711 /* Transaction based IO helpers */
2712
2713 /*
2714 * An argresp is stored in an allocated page and holds the
2715 * size of the argument or response, along with its content
2716 */
2717 struct simple_transaction_argresp {
2718 ssize_t size;
2719 char data[0];
2720 };
2721
2722 #define SIMPLE_TRANSACTION_LIMIT (PAGE_SIZE - sizeof(struct simple_transaction_argresp))
2723
2724 char *simple_transaction_get(struct file *file, const char __user *buf,
2725 size_t size);
2726 ssize_t simple_transaction_read(struct file *file, char __user *buf,
2727 size_t size, loff_t *pos);
2728 int simple_transaction_release(struct inode *inode, struct file *file);
2729
2730 void simple_transaction_set(struct file *file, size_t n);
2731
2732 /*
2733 * simple attribute files
2734 *
2735 * These attributes behave similar to those in sysfs:
2736 *
2737 * Writing to an attribute immediately sets a value, an open file can be
2738 * written to multiple times.
2739 *
2740 * Reading from an attribute creates a buffer from the value that might get
2741 * read with multiple read calls. When the attribute has been read
2742 * completely, no further read calls are possible until the file is opened
2743 * again.
2744 *
2745 * All attributes contain a text representation of a numeric value
2746 * that are accessed with the get() and set() functions.
2747 */
2748 #define DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \
2749 static int __fops ## _open(struct inode *inode, struct file *file) \
2750 { \
2751 __simple_attr_check_format(__fmt, 0ull); \
2752 return simple_attr_open(inode, file, __get, __set, __fmt); \
2753 } \
2754 static const struct file_operations __fops = { \
2755 .owner = THIS_MODULE, \
2756 .open = __fops ## _open, \
2757 .release = simple_attr_release, \
2758 .read = simple_attr_read, \
2759 .write = simple_attr_write, \
2760 .llseek = generic_file_llseek, \
2761 }
2762
2763 static inline __printf(1, 2)
2764 void __simple_attr_check_format(const char *fmt, ...)
2765 {
2766 /* don't do anything, just let the compiler check the arguments; */
2767 }
2768
2769 int simple_attr_open(struct inode *inode, struct file *file,
2770 int (*get)(void *, u64 *), int (*set)(void *, u64),
2771 const char *fmt);
2772 int simple_attr_release(struct inode *inode, struct file *file);
2773 ssize_t simple_attr_read(struct file *file, char __user *buf,
2774 size_t len, loff_t *ppos);
2775 ssize_t simple_attr_write(struct file *file, const char __user *buf,
2776 size_t len, loff_t *ppos);
2777
2778 struct ctl_table;
2779 int proc_nr_files(struct ctl_table *table, int write,
2780 void __user *buffer, size_t *lenp, loff_t *ppos);
2781 int proc_nr_dentry(struct ctl_table *table, int write,
2782 void __user *buffer, size_t *lenp, loff_t *ppos);
2783 int proc_nr_inodes(struct ctl_table *table, int write,
2784 void __user *buffer, size_t *lenp, loff_t *ppos);
2785 int __init get_filesystem_list(char *buf);
2786
2787 #define __FMODE_EXEC ((__force int) FMODE_EXEC)
2788 #define __FMODE_NONOTIFY ((__force int) FMODE_NONOTIFY)
2789
2790 #define ACC_MODE(x) ("\004\002\006\006"[(x)&O_ACCMODE])
2791 #define OPEN_FMODE(flag) ((__force fmode_t)(((flag + 1) & O_ACCMODE) | \
2792 (flag & __FMODE_NONOTIFY)))
2793
2794 static inline int is_sxid(umode_t mode)
2795 {
2796 return (mode & S_ISUID) || ((mode & S_ISGID) && (mode & S_IXGRP));
2797 }
2798
2799 static inline int check_sticky(struct inode *dir, struct inode *inode)
2800 {
2801 if (!(dir->i_mode & S_ISVTX))
2802 return 0;
2803
2804 return __check_sticky(dir, inode);
2805 }
2806
2807 static inline void inode_has_no_xattr(struct inode *inode)
2808 {
2809 if (!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & MS_NOSEC))
2810 inode->i_flags |= S_NOSEC;
2811 }
2812
2813 static inline bool is_root_inode(struct inode *inode)
2814 {
2815 return inode == inode->i_sb->s_root->d_inode;
2816 }
2817
2818 static inline bool dir_emit(struct dir_context *ctx,
2819 const char *name, int namelen,
2820 u64 ino, unsigned type)
2821 {
2822 return ctx->actor(ctx, name, namelen, ctx->pos, ino, type) == 0;
2823 }
2824 static inline bool dir_emit_dot(struct file *file, struct dir_context *ctx)
2825 {
2826 return ctx->actor(ctx, ".", 1, ctx->pos,
2827 file->f_path.dentry->d_inode->i_ino, DT_DIR) == 0;
2828 }
2829 static inline bool dir_emit_dotdot(struct file *file, struct dir_context *ctx)
2830 {
2831 return ctx->actor(ctx, "..", 2, ctx->pos,
2832 parent_ino(file->f_path.dentry), DT_DIR) == 0;
2833 }
2834 static inline bool dir_emit_dots(struct file *file, struct dir_context *ctx)
2835 {
2836 if (ctx->pos == 0) {
2837 if (!dir_emit_dot(file, ctx))
2838 return false;
2839 ctx->pos = 1;
2840 }
2841 if (ctx->pos == 1) {
2842 if (!dir_emit_dotdot(file, ctx))
2843 return false;
2844 ctx->pos = 2;
2845 }
2846 return true;
2847 }
2848 static inline bool dir_relax(struct inode *inode)
2849 {
2850 mutex_unlock(&inode->i_mutex);
2851 mutex_lock(&inode->i_mutex);
2852 return !IS_DEADDIR(inode);
2853 }
2854
2855 #endif /* _LINUX_FS_H */
This page took 0.104074 seconds and 5 git commands to generate.