/*
 * fs/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * inotify was largely rewritten to make use of the fsnotify infrastructure
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/file.h>
#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/idr.h>
#include <linux/init.h> /* module_init */
#include <linux/inotify.h>
#include <linux/kernel.h> /* roundup() */
#include <linux/magic.h> /* superblock magic number */
#include <linux/mount.h> /* mntget */
#include <linux/namei.h> /* LOOKUP_FOLLOW */
#include <linux/path.h> /* struct path */
#include <linux/sched.h> /* struct user */
#include <linux/slab.h> /* struct kmem_cache */
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>

#include "inotify.h"

#include <asm/ioctls.h>
static struct vfsmount *inotify_mnt __read_mostly;

/* these are configurable via /proc/sys/fs/inotify/ */
static int inotify_max_user_instances __read_mostly;
static int inotify_max_queued_events __read_mostly;
int inotify_max_user_watches __read_mostly;

static struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
struct kmem_cache *event_priv_cachep __read_mostly;

/*
 * When inotify registers a new group it increments this and uses that
 * value as an offset to set the fsnotify group "name" and priority.
 */
static atomic_t inotify_grp_num;
#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

ctl_table inotify_table[] = {
	{
		.ctl_name	= INOTIFY_MAX_USER_INSTANCES,
		.procname	= "max_user_instances",
		.data		= &inotify_max_user_instances,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
	},
	{
		.ctl_name	= INOTIFY_MAX_USER_WATCHES,
		.procname	= "max_user_watches",
		.data		= &inotify_max_user_watches,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
	},
	{
		.ctl_name	= INOTIFY_MAX_QUEUED_EVENTS,
		.procname	= "max_queued_events",
		.data		= &inotify_max_queued_events,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
	},
	{ .ctl_name = 0 }
};
#endif /* CONFIG_SYSCTL */
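
/*
 * Summary (illustrative): with the boot-time defaults assigned in
 * inotify_user_setup() below, the table above exposes three tunables under
 * /proc/sys/fs/inotify/:
 *
 *	max_user_instances	128	checked against user->inotify_devs
 *					in inotify_init1()
 *	max_user_watches	8192	checked against user->inotify_watches
 *					in inotify_new_watch()
 *	max_queued_events	16384	becomes group->max_events in
 *					inotify_new_group()
 */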
static inline __u32 inotify_arg_to_mask(u32 arg)
{
	__u32 mask;

	/* everything should accept their own ignored and cares about children */
	mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD);

	/* mask off the flags used to open the fd */
	mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT));

	return mask;
}
static inline u32 inotify_mask_to_arg(__u32 mask)
{
	return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED |
		       IN_Q_OVERFLOW);
}
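
/*
 * Example (illustrative): for an inotify_add_watch() arg of
 * IN_MODIFY | IN_CREATE, inotify_arg_to_mask() yields
 *
 *	FS_IN_IGNORED | FS_EVENT_ON_CHILD | IN_MODIFY | IN_CREATE
 *
 * i.e. every watch implicitly cares about its own removal and about events
 * on children of a watched directory.  Going the other way,
 * inotify_mask_to_arg() drops any kernel-internal bits before a mask is
 * reported to userspace in an event.  Both helpers rely on the FS_* bits in
 * fsnotify_backend.h being numerically identical to the corresponding IN_*
 * bits in the inotify ABI.
 */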
/* inotify userspace file descriptor functions */
static unsigned int inotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	int ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	mutex_lock(&group->notification_mutex);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = POLLIN | POLLRDNORM;
	mutex_unlock(&group->notification_mutex);

	return ret;
}
/*
 * Get an inotify_kernel_event if one exists and is small
 * enough to fit in "count". Return an error pointer if
 * it is not big enough.
 *
 * Called with the group->notification_mutex held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	size_t event_size = sizeof(struct inotify_event);
	struct fsnotify_event *event;

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	event = fsnotify_peek_notify_event(group);

	event_size += roundup(event->name_len, event_size);

	if (event_size > count)
		return ERR_PTR(-EINVAL);

	/* held the notification_mutex the whole time, so this is the
	 * same event we peeked above */
	fsnotify_remove_notify_event(group);

	return event;
}
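
/*
 * Example (illustrative): if the next queued event needs more room than the
 * read() supplied, get_one_event() returns ERR_PTR(-EINVAL) and userspace
 * sees read() fail with EINVAL rather than receiving a truncated record.
 * A buffer of sizeof(struct inotify_event) + NAME_MAX + 1 bytes is always
 * large enough for a single event.
 */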
/*
 * Copy an event to user space, returning how much we copied.
 *
 * We already checked that the event size is smaller than the
 * buffer we had in "get_one_event()" above.
 */
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *event,
				  char __user *buf)
{
	struct inotify_event inotify_event;
	struct fsnotify_event_private_data *fsn_priv;
	struct inotify_event_private_data *priv;
	size_t event_size = sizeof(struct inotify_event);
	size_t name_len;

	/* we get the inotify watch descriptor from the event private data */
	spin_lock(&event->lock);
	fsn_priv = fsnotify_remove_priv_from_event(group, event);
	spin_unlock(&event->lock);

	if (!fsn_priv)
		inotify_event.wd = -1;
	else {
		priv = container_of(fsn_priv, struct inotify_event_private_data,
				    fsnotify_event_priv_data);
		inotify_event.wd = priv->wd;
		inotify_free_event_priv(fsn_priv);
	}

	/* round up event->name_len so it is a multiple of event_size
	 * plus an extra byte for the terminating '\0'.
	 */
	name_len = roundup(event->name_len + 1, event_size);
	inotify_event.len = name_len;

	inotify_event.mask = inotify_mask_to_arg(event->mask);
	inotify_event.cookie = event->sync_cookie;

	/* send the main event */
	if (copy_to_user(buf, &inotify_event, event_size))
		return -EFAULT;

	buf += event_size;

	/*
	 * fsnotify only stores the pathname, so here we have to send the pathname
	 * and then pad that pathname out to a multiple of sizeof(inotify_event)
	 * with zeros.
	 */
	if (name_len) {
		unsigned int len_to_zero = name_len - event->name_len;
		/* copy the path name */
		if (copy_to_user(buf, event->file_name, event->name_len))
			return -EFAULT;
		buf += event->name_len;

		/* fill userspace with 0's */
		if (clear_user(buf, len_to_zero))
			return -EFAULT;
		buf += len_to_zero;
		event_size += name_len;
	}

	return event_size;
}
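
/*
 * Record layout sketch (illustrative): each record handed to userspace is
 *
 *	struct inotify_event { wd, mask, cookie, len }	<- event_size bytes
 *	name bytes, NUL padded out to 'len' bytes	<- only when a name exists
 *
 * Assuming a 16-byte struct inotify_event, a name such as "a.txt"
 * (name_len == 5) is rounded up to len == 16 (5 bytes of name, 1 terminating
 * '\0', 10 bytes of padding), so the whole record spans 32 bytes.
 */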
static ssize_t inotify_read(struct file *file, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	group = file->private_data;

	while (1) {
		prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);

		mutex_lock(&group->notification_mutex);
		kevent = get_one_event(group, count);
		mutex_unlock(&group->notification_mutex);

		if (kevent) {
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			fsnotify_put_event(kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -EINTR;
		if (signal_pending(current))
			break;

		if (start != buf)
			break;

		schedule();
	}

	finish_wait(&group->notification_waitq, &wait);
	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}
static int inotify_fasync(int fd, struct file *file, int on)
{
	struct fsnotify_group *group = file->private_data;

	return fasync_helper(fd, file, on, &group->inotify_data.fa) >= 0 ? 0 : -EIO;
}
static int inotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;
	struct user_struct *user = group->inotify_data.user;

	fsnotify_clear_marks_by_group(group);

	/* free this group, matching get was inotify_init->fsnotify_obtain_group */
	fsnotify_put_group(group);

	atomic_dec(&user->inotify_devs);

	return 0;
}
static long inotify_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event_holder *holder;
	struct fsnotify_event *event;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;
	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		mutex_lock(&group->notification_mutex);
		list_for_each_entry(holder, &group->notification_list, event_list) {
			event = holder->event;
			send_len += sizeof(struct inotify_event);
			send_len += roundup(event->name_len,
					    sizeof(struct inotify_event));
		}
		mutex_unlock(&group->notification_mutex);
		ret = put_user(send_len, (int __user *) p);
		break;
	}

	return ret;
}
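
/*
 * Usage sketch (illustrative, userspace side): the FIONREAD case above lets
 * a reader size its buffer before calling read(), e.g.
 *
 *	unsigned int avail;
 *	if (ioctl(inotify_fd, FIONREAD, &avail) == 0)
 *		char *buf = malloc(avail);
 *
 * where 'avail' is the sum computed above: one struct inotify_event plus the
 * rounded-up name length for every event currently queued on this group.
 */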
static const struct file_operations inotify_fops = {
	.poll		= inotify_poll,
	.read		= inotify_read,
	.fasync		= inotify_fasync,
	.release	= inotify_release,
	.unlocked_ioctl	= inotify_ioctl,
	.compat_ioctl	= inotify_ioctl,
};
/*
 * find_inode - resolve a user-given path to a specific inode
 */
static int inotify_find_inode(const char __user *dirname, struct path *path, unsigned flags)
{
	int error;

	error = user_path_at(AT_FDCWD, dirname, flags, path);
	if (error)
		return error;
	/* you can only watch an inode if you have read permissions on it */
	error = inode_permission(path->dentry->d_inode, MAY_READ);
	if (error)
		path_put(path);
	return error;
}
/*
 * Remove the mark from the idr (if present) and drop the reference
 * on the mark because it was in the idr.
 */
static void inotify_remove_from_idr(struct fsnotify_group *group,
				    struct inotify_inode_mark_entry *ientry)
{
	struct idr *idr;
	struct fsnotify_mark_entry *entry;
	struct inotify_inode_mark_entry *found_ientry;
	int wd;

	spin_lock(&group->inotify_data.idr_lock);
	idr = &group->inotify_data.idr;
	wd = ientry->wd;

	if (wd == -1)
		goto out;

	entry = idr_find(&group->inotify_data.idr, wd);
	if (unlikely(!entry))
		goto out;

	found_ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
	if (unlikely(found_ientry != ientry)) {
		/* We found an entry in the idr with the right wd, but it's
		 * not the entry we were told to remove.  eparis seriously
		 * fucked up somewhere. */
		WARN_ON(1);
		goto out;
	}

	/* One ref for being in the idr, one ref held by the caller */
	BUG_ON(atomic_read(&entry->refcnt) < 2);

	idr_remove(idr, wd);
	ientry->wd = -1;

	/* removed from the idr, drop that ref */
	fsnotify_put_mark(entry);
out:
	spin_unlock(&group->inotify_data.idr_lock);
}
/*
 * Send IN_IGNORED for this wd, remove this wd from the idr.
 */
void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
				    struct fsnotify_group *group)
{
	struct inotify_inode_mark_entry *ientry;
	struct fsnotify_event *ignored_event;
	struct inotify_event_private_data *event_priv;
	struct fsnotify_event_private_data *fsn_event_priv;
	int ret;

	ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL,
					      FSNOTIFY_EVENT_NONE, NULL, 0,
					      GFP_NOFS);
	if (!ignored_event)
		return;

	ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);

	event_priv = kmem_cache_alloc(event_priv_cachep, GFP_NOFS);
	if (unlikely(!event_priv))
		goto skip_send_ignore;

	fsn_event_priv = &event_priv->fsnotify_event_priv_data;

	fsn_event_priv->group = group;
	event_priv->wd = ientry->wd;

	ret = fsnotify_add_notify_event(group, ignored_event, fsn_event_priv);
	if (ret)
		inotify_free_event_priv(fsn_event_priv);

skip_send_ignore:

	/* matches the reference taken when the event was created */
	fsnotify_put_event(ignored_event);

	/* remove this entry from the idr */
	inotify_remove_from_idr(group, ientry);

	atomic_dec(&group->inotify_data.user->inotify_watches);
}
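
/*
 * Semantics note (illustrative): because this runs when a mark is being torn
 * down, the IN_IGNORED event queued above is the last event userspace sees
 * for a watch descriptor, whether the watch was removed explicitly with
 * inotify_rm_watch() or implicitly when the watched inode went away.
 */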
/* ding dong the mark is dead */
static void inotify_free_mark(struct fsnotify_mark_entry *entry)
{
	struct inotify_inode_mark_entry *ientry = (struct inotify_inode_mark_entry *)entry;

	kmem_cache_free(inotify_inode_mark_cachep, ientry);
}
static int inotify_update_existing_watch(struct fsnotify_group *group,
					 struct inode *inode,
					 u32 arg)
{
	struct fsnotify_mark_entry *entry;
	struct inotify_inode_mark_entry *ientry;
	__u32 old_mask, new_mask;
	__u32 mask;
	int add = (arg & IN_MASK_ADD);
	int ret;

	/* don't allow invalid bits: we don't want flags set */
	mask = inotify_arg_to_mask(arg);

	spin_lock(&inode->i_lock);
	entry = fsnotify_find_mark_entry(group, inode);
	spin_unlock(&inode->i_lock);
	if (!entry)
		return -ENOENT;

	ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);

	spin_lock(&entry->lock);

	old_mask = entry->mask;
	if (add) {
		entry->mask |= mask;
		new_mask = entry->mask;
	} else {
		entry->mask = mask;
		new_mask = entry->mask;
	}

	spin_unlock(&entry->lock);

	if (old_mask != new_mask) {
		/* more bits in old than in new? */
		int dropped = (old_mask & ~new_mask);
		/* more bits in this entry than the inode's mask? */
		int do_inode = (new_mask & ~inode->i_fsnotify_mask);
		/* more bits in this entry than the group? */
		int do_group = (new_mask & ~group->mask);

		/* update the inode with this new entry */
		if (dropped || do_inode)
			fsnotify_recalc_inode_mask(inode);

		/* update the group mask with the new mask */
		if (dropped || do_group)
			fsnotify_recalc_group_mask(group);
	}

	/* return the wd of the entry we updated */
	ret = ientry->wd;

	/* match the get from fsnotify_find_mark_entry() */
	fsnotify_put_mark(entry);

	return ret;
}
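
/*
 * Example (illustrative): if an existing watch has mask IN_MODIFY and
 * inotify_add_watch() is called again on the same inode with
 * IN_CREATE | IN_MASK_ADD, the entry's mask becomes IN_MODIFY | IN_CREATE;
 * without IN_MASK_ADD the old mask is simply replaced by IN_CREATE.  Either
 * way the existing watch descriptor is returned rather than a new one.
 */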
static int inotify_new_watch(struct fsnotify_group *group,
			     struct inode *inode,
			     u32 arg)
{
	struct inotify_inode_mark_entry *tmp_ientry;
	__u32 mask;
	int ret;

	/* don't allow invalid bits: we don't want flags set */
	mask = inotify_arg_to_mask(arg);

	tmp_ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
	if (unlikely(!tmp_ientry))
		return -ENOMEM;

	fsnotify_init_mark(&tmp_ientry->fsn_entry, inotify_free_mark);
	tmp_ientry->fsn_entry.mask = mask;
	tmp_ientry->wd = -1;

	ret = -ENOSPC;
	if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
		goto out_err;
retry:
	ret = -ENOMEM;
	if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL)))
		goto out_err;

	spin_lock(&group->inotify_data.idr_lock);
	ret = idr_get_new_above(&group->inotify_data.idr, &tmp_ientry->fsn_entry,
				group->inotify_data.last_wd,
				&tmp_ientry->wd);
	spin_unlock(&group->inotify_data.idr_lock);
	if (ret) {
		/* idr was out of memory, allocate and try again */
		if (ret == -EAGAIN)
			goto retry;
		goto out_err;
	}

	/* we put the mark on the idr, take a reference */
	fsnotify_get_mark(&tmp_ientry->fsn_entry);

	/* we are on the idr, now get on the inode */
	ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode);
	if (ret) {
		/* we failed to get on the inode, get off the idr */
		inotify_remove_from_idr(group, tmp_ientry);
		goto out_err;
	}

	/* update the idr hint, who cares about races, it's just a hint */
	group->inotify_data.last_wd = tmp_ientry->wd;

	/* increment the number of watches the user has */
	atomic_inc(&group->inotify_data.user->inotify_watches);

	/* return the watch descriptor for this new entry */
	ret = tmp_ientry->wd;

	/* match the ref from fsnotify_init_mark() */
	fsnotify_put_mark(&tmp_ientry->fsn_entry);

out_err:
	if (ret < 0)
		kmem_cache_free(inotify_inode_mark_cachep, tmp_ientry);

	return ret;
}
static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
{
	int ret;

retry:
	/* try to update an existing watch with the new arg */
	ret = inotify_update_existing_watch(group, inode, arg);
	/* no mark present, try to add a new one */
	if (ret == -ENOENT)
		ret = inotify_new_watch(group, inode, arg);
	/*
	 * inotify_new_watch could race with another thread which did an
	 * inotify_new_watch between the update_existing and the add watch
	 * here, go back and try to update an existing mark again.
	 */
	if (ret == -EEXIST)
		goto retry;

	return ret;
}
static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsigned int max_events)
{
	struct fsnotify_group *group;
	unsigned int grp_num;

	/* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */
	grp_num = (INOTIFY_GROUP_NUM - atomic_inc_return(&inotify_grp_num));
	group = fsnotify_obtain_group(grp_num, 0, &inotify_fsnotify_ops);
	if (IS_ERR(group))
		return group;

	group->max_events = max_events;

	spin_lock_init(&group->inotify_data.idr_lock);
	idr_init(&group->inotify_data.idr);
	group->inotify_data.last_wd = 1;
	group->inotify_data.user = user;
	group->inotify_data.fa = NULL;

	return group;
}
/* inotify syscalls */
SYSCALL_DEFINE1(inotify_init1, int, flags)
{
	struct fsnotify_group *group;
	struct user_struct *user;
	struct file *filp;
	int fd, ret;

	/* Check the IN_* constants for consistency. */
	BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);

	if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
		return -EINVAL;

	fd = get_unused_fd_flags(flags & O_CLOEXEC);
	if (fd < 0)
		return fd;

	filp = get_empty_filp();
	if (!filp) {
		ret = -ENFILE;
		goto out_put_fd;
	}

	user = get_current_user();
	if (unlikely(atomic_read(&user->inotify_devs) >=
			inotify_max_user_instances)) {
		ret = -EMFILE;
		goto out_free_uid;
	}

	/* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */
	group = inotify_new_group(user, inotify_max_queued_events);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_free_uid;
	}

	filp->f_op = &inotify_fops;
	filp->f_path.mnt = mntget(inotify_mnt);
	filp->f_path.dentry = dget(inotify_mnt->mnt_root);
	filp->f_mapping = filp->f_path.dentry->d_inode->i_mapping;
	filp->f_mode = FMODE_READ;
	filp->f_flags = O_RDONLY | (flags & O_NONBLOCK);
	filp->private_data = group;

	atomic_inc(&user->inotify_devs);

	fd_install(fd, filp);

	return fd;

out_free_uid:
	free_uid(user);
	put_filp(filp);
out_put_fd:
	put_unused_fd(fd);
	return ret;
}
SYSCALL_DEFINE0(inotify_init)
{
	return sys_inotify_init1(0);
}
SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
		u32, mask)
{
	struct fsnotify_group *group;
	struct inode *inode;
	struct path path;
	struct file *filp;
	int ret, fput_needed;
	unsigned flags = 0;

	filp = fget_light(fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(filp->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	if (!(mask & IN_DONT_FOLLOW))
		flags |= LOOKUP_FOLLOW;
	if (mask & IN_ONLYDIR)
		flags |= LOOKUP_DIRECTORY;

	ret = inotify_find_inode(pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	inode = path.dentry->d_inode;
	group = filp->private_data;

	/* create/update an inode mark */
	ret = inotify_update_watch(group, inode, mask);
	if (unlikely(ret))
		goto path_put_and_out;

path_put_and_out:
	path_put(&path);
fput_and_out:
	fput_light(filp, fput_needed);
	return ret;
}
SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
{
	struct fsnotify_group *group;
	struct fsnotify_mark_entry *entry;
	struct file *filp;
	int ret = 0, fput_needed;

	filp = fget_light(fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(filp->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto out;
	}

	group = filp->private_data;

	spin_lock(&group->inotify_data.idr_lock);
	entry = idr_find(&group->inotify_data.idr, wd);
	if (unlikely(!entry)) {
		spin_unlock(&group->inotify_data.idr_lock);
		ret = -EINVAL;
		goto out;
	}
	fsnotify_get_mark(entry);
	spin_unlock(&group->inotify_data.idr_lock);

	fsnotify_destroy_mark_by_entry(entry);
	fsnotify_put_mark(entry);

out:
	fput_light(filp, fput_needed);
	return ret;
}
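
/*
 * Userspace usage sketch (illustrative only; the buffer size and flags are
 * arbitrary choices, not mandated by this file):
 *
 *	int fd = inotify_init1(IN_CLOEXEC);
 *	int wd = inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE);
 *	char buf[4096];
 *	ssize_t len = read(fd, buf, sizeof(buf));
 *	char *p = buf;
 *	while (p < buf + len) {
 *		struct inotify_event *ev = (struct inotify_event *)p;
 *		printf("wd=%d mask=%x name=%s\n", ev->wd, ev->mask,
 *		       ev->len ? ev->name : "");
 *		p += sizeof(*ev) + ev->len;
 *	}
 *	inotify_rm_watch(fd, wd);
 *	close(fd);
 *
 * The three inotify_* calls enter through the SYSCALL_DEFINE wrappers above;
 * read() and close() are served by inotify_fops.
 */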
static int
inotify_get_sb(struct file_system_type *fs_type, int flags,
	       const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_pseudo(fs_type, "inotify", NULL,
			     INOTIFYFS_SUPER_MAGIC, mnt);
}

static struct file_system_type inotify_fs_type = {
	.name		= "inotifyfs",
	.get_sb		= inotify_get_sb,
	.kill_sb	= kill_anon_super,
};
/*
 * inotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init inotify_user_setup(void)
{
	int ret;

	ret = register_filesystem(&inotify_fs_type);
	if (unlikely(ret))
		panic("inotify: register_filesystem returned %d!\n", ret);

	inotify_mnt = kern_mount(&inotify_fs_type);
	if (IS_ERR(inotify_mnt))
		panic("inotify: kern_mount ret %ld!\n", PTR_ERR(inotify_mnt));

	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark_entry, SLAB_PANIC);
	event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC);

	inotify_max_queued_events = 16384;
	inotify_max_user_instances = 128;
	inotify_max_user_watches = 8192;

	return 0;
}
module_init(inotify_user_setup);