/*
 * fs/notify/inotify/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * inotify was largely rewritten to make use of the fsnotify infrastructure
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/file.h>
#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/idr.h>
#include <linux/init.h> /* module_init */
#include <linux/inotify.h>
#include <linux/kernel.h> /* roundup() */
#include <linux/namei.h> /* LOOKUP_FOLLOW */
#include <linux/sched.h> /* struct user */
#include <linux/slab.h> /* struct kmem_cache */
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/anon_inodes.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>

#include "inotify.h"

#include <asm/ioctls.h>

/* these are configurable via /proc/sys/fs/inotify/ */
static int inotify_max_user_instances __read_mostly;
static int inotify_max_queued_events __read_mostly;
int inotify_max_user_watches __read_mostly;

static struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
struct kmem_cache *event_priv_cachep __read_mostly;

/*
 * When inotify registers a new group it increments this and uses that
 * value as an offset to set the fsnotify group "name" and priority.
 */
static atomic_t inotify_grp_num;

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int zero;

ctl_table inotify_table[] = {
	{
		.procname	= "max_user_instances",
		.data		= &inotify_max_user_instances,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
	},
	{
		.procname	= "max_user_watches",
		.data		= &inotify_max_user_watches,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
	},
	{
		.procname	= "max_queued_events",
		.data		= &inotify_max_queued_events,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero
	},
	{ }
};
#endif /* CONFIG_SYSCTL */
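
/*
 * Usage sketch (illustrative only, not part of this file): the three
 * limits above surface to userspace as files under /proc/sys/fs/inotify/.
 * A minimal C reader, assuming the standard procfs layout:
 *
 *	#include <stdio.h>
 *
 *	static int read_limit(const char *name)
 *	{
 *		char path[128];
 *		FILE *f;
 *		int val = -1;
 *
 *		snprintf(path, sizeof(path),
 *			 "/proc/sys/fs/inotify/%s", name);
 *		f = fopen(path, "r");
 *		if (f) {
 *			if (fscanf(f, "%d", &val) != 1)
 *				val = -1;
 *			fclose(f);
 *		}
 *		return val;
 *	}
 *
 *	int main(void)
 *	{
 *		printf("max_user_watches = %d\n",
 *		       read_limit("max_user_watches"));
 *		return 0;
 *	}
 */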

static inline __u32 inotify_arg_to_mask(u32 arg)
{
	__u32 mask;

	/* every mark reports its own IN_IGNORED and cares about events on children */
	mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD);

	/* keep only the event bits the caller asked for, plus IN_ONESHOT */
	mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT));

	return mask;
}

static inline u32 inotify_mask_to_arg(__u32 mask)
{
	return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED |
		       IN_Q_OVERFLOW);
}

/* inotify userspace file descriptor functions */
static unsigned int inotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	int ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	mutex_lock(&group->notification_mutex);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = POLLIN | POLLRDNORM;
	mutex_unlock(&group->notification_mutex);

	return ret;
}
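
/*
 * Userspace sketch (illustrative only, not part of this file): the poll
 * hook above is what lets an inotify fd be multiplexed with
 * poll()/select()/epoll.  Assuming "fd" is a valid inotify descriptor,
 * POLLIN means a subsequent read() will not block:
 *
 *	#include <poll.h>
 *
 *	int wait_for_events(int fd, int timeout_ms)
 *	{
 *		struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *		if (poll(&pfd, 1, timeout_ms) > 0 && (pfd.revents & POLLIN))
 *			return 1;
 *		return 0;
 *	}
 */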

/*
 * Get an fsnotify_event if one exists and is small enough to fit in
 * "count".  Return an error pointer if the event is too large to fit.
 *
 * Called with the group->notification_mutex held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	size_t event_size = sizeof(struct inotify_event);
	struct fsnotify_event *event;

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	event = fsnotify_peek_notify_event(group);

	if (event->name_len)
		event_size += roundup(event->name_len + 1, event_size);

	if (event_size > count)
		return ERR_PTR(-EINVAL);

	/* held the notification_mutex the whole time, so this is the
	 * same event we peeked above */
	fsnotify_remove_notify_event(group);

	return event;
}

/*
 * Copy an event to user space, returning how much we copied.
 *
 * We already checked that the event size is smaller than the
 * buffer we had in "get_one_event()" above.
 */
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *event,
				  char __user *buf)
{
	struct inotify_event inotify_event;
	struct fsnotify_event_private_data *fsn_priv;
	struct inotify_event_private_data *priv;
	size_t event_size = sizeof(struct inotify_event);
	size_t name_len = 0;

	/* we get the inotify watch descriptor from the event private data */
	spin_lock(&event->lock);
	fsn_priv = fsnotify_remove_priv_from_event(group, event);
	spin_unlock(&event->lock);

	if (!fsn_priv)
		inotify_event.wd = -1;
	else {
		priv = container_of(fsn_priv, struct inotify_event_private_data,
				    fsnotify_event_priv_data);
		inotify_event.wd = priv->wd;
		inotify_free_event_priv(fsn_priv);
	}

	/*
	 * round up event->name_len plus an extra byte for the terminating
	 * '\0' so the result is a multiple of event_size
	 */
	if (event->name_len)
		name_len = roundup(event->name_len + 1, event_size);
	inotify_event.len = name_len;

	inotify_event.mask = inotify_mask_to_arg(event->mask);
	inotify_event.cookie = event->sync_cookie;

	/* send the main event */
	if (copy_to_user(buf, &inotify_event, event_size))
		return -EFAULT;

	buf += event_size;

	/*
	 * fsnotify only stores the pathname, so here we have to send the
	 * pathname and then pad that pathname out to a multiple of
	 * sizeof(inotify_event) with zeros (via clear_user()).
	 */
	if (name_len) {
		unsigned int len_to_zero = name_len - event->name_len;
		/* copy the path name */
		if (copy_to_user(buf, event->file_name, event->name_len))
			return -EFAULT;
		buf += event->name_len;

		/* fill userspace with 0's */
		if (clear_user(buf, len_to_zero))
			return -EFAULT;
		buf += len_to_zero;
		event_size += name_len;
	}

	return event_size;
}
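
/*
 * Userspace sketch (illustrative only, not part of this file): because of
 * the padding scheme above, a read() buffer holds a sequence of
 * variable-length records, each a struct inotify_event followed by "len"
 * bytes of NUL-padded name.  A hypothetical parser over one buffer:
 *
 *	#include <stdio.h>
 *	#include <sys/inotify.h>
 *
 *	static void parse_events(const char *buf, ssize_t n)
 *	{
 *		const char *p = buf;
 *
 *		while (p < buf + n) {
 *			const struct inotify_event *ev =
 *				(const struct inotify_event *)p;
 *
 *			printf("wd=%d mask=%x cookie=%u name=%s\n",
 *			       ev->wd, ev->mask, ev->cookie,
 *			       ev->len ? ev->name : "");
 *			p += sizeof(*ev) + ev->len;
 *		}
 *	}
 */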

static ssize_t inotify_read(struct file *file, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	group = file->private_data;

	while (1) {
		prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);

		mutex_lock(&group->notification_mutex);
		kevent = get_one_event(group, count);
		mutex_unlock(&group->notification_mutex);

		if (kevent) {
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			fsnotify_put_event(kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -EINTR;
		if (signal_pending(current))
			break;

		if (start != buf)
			break;

		schedule();
	}

	finish_wait(&group->notification_waitq, &wait);
	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}

static int inotify_fasync(int fd, struct file *file, int on)
{
	struct fsnotify_group *group = file->private_data;

	return fasync_helper(fd, file, on, &group->inotify_data.fa) >= 0 ? 0 : -EIO;
}

static int inotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;
	struct user_struct *user = group->inotify_data.user;

	fsnotify_clear_marks_by_group(group);

	/* free this group, matching get was inotify_init->fsnotify_obtain_group */
	fsnotify_put_group(group);

	atomic_dec(&user->inotify_devs);

	return 0;
}

static long inotify_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event_holder *holder;
	struct fsnotify_event *event;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;
	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		mutex_lock(&group->notification_mutex);
		list_for_each_entry(holder, &group->notification_list, event_list) {
			event = holder->event;
			send_len += sizeof(struct inotify_event);
			if (event->name_len)
				send_len += roundup(event->name_len + 1,
						sizeof(struct inotify_event));
		}
		mutex_unlock(&group->notification_mutex);
		ret = put_user(send_len, (int __user *) p);
		break;
	}

	return ret;
}
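
/*
 * Userspace sketch (illustrative only, not part of this file): FIONREAD
 * as implemented above reports the number of bytes the queued events
 * would occupy, which a caller can use to size the read() buffer exactly:
 *
 *	#include <stdlib.h>
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *
 *	static char *read_all_events(int fd, ssize_t *out_len)
 *	{
 *		int avail = 0;
 *		char *buf;
 *
 *		if (ioctl(fd, FIONREAD, &avail) < 0 || avail <= 0)
 *			return NULL;
 *		buf = malloc(avail);
 *		if (buf)
 *			*out_len = read(fd, buf, avail);
 *		return buf;
 *	}
 */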

static const struct file_operations inotify_fops = {
	.poll		= inotify_poll,
	.read		= inotify_read,
	.fasync		= inotify_fasync,
	.release	= inotify_release,
	.unlocked_ioctl	= inotify_ioctl,
	.compat_ioctl	= inotify_ioctl,
};


/*
 * inotify_find_inode - resolve a user-given path to a specific inode
 */
static int inotify_find_inode(const char __user *dirname, struct path *path, unsigned flags)
{
	int error;

	error = user_path_at(AT_FDCWD, dirname, flags, path);
	if (error)
		return error;
	/* you can only watch an inode if you have read permission on it */
	error = inode_permission(path->dentry->d_inode, MAY_READ);
	if (error)
		path_put(path);
	return error;
}

static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
			      int *last_wd,
			      struct inotify_inode_mark_entry *ientry)
{
	int ret;

	do {
		if (unlikely(!idr_pre_get(idr, GFP_KERNEL)))
			return -ENOMEM;

		spin_lock(idr_lock);
		ret = idr_get_new_above(idr, ientry, *last_wd + 1,
					&ientry->wd);
		/* we added the mark to the idr, take a reference */
		if (!ret) {
			fsnotify_get_mark(&ientry->fsn_entry);
			*last_wd = ientry->wd;
		}
		spin_unlock(idr_lock);
	} while (ret == -EAGAIN);

	return ret;
}

static struct inotify_inode_mark_entry *inotify_idr_find_locked(struct fsnotify_group *group,
								int wd)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark_entry *ientry;

	assert_spin_locked(idr_lock);

	ientry = idr_find(idr, wd);
	if (ientry) {
		struct fsnotify_mark_entry *fsn_entry = &ientry->fsn_entry;

		fsnotify_get_mark(fsn_entry);
		/* One ref for being in the idr, one ref we just took */
		BUG_ON(atomic_read(&fsn_entry->refcnt) < 2);
	}

	return ientry;
}

static struct inotify_inode_mark_entry *inotify_idr_find(struct fsnotify_group *group,
							 int wd)
{
	struct inotify_inode_mark_entry *ientry;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	spin_lock(idr_lock);
	ientry = inotify_idr_find_locked(group, wd);
	spin_unlock(idr_lock);

	return ientry;
}

static void do_inotify_remove_from_idr(struct fsnotify_group *group,
				       struct inotify_inode_mark_entry *ientry)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	int wd = ientry->wd;

	assert_spin_locked(idr_lock);

	idr_remove(idr, wd);

	/* removed from the idr, drop that ref */
	fsnotify_put_mark(&ientry->fsn_entry);
}

/*
 * Remove the mark from the idr (if present) and drop the reference
 * on the mark because it was in the idr.
 */
static void inotify_remove_from_idr(struct fsnotify_group *group,
				    struct inotify_inode_mark_entry *ientry)
{
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark_entry *found_ientry = NULL;
	int wd;

	spin_lock(idr_lock);
	wd = ientry->wd;

	/*
	 * does this ientry think it is in the idr?  we shouldn't get called
	 * if it wasn't....
	 */
	if (wd == -1) {
		WARN_ONCE(1, "%s: ientry=%p ientry->wd=%d ientry->group=%p"
			" ientry->inode=%p\n", __func__, ientry, ientry->wd,
			ientry->fsn_entry.group, ientry->fsn_entry.inode);
		goto out;
	}

	/* Let's look in the idr to see if we find it */
	found_ientry = inotify_idr_find_locked(group, wd);
	if (unlikely(!found_ientry)) {
		WARN_ONCE(1, "%s: ientry=%p ientry->wd=%d ientry->group=%p"
			" ientry->inode=%p\n", __func__, ientry, ientry->wd,
			ientry->fsn_entry.group, ientry->fsn_entry.inode);
		goto out;
	}

	/*
	 * We found an entry in the idr at the right wd, but it's
	 * not the entry we were told to remove.  Something went
	 * seriously wrong somewhere.
	 */
	if (unlikely(found_ientry != ientry)) {
		WARN_ONCE(1, "%s: ientry=%p ientry->wd=%d ientry->group=%p "
			"entry->inode=%p found_ientry=%p found_ientry->wd=%d "
			"found_ientry->group=%p found_ientry->inode=%p\n",
			__func__, ientry, ientry->wd, ientry->fsn_entry.group,
			ientry->fsn_entry.inode, found_ientry, found_ientry->wd,
			found_ientry->fsn_entry.group,
			found_ientry->fsn_entry.inode);
		goto out;
	}

	/*
	 * One ref for being in the idr,
	 * one ref held by the caller trying to kill us,
	 * one ref grabbed by inotify_idr_find
	 */
	if (unlikely(atomic_read(&ientry->fsn_entry.refcnt) < 3)) {
		printk(KERN_ERR "%s: ientry=%p ientry->wd=%d ientry->group=%p"
			" ientry->inode=%p\n", __func__, ientry, ientry->wd,
			ientry->fsn_entry.group, ientry->fsn_entry.inode);
		/* we can't really recover with bad refcounting... */
		BUG();
	}

	do_inotify_remove_from_idr(group, ientry);
out:
	/* match the ref taken by inotify_idr_find_locked() */
	if (found_ientry)
		fsnotify_put_mark(&found_ientry->fsn_entry);
	ientry->wd = -1;
	spin_unlock(idr_lock);
}

/*
 * Send IN_IGNORED for this wd, remove this wd from the idr.
 */
void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
				    struct fsnotify_group *group)
{
	struct inotify_inode_mark_entry *ientry;
	struct fsnotify_event *ignored_event;
	struct inotify_event_private_data *event_priv;
	struct fsnotify_event_private_data *fsn_event_priv;
	int ret;

	ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL,
					      FSNOTIFY_EVENT_NONE, NULL, 0,
					      GFP_NOFS);
	if (!ignored_event)
		return;

	ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);

	event_priv = kmem_cache_alloc(event_priv_cachep, GFP_NOFS);
	if (unlikely(!event_priv))
		goto skip_send_ignore;

	fsn_event_priv = &event_priv->fsnotify_event_priv_data;

	fsn_event_priv->group = group;
	event_priv->wd = ientry->wd;

	ret = fsnotify_add_notify_event(group, ignored_event, fsn_event_priv, NULL);
	if (ret)
		inotify_free_event_priv(fsn_event_priv);

skip_send_ignore:

	/* matches the reference taken when the event was created */
	fsnotify_put_event(ignored_event);

	/* remove this entry from the idr */
	inotify_remove_from_idr(group, ientry);

	atomic_dec(&group->inotify_data.user->inotify_watches);
}

/* ding dong the mark is dead */
static void inotify_free_mark(struct fsnotify_mark_entry *entry)
{
	struct inotify_inode_mark_entry *ientry;

	ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);

	kmem_cache_free(inotify_inode_mark_cachep, ientry);
}

static int inotify_update_existing_watch(struct fsnotify_group *group,
					 struct inode *inode,
					 u32 arg)
{
	struct fsnotify_mark_entry *entry;
	struct inotify_inode_mark_entry *ientry;
	__u32 old_mask, new_mask;
	__u32 mask;
	int add = (arg & IN_MASK_ADD);
	int ret;

	/* don't allow invalid bits: we don't want flags set */
	mask = inotify_arg_to_mask(arg);
	if (unlikely(!mask))
		return -EINVAL;

	spin_lock(&inode->i_lock);
	entry = fsnotify_find_mark_entry(group, inode);
	spin_unlock(&inode->i_lock);
	if (!entry)
		return -ENOENT;

	ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);

	spin_lock(&entry->lock);

	old_mask = entry->mask;
	if (add) {
		entry->mask |= mask;
		new_mask = entry->mask;
	} else {
		entry->mask = mask;
		new_mask = entry->mask;
	}

	spin_unlock(&entry->lock);

	if (old_mask != new_mask) {
		/* more bits in old than in new? */
		int dropped = (old_mask & ~new_mask);
		/* more bits in this entry than the inode's mask? */
		int do_inode = (new_mask & ~inode->i_fsnotify_mask);
		/* more bits in this entry than the group? */
		int do_group = (new_mask & ~group->mask);

		/* update the inode with this new entry */
		if (dropped || do_inode)
			fsnotify_recalc_inode_mask(inode);

		/* update the group mask with the new mask */
		if (dropped || do_group)
			fsnotify_recalc_group_mask(group);
	}

	/* return the wd */
	ret = ientry->wd;

	/* match the get from fsnotify_find_mark_entry() */
	fsnotify_put_mark(entry);

	return ret;
}
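
/*
 * Userspace sketch (illustrative only, not part of this file): the "add"
 * branch above is what IN_MASK_ADD selects.  Without it, a second
 * inotify_add_watch() on the same inode replaces the mask; with it, the
 * new bits are OR-ed in:
 *
 *	#include <sys/inotify.h>
 *
 *	int widen_watch(int fd, const char *path)
 *	{
 *		int wd = inotify_add_watch(fd, path, IN_CREATE);
 *
 *		if (wd < 0)
 *			return wd;
 *		return inotify_add_watch(fd, path, IN_DELETE | IN_MASK_ADD);
 *	}
 *
 * After the second call the watch reports both IN_CREATE and IN_DELETE,
 * and both calls return the same watch descriptor.
 */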

static int inotify_new_watch(struct fsnotify_group *group,
			     struct inode *inode,
			     u32 arg)
{
	struct inotify_inode_mark_entry *tmp_ientry;
	__u32 mask;
	int ret;
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	/* don't allow invalid bits: we don't want flags set */
	mask = inotify_arg_to_mask(arg);
	if (unlikely(!mask))
		return -EINVAL;

	tmp_ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
	if (unlikely(!tmp_ientry))
		return -ENOMEM;

	fsnotify_init_mark(&tmp_ientry->fsn_entry, inotify_free_mark);
	tmp_ientry->fsn_entry.mask = mask;
	tmp_ientry->wd = -1;

	ret = -ENOSPC;
	if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
		goto out_err;

	ret = inotify_add_to_idr(idr, idr_lock, &group->inotify_data.last_wd,
				 tmp_ientry);
	if (ret)
		goto out_err;

	/* we are on the idr, now get on the inode */
	ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode, 0);
	if (ret) {
		/* we failed to get on the inode, get off the idr */
		inotify_remove_from_idr(group, tmp_ientry);
		goto out_err;
	}

	/* increment the number of watches the user has */
	atomic_inc(&group->inotify_data.user->inotify_watches);

	/* return the watch descriptor for this new entry */
	ret = tmp_ientry->wd;

	/* if this mark added a new event type, update the group mask */
	if (mask & ~group->mask)
		fsnotify_recalc_group_mask(group);

out_err:
	/* match the ref from fsnotify_init_mark() */
	fsnotify_put_mark(&tmp_ientry->fsn_entry);

	return ret;
}

static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
{
	int ret = 0;

retry:
	/* try to update an existing watch with the new arg */
	ret = inotify_update_existing_watch(group, inode, arg);
	/* no mark present, try to add a new one */
	if (ret == -ENOENT)
		ret = inotify_new_watch(group, inode, arg);
	/*
	 * inotify_new_watch could race with another thread which did an
	 * inotify_new_watch between the update_existing and the add watch
	 * above; go back and try to update an existing mark again.
	 */
	if (ret == -EEXIST)
		goto retry;

	return ret;
}

static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsigned int max_events)
{
	struct fsnotify_group *group;
	unsigned int grp_num;

	/* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */
	grp_num = (INOTIFY_GROUP_NUM - atomic_inc_return(&inotify_grp_num));
	group = fsnotify_obtain_group(grp_num, 0, &inotify_fsnotify_ops);
	if (IS_ERR(group))
		return group;

	group->max_events = max_events;

	spin_lock_init(&group->inotify_data.idr_lock);
	idr_init(&group->inotify_data.idr);
	group->inotify_data.last_wd = 0;
	group->inotify_data.user = user;
	group->inotify_data.fa = NULL;

	return group;
}


/* inotify syscalls */
SYSCALL_DEFINE1(inotify_init1, int, flags)
{
	struct fsnotify_group *group;
	struct user_struct *user;
	int ret;

	/* Check the IN_* constants for consistency.  */
	BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);

	if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
		return -EINVAL;

	user = get_current_user();
	if (unlikely(atomic_read(&user->inotify_devs) >=
			inotify_max_user_instances)) {
		ret = -EMFILE;
		goto out_free_uid;
	}

	/* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */
	group = inotify_new_group(user, inotify_max_queued_events);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_free_uid;
	}

	atomic_inc(&user->inotify_devs);

	ret = anon_inode_getfd("inotify", &inotify_fops, group,
			       O_RDONLY | flags);
	if (ret >= 0)
		return ret;

	atomic_dec(&user->inotify_devs);
out_free_uid:
	free_uid(user);
	return ret;
}

SYSCALL_DEFINE0(inotify_init)
{
	return sys_inotify_init1(0);
}
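
/*
 * Userspace sketch (illustrative only, not part of this file): IN_NONBLOCK
 * and IN_CLOEXEC map directly onto O_NONBLOCK and O_CLOEXEC (the
 * BUILD_BUG_ONs above pin that equivalence down), so creating a
 * non-blocking, close-on-exec instance looks like:
 *
 *	#include <sys/inotify.h>
 *
 *	int make_inotify_fd(void)
 *	{
 *		return inotify_init1(IN_NONBLOCK | IN_CLOEXEC);
 *	}
 *
 * With IN_NONBLOCK set, inotify_read() above returns -EAGAIN instead of
 * sleeping when the queue is empty.
 */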

SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
		u32, mask)
{
	struct fsnotify_group *group;
	struct inode *inode;
	struct path path;
	struct file *filp;
	int ret, fput_needed;
	unsigned flags = 0;

	filp = fget_light(fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(filp->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	if (!(mask & IN_DONT_FOLLOW))
		flags |= LOOKUP_FOLLOW;
	if (mask & IN_ONLYDIR)
		flags |= LOOKUP_DIRECTORY;

	ret = inotify_find_inode(pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	inode = path.dentry->d_inode;
	group = filp->private_data;

	/* create/update an inode mark */
	ret = inotify_update_watch(group, inode, mask);
	path_put(&path);
fput_and_out:
	fput_light(filp, fput_needed);
	return ret;
}
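
/*
 * Userspace sketch (illustrative only, not part of this file): IN_ONLYDIR
 * and IN_DONT_FOLLOW are consumed right here as lookup flags rather than
 * becoming part of the event mask.  A hypothetical caller that refuses to
 * watch through symlinks and insists on a directory:
 *
 *	#include <sys/inotify.h>
 *
 *	int watch_dir_strictly(int fd, const char *path)
 *	{
 *		return inotify_add_watch(fd, path,
 *					 IN_CREATE | IN_DELETE |
 *					 IN_ONLYDIR | IN_DONT_FOLLOW);
 *	}
 *
 * If "path" is not a directory, path resolution with LOOKUP_DIRECTORY
 * fails and the call returns -ENOTDIR.
 */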

SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
{
	struct fsnotify_group *group;
	struct inotify_inode_mark_entry *ientry;
	struct file *filp;
	int ret = 0, fput_needed;

	filp = fget_light(fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	ret = -EINVAL;
	if (unlikely(filp->f_op != &inotify_fops))
		goto out;

	group = filp->private_data;

	ret = -EINVAL;
	ientry = inotify_idr_find(group, wd);
	if (unlikely(!ientry))
		goto out;

	ret = 0;

	fsnotify_destroy_mark_by_entry(&ientry->fsn_entry);

	/* match ref taken by inotify_idr_find */
	fsnotify_put_mark(&ientry->fsn_entry);

out:
	fput_light(filp, fput_needed);
	return ret;
}
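
/*
 * Userspace sketch (illustrative only, not part of this file): removing a
 * watch queues a final IN_IGNORED event (see
 * inotify_ignored_and_remove_idr() above), which a reader can use to
 * garbage-collect per-wd state:
 *
 *	#include <sys/inotify.h>
 *
 *	void drop_watch(int fd, int wd)
 *	{
 *		inotify_rm_watch(fd, wd);
 *	}
 *
 * and on the read side, per event:
 *
 *	if (ev->mask & IN_IGNORED)
 *		forget_wd_state(ev->wd);
 *
 * where forget_wd_state() is a hypothetical per-application cleanup hook.
 */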

/*
 * inotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init inotify_user_setup(void)
{
	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark_entry, SLAB_PANIC);
	event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC);

	inotify_max_queued_events = 16384;
	inotify_max_user_instances = 128;
	inotify_max_user_watches = 8192;

	return 0;
}
module_init(inotify_user_setup);