/*
 * fs/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * inotify was largely rewritten to make use of the fsnotify infrastructure
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/file.h>
#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/idr.h>
#include <linux/init.h> /* fs_initcall */
#include <linux/inotify.h>
#include <linux/kernel.h> /* roundup() */
#include <linux/namei.h> /* LOOKUP_FOLLOW */
#include <linux/sched.h> /* struct user */
#include <linux/slab.h> /* struct kmem_cache */
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/anon_inodes.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>

#include "inotify.h"
#include "../fdinfo.h"

#include <asm/ioctls.h>
/* these are configurable via /proc/sys/fs/inotify/ */
static int inotify_max_user_instances __read_mostly;
static int inotify_max_queued_events __read_mostly;
static int inotify_max_user_watches __read_mostly;

static struct kmem_cache *inotify_inode_mark_cachep __read_mostly;

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int zero;

struct ctl_table inotify_table[] = {
	{
		.procname	= "max_user_instances",
		.data		= &inotify_max_user_instances,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
	},
	{
		.procname	= "max_user_watches",
		.data		= &inotify_max_user_watches,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
	},
	{
		.procname	= "max_queued_events",
		.data		= &inotify_max_queued_events,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero
	},
	{ }
};
#endif /* CONFIG_SYSCTL */

static inline __u32 inotify_arg_to_mask(u32 arg)
{
	__u32 mask;

	/*
	 * Everything should accept its own IN_IGNORED, care about its
	 * children, and receive an event when the inode is unmounted.
	 */
	mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD | FS_UNMOUNT);

	/* pull in the event bits and flags the caller requested */
	mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT | IN_EXCL_UNLINK));

	return mask;
}
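
/*
 * Example (illustrative values): inotify_arg_to_mask(IN_CREATE | IN_ONESHOT)
 * returns FS_CREATE | FS_IN_ONESHOT plus the three always-on bits above;
 * the IN_* and FS_* values are bit-for-bit identical, see the
 * BUILD_BUG_ON()s in inotify_user_setup() below.
 */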

static inline u32 inotify_mask_to_arg(__u32 mask)
{
	return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED |
		       IN_Q_OVERFLOW);
}

/* inotify userspace file descriptor functions */
static unsigned int inotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	int ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	mutex_lock(&group->notification_mutex);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = POLLIN | POLLRDNORM;
	mutex_unlock(&group->notification_mutex);

	return ret;
}

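/*
 * round_event_name_len - space the name will occupy in the user buffer.
 *
 * A nonempty name is NUL-terminated and padded out to a multiple of
 * sizeof(struct inotify_event) (16 bytes), which keeps every event header
 * in the read() stream naturally aligned: e.g. name_len == 5 rounds up to
 * roundup(5 + 1, 16) == 16.
 */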
static int round_event_name_len(struct fsnotify_event *fsn_event)
{
	struct inotify_event_info *event;

	event = INOTIFY_E(fsn_event);
	if (!event->name_len)
		return 0;
	return roundup(event->name_len + 1, sizeof(struct inotify_event));
}

/*
 * Get an fsnotify notification event if one exists and is small
 * enough to fit in "count". Return an error pointer if the event is
 * too large to fit.
 *
 * Called with the group->notification_mutex held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	size_t event_size = sizeof(struct inotify_event);
	struct fsnotify_event *event;

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	event = fsnotify_peek_first_event(group);

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	event_size += round_event_name_len(event);
	if (event_size > count)
		return ERR_PTR(-EINVAL);

	/*
	 * We held the notification_mutex the whole time, so this is the
	 * same event we peeked above.
	 */
	fsnotify_remove_first_event(group);

	return event;
}

/*
 * Copy an event to user space, returning how much we copied.
 *
 * We already checked that the event size is no larger than the
 * buffer we had in "get_one_event()" above.
 */
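/*
 * The layout written to the user buffer is a struct inotify_event
 * header (wd, mask, cookie, len) immediately followed, when len != 0,
 * by the name, a terminating '\0', and zero padding up to len bytes.
 */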
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *fsn_event,
				  char __user *buf)
{
	struct inotify_event inotify_event;
	struct inotify_event_info *event;
	size_t event_size = sizeof(struct inotify_event);
	size_t name_len;
	size_t pad_name_len;

	pr_debug("%s: group=%p event=%p\n", __func__, group, fsn_event);

	event = INOTIFY_E(fsn_event);
	name_len = event->name_len;
	/*
	 * round up name_len, plus an extra byte for the terminating '\0',
	 * to a multiple of event_size
	 */
	pad_name_len = round_event_name_len(fsn_event);
	inotify_event.len = pad_name_len;
	inotify_event.mask = inotify_mask_to_arg(fsn_event->mask);
	inotify_event.wd = event->wd;
	inotify_event.cookie = event->sync_cookie;

	/* send the main event */
	if (copy_to_user(buf, &inotify_event, event_size))
		return -EFAULT;

	buf += event_size;

	/*
	 * fsnotify only stores the pathname, so here we have to send the pathname
	 * and then pad that pathname out to a multiple of sizeof(inotify_event)
	 * with zeros.
	 */
	if (pad_name_len) {
		/* copy the path name */
		if (copy_to_user(buf, event->name, name_len))
			return -EFAULT;
		buf += name_len;

		/* fill userspace with 0's */
		if (clear_user(buf, pad_name_len - name_len))
			return -EFAULT;
		event_size += pad_name_len;
	}

	return event_size;
}

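/*
 * inotify_read - drain queued events into the user buffer.
 *
 * Copies as many whole events as fit in "count" bytes; blocks (unless
 * O_NONBLOCK is set) until at least one event is available.  A buffer
 * too small for even the first queued event gets -EINVAL.
 */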
static ssize_t inotify_read(struct file *file, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	start = buf;
	group = file->private_data;

	add_wait_queue(&group->notification_waitq, &wait);
	while (1) {
		mutex_lock(&group->notification_mutex);
		kevent = get_one_event(group, count);
		mutex_unlock(&group->notification_mutex);

		pr_debug("%s: group=%p kevent=%p\n", __func__, group, kevent);

		if (kevent) {
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			fsnotify_destroy_event(group, kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -ERESTARTSYS;
		if (signal_pending(current))
			break;

		if (start != buf)
			break;

		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&group->notification_waitq, &wait);

	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}

static int inotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	/* free this group, matching get was inotify_init->fsnotify_alloc_group */
	fsnotify_destroy_group(group);

	return 0;
}

static long inotify_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event *fsn_event;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;
	p = (void __user *) arg;

	pr_debug("%s: group=%p cmd=%u\n", __func__, group, cmd);

	switch (cmd) {
	case FIONREAD:
		mutex_lock(&group->notification_mutex);
		list_for_each_entry(fsn_event, &group->notification_list,
				    list) {
			send_len += sizeof(struct inotify_event);
			send_len += round_event_name_len(fsn_event);
		}
		mutex_unlock(&group->notification_mutex);
		ret = put_user(send_len, (int __user *) p);
		break;
	}

	return ret;
}

static const struct file_operations inotify_fops = {
	.show_fdinfo	= inotify_show_fdinfo,
	.poll		= inotify_poll,
	.read		= inotify_read,
	.fasync		= fsnotify_fasync,
	.release	= inotify_release,
	.unlocked_ioctl	= inotify_ioctl,
	.compat_ioctl	= inotify_ioctl,
	.llseek		= noop_llseek,
};

/*
 * inotify_find_inode - resolve a user-given path to a specific inode
 */
static int inotify_find_inode(const char __user *dirname, struct path *path, unsigned flags)
{
	int error;

	error = user_path_at(AT_FDCWD, dirname, flags, path);
	if (error)
		return error;
	/* you can only watch an inode if you have read permissions on it */
	error = inode_permission(path->dentry->d_inode, MAY_READ);
	if (error)
		path_put(path);
	return error;
}

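/*
 * Allocate a watch descriptor for this mark and store the mark in the
 * group's idr under it.  idr_alloc_cyclic() hands out wds in increasing
 * order before wrapping, so a recently removed wd is not immediately
 * reused.  On success the idr holds its own reference to the mark.
 */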
static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
			      struct inotify_inode_mark *i_mark)
{
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(idr_lock);

	ret = idr_alloc_cyclic(idr, i_mark, 1, 0, GFP_NOWAIT);
	if (ret >= 0) {
		/* we added the mark to the idr, take a reference */
		i_mark->wd = ret;
		fsnotify_get_mark(&i_mark->fsn_mark);
	}

	spin_unlock(idr_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}

static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group *group,
							  int wd)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark *i_mark;

	assert_spin_locked(idr_lock);

	i_mark = idr_find(idr, wd);
	if (i_mark) {
		struct fsnotify_mark *fsn_mark = &i_mark->fsn_mark;

		fsnotify_get_mark(fsn_mark);
		/* One ref for being in the idr, one ref we just took */
		BUG_ON(atomic_read(&fsn_mark->refcnt) < 2);
	}

	return i_mark;
}

static struct inotify_inode_mark *inotify_idr_find(struct fsnotify_group *group,
						   int wd)
{
	struct inotify_inode_mark *i_mark;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	spin_lock(idr_lock);
	i_mark = inotify_idr_find_locked(group, wd);
	spin_unlock(idr_lock);

	return i_mark;
}

static void do_inotify_remove_from_idr(struct fsnotify_group *group,
				       struct inotify_inode_mark *i_mark)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	int wd = i_mark->wd;

	assert_spin_locked(idr_lock);

	idr_remove(idr, wd);

	/* removed from the idr, drop that ref */
	fsnotify_put_mark(&i_mark->fsn_mark);
}

/*
 * Remove the mark from the idr (if present) and drop the reference
 * on the mark because it was in the idr.
 */
static void inotify_remove_from_idr(struct fsnotify_group *group,
				    struct inotify_inode_mark *i_mark)
{
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark *found_i_mark = NULL;
	int wd;

	spin_lock(idr_lock);
	wd = i_mark->wd;

	/*
	 * Does this i_mark think it is in the idr?  We shouldn't get called
	 * if it wasn't.
	 */
	if (wd == -1) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
			" i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
			i_mark->fsn_mark.group, i_mark->fsn_mark.inode);
		goto out;
	}

	/* Let's look in the idr to see if we find it */
	found_i_mark = inotify_idr_find_locked(group, wd);
	if (unlikely(!found_i_mark)) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
			" i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
			i_mark->fsn_mark.group, i_mark->fsn_mark.inode);
		goto out;
	}

	/*
	 * We found a mark in the idr at the right wd, but it's
	 * not the mark we were told to remove.  Our internal
	 * bookkeeping has gone seriously wrong somewhere.
	 */
	if (unlikely(found_i_mark != i_mark)) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p "
			"mark->inode=%p found_i_mark=%p found_i_mark->wd=%d "
			"found_i_mark->group=%p found_i_mark->inode=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group,
			i_mark->fsn_mark.inode, found_i_mark, found_i_mark->wd,
			found_i_mark->fsn_mark.group,
			found_i_mark->fsn_mark.inode);
		goto out;
	}

	/*
	 * One ref for being in the idr,
	 * one ref held by the caller trying to kill us,
	 * one ref grabbed by inotify_idr_find.
	 */
	if (unlikely(atomic_read(&i_mark->fsn_mark.refcnt) < 3)) {
		printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
			" i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
			i_mark->fsn_mark.group, i_mark->fsn_mark.inode);
		/* we can't really recover with bad refcounting... */
		BUG();
	}

	do_inotify_remove_from_idr(group, i_mark);
out:
	/* match the ref taken by inotify_idr_find_locked() */
	if (found_i_mark)
		fsnotify_put_mark(&found_i_mark->fsn_mark);
	i_mark->wd = -1;
	spin_unlock(idr_lock);
}

/*
 * Send IN_IGNORED for this wd, remove this wd from the idr.
 */
void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
				    struct fsnotify_group *group)
{
	struct inotify_inode_mark *i_mark;

	/* Queue ignore event for the watch */
	inotify_handle_event(group, NULL, fsn_mark, NULL, FS_IN_IGNORED,
			     NULL, FSNOTIFY_EVENT_NONE, NULL, 0);

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
	/* remove this mark from the idr */
	inotify_remove_from_idr(group, i_mark);

	atomic_dec(&group->inotify_data.user->inotify_watches);
}

/* ding dong the mark is dead */
static void inotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	struct inotify_inode_mark *i_mark;

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);

	kmem_cache_free(inotify_inode_mark_cachep, i_mark);
}

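/*
 * Update the mask of an existing watch on this inode.  With IN_MASK_ADD
 * the new bits are OR'ed into the current mask; otherwise the mask is
 * replaced outright.  Returns the existing wd, or -ENOENT if this group
 * has no mark on the inode yet.
 */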
static int inotify_update_existing_watch(struct fsnotify_group *group,
					 struct inode *inode,
					 u32 arg)
{
	struct fsnotify_mark *fsn_mark;
	struct inotify_inode_mark *i_mark;
	__u32 old_mask, new_mask;
	__u32 mask;
	int add = (arg & IN_MASK_ADD);
	int ret;

	mask = inotify_arg_to_mask(arg);

	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark)
		return -ENOENT;

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);

	spin_lock(&fsn_mark->lock);

	old_mask = fsn_mark->mask;
	if (add)
		fsnotify_set_mark_mask_locked(fsn_mark, (fsn_mark->mask | mask));
	else
		fsnotify_set_mark_mask_locked(fsn_mark, mask);
	new_mask = fsn_mark->mask;

	spin_unlock(&fsn_mark->lock);

	if (old_mask != new_mask) {
		/* more bits in old than in new? */
		int dropped = (old_mask & ~new_mask);
		/* more bits in this fsn_mark than the inode's mask? */
		int do_inode = (new_mask & ~inode->i_fsnotify_mask);

		/* update the inode with this new fsn_mark */
		if (dropped || do_inode)
			fsnotify_recalc_inode_mask(inode);
	}

	/* return the wd */
	ret = i_mark->wd;

	/* match the get from fsnotify_find_inode_mark() */
	fsnotify_put_mark(fsn_mark);

	return ret;
}

static int inotify_new_watch(struct fsnotify_group *group,
			     struct inode *inode,
			     u32 arg)
{
	struct inotify_inode_mark *tmp_i_mark;
	__u32 mask;
	int ret;
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	mask = inotify_arg_to_mask(arg);

	tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
	if (unlikely(!tmp_i_mark))
		return -ENOMEM;

	fsnotify_init_mark(&tmp_i_mark->fsn_mark, inotify_free_mark);
	tmp_i_mark->fsn_mark.mask = mask;
	tmp_i_mark->wd = -1;

	ret = -ENOSPC;
	if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
		goto out_err;

	ret = inotify_add_to_idr(idr, idr_lock, tmp_i_mark);
	if (ret)
		goto out_err;

	/* we are on the idr, now get on the inode */
	ret = fsnotify_add_mark_locked(&tmp_i_mark->fsn_mark, group, inode,
				       NULL, 0);
	if (ret) {
		/* we failed to get on the inode, get off the idr */
		inotify_remove_from_idr(group, tmp_i_mark);
		goto out_err;
	}

	/* increment the number of watches the user has */
	atomic_inc(&group->inotify_data.user->inotify_watches);

	/* return the watch descriptor for this new mark */
	ret = tmp_i_mark->wd;

out_err:
	/* match the ref from fsnotify_init_mark() */
	fsnotify_put_mark(&tmp_i_mark->fsn_mark);

	return ret;
}

static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
{
	int ret = 0;

	mutex_lock(&group->mark_mutex);
	/* try to update an existing watch with the new arg */
	ret = inotify_update_existing_watch(group, inode, arg);
	/* no mark present, try to add a new one */
	if (ret == -ENOENT)
		ret = inotify_new_watch(group, inode, arg);
	mutex_unlock(&group->mark_mutex);

	return ret;
}

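/*
 * Allocate the per-instance fsnotify group.  The overflow event is
 * allocated up front so that an IN_Q_OVERFLOW can still be queued even
 * when memory is too tight to allocate a normal event.
 */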
static struct fsnotify_group *inotify_new_group(unsigned int max_events)
{
	struct fsnotify_group *group;
	struct inotify_event_info *oevent;

	group = fsnotify_alloc_group(&inotify_fsnotify_ops);
	if (IS_ERR(group))
		return group;

	oevent = kmalloc(sizeof(struct inotify_event_info), GFP_KERNEL);
	if (unlikely(!oevent)) {
		fsnotify_destroy_group(group);
		return ERR_PTR(-ENOMEM);
	}
	group->overflow_event = &oevent->fse;
	fsnotify_init_event(group->overflow_event, NULL, FS_Q_OVERFLOW);
	oevent->wd = -1;
	oevent->sync_cookie = 0;
	oevent->name_len = 0;

	group->max_events = max_events;

	spin_lock_init(&group->inotify_data.idr_lock);
	idr_init(&group->inotify_data.idr);
	group->inotify_data.user = get_current_user();

	if (atomic_inc_return(&group->inotify_data.user->inotify_devs) >
	    inotify_max_user_instances) {
		fsnotify_destroy_group(group);
		return ERR_PTR(-EMFILE);
	}

	return group;
}


/* inotify syscalls */
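/*
 * From userspace the sequence is roughly (a sketch, not part of this
 * file):
 *
 *	int fd = inotify_init1(IN_CLOEXEC);
 *	int wd = inotify_add_watch(fd, "/some/dir", IN_CREATE | IN_DELETE);
 *	read(fd, buf, sizeof(buf));	// stream of struct inotify_event
 *	inotify_rm_watch(fd, wd);
 */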
SYSCALL_DEFINE1(inotify_init1, int, flags)
{
	struct fsnotify_group *group;
	int ret;

	/* Check the IN_* constants for consistency.  */
	BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);

	if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
		return -EINVAL;

	/*
	 * fsnotify_alloc_group() took a reference to the group; we drop it
	 * when we kill the file at the end.
	 */
	group = inotify_new_group(inotify_max_queued_events);
	if (IS_ERR(group))
		return PTR_ERR(group);

	ret = anon_inode_getfd("inotify", &inotify_fops, group,
			       O_RDONLY | flags);
	if (ret < 0)
		fsnotify_destroy_group(group);

	return ret;
}

SYSCALL_DEFINE0(inotify_init)
{
	return sys_inotify_init1(0);
}

SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
		u32, mask)
{
	struct fsnotify_group *group;
	struct inode *inode;
	struct path path;
	struct fd f;
	int ret;
	unsigned flags = 0;

	/*
	 * We share a lot of code with fs/dnotify.  We also share
	 * the bit layout between inotify's IN_* and the fsnotify
	 * FS_*.  This check ensures that only the inotify IN_*
	 * bits get passed in and set in watches/events.
	 */
	if (unlikely(mask & ~ALL_INOTIFY_BITS))
		return -EINVAL;
	/*
	 * Require at least one valid bit set in the mask.
	 * Without _something_ set, we would have no events to
	 * watch for.
	 */
	if (unlikely(!(mask & ALL_INOTIFY_BITS)))
		return -EINVAL;

	f = fdget(fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(f.file->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	if (!(mask & IN_DONT_FOLLOW))
		flags |= LOOKUP_FOLLOW;
	if (mask & IN_ONLYDIR)
		flags |= LOOKUP_DIRECTORY;

	ret = inotify_find_inode(pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	inode = path.dentry->d_inode;
	group = f.file->private_data;

	/* create/update an inode mark */
	ret = inotify_update_watch(group, inode, mask);
	path_put(&path);
fput_and_out:
	fdput(f);
	return ret;
}

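/*
 * Removing a watch tears down the underlying fsnotify mark; the final
 * IN_IGNORED event for the wd is queued by
 * inotify_ignored_and_remove_idr() above, invoked from the group's
 * mark-freeing callback in inotify_fsnotify.c.
 */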
SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
{
	struct fsnotify_group *group;
	struct inotify_inode_mark *i_mark;
	struct fd f;
	int ret = 0;

	f = fdget(fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	ret = -EINVAL;
	if (unlikely(f.file->f_op != &inotify_fops))
		goto out;

	group = f.file->private_data;

	ret = -EINVAL;
	i_mark = inotify_idr_find(group, wd);
	if (unlikely(!i_mark))
		goto out;

	ret = 0;

	fsnotify_destroy_mark(&i_mark->fsn_mark, group);

	/* match ref taken by inotify_idr_find */
	fsnotify_put_mark(&i_mark->fsn_mark);

out:
	fdput(f);
	return ret;
}

/*
 * inotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init inotify_user_setup(void)
{
	BUILD_BUG_ON(IN_ACCESS != FS_ACCESS);
	BUILD_BUG_ON(IN_MODIFY != FS_MODIFY);
	BUILD_BUG_ON(IN_ATTRIB != FS_ATTRIB);
	BUILD_BUG_ON(IN_CLOSE_WRITE != FS_CLOSE_WRITE);
	BUILD_BUG_ON(IN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
	BUILD_BUG_ON(IN_OPEN != FS_OPEN);
	BUILD_BUG_ON(IN_MOVED_FROM != FS_MOVED_FROM);
	BUILD_BUG_ON(IN_MOVED_TO != FS_MOVED_TO);
	BUILD_BUG_ON(IN_CREATE != FS_CREATE);
	BUILD_BUG_ON(IN_DELETE != FS_DELETE);
	BUILD_BUG_ON(IN_DELETE_SELF != FS_DELETE_SELF);
	BUILD_BUG_ON(IN_MOVE_SELF != FS_MOVE_SELF);
	BUILD_BUG_ON(IN_UNMOUNT != FS_UNMOUNT);
	BUILD_BUG_ON(IN_Q_OVERFLOW != FS_Q_OVERFLOW);
	BUILD_BUG_ON(IN_IGNORED != FS_IN_IGNORED);
	BUILD_BUG_ON(IN_EXCL_UNLINK != FS_EXCL_UNLINK);
	BUILD_BUG_ON(IN_ISDIR != FS_ISDIR);
	BUILD_BUG_ON(IN_ONESHOT != FS_IN_ONESHOT);

	BUG_ON(hweight32(ALL_INOTIFY_BITS) != 21);

	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark, SLAB_PANIC);

	inotify_max_queued_events = 16384;
	inotify_max_user_instances = 128;
	inotify_max_user_watches = 8192;

	return 0;
}
fs_initcall(inotify_user_setup);