Commit | Line | Data |
---|---|---|
33d3dfff | 1 | #include <linux/fanotify.h> |
11637e4b | 2 | #include <linux/fcntl.h> |
2a3edf86 | 3 | #include <linux/file.h> |
11637e4b | 4 | #include <linux/fs.h> |
52c923dd | 5 | #include <linux/anon_inodes.h> |
11637e4b | 6 | #include <linux/fsnotify_backend.h> |
2a3edf86 | 7 | #include <linux/init.h> |
a1014f10 | 8 | #include <linux/mount.h> |
2a3edf86 | 9 | #include <linux/namei.h> |
a1014f10 | 10 | #include <linux/poll.h> |
11637e4b EP |
11 | #include <linux/security.h> |
12 | #include <linux/syscalls.h> | |
e4e047a2 | 13 | #include <linux/slab.h> |
2a3edf86 | 14 | #include <linux/types.h> |
a1014f10 EP |
15 | #include <linux/uaccess.h> |
16 | ||
17 | #include <asm/ioctls.h> | |
11637e4b | 18 | |
extern const struct fsnotify_ops fanotify_fsnotify_ops;

/* slab cache for the marks fanotify attaches to inodes and vfsmounts */
static struct kmem_cache *fanotify_mark_cache __read_mostly;
/* slab cache for the per-event trackers used by permission responses */
static struct kmem_cache *fanotify_response_event_cache __read_mostly;

/*
 * One outstanding permission event: the fd handed to userspace and the
 * event waiting for its FAN_ALLOW/FAN_DENY answer.  Linked on
 * group->fanotify_data.access_list under access_mutex.
 */
struct fanotify_response_event {
	struct list_head list;
	__s32 fd;
	struct fsnotify_event *event;
};
2a3edf86 | 29 | |
a1014f10 EP |
/*
 * Get an fsnotify notification event if one exists and is small
 * enough to fit in "count". Return an error pointer if the count
 * is not large enough.
 *
 * Called with the group->notification_mutex held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	BUG_ON(!mutex_is_locked(&group->notification_mutex));

	pr_debug("%s: group=%p count=%zd\n", __func__, group, count);

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	/* every fanotify event reads as one fixed-size metadata record */
	if (FAN_EVENT_METADATA_LEN > count)
		return ERR_PTR(-EINVAL);

	/* held the notification_mutex the whole time, so this is the
	 * same event we peeked above */
	return fsnotify_remove_notify_event(group);
}
54 | ||
/*
 * Open the object an event refers to and install it in the listener's fd
 * table.  Returns the new fd on success or a negative errno (which the
 * caller still delivers to userspace in the event's metadata).
 */
static int create_fd(struct fsnotify_group *group, struct fsnotify_event *event)
{
	int client_fd;
	struct dentry *dentry;
	struct vfsmount *mnt;
	struct file *new_file;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	/* reserve a slot in the fd table before trying to open anything */
	client_fd = get_unused_fd();
	if (client_fd < 0)
		return client_fd;

	/* fanotify only ever queues path-type events; anything else is a bug */
	if (event->data_type != FSNOTIFY_EVENT_PATH) {
		WARN_ON(1);
		put_unused_fd(client_fd);
		return -EINVAL;
	}

	/*
	 * we need a new file handle for the userspace program so it can read even if it was
	 * originally opened O_WRONLY.
	 */
	dentry = dget(event->path.dentry);
	mnt = mntget(event->path.mnt);
	/* it's possible this event was an overflow event.  in that case dentry and mnt
	 * are NULL;  That's fine, just don't call dentry open */
	if (dentry && mnt)
		new_file = dentry_open(dentry, mnt,
				       O_RDONLY | O_LARGEFILE | FMODE_NONOTIFY,
				       current_cred());
	else
		new_file = ERR_PTR(-EOVERFLOW);
	if (IS_ERR(new_file)) {
		/*
		 * we still send an event even if we can't open the file.  this
		 * can happen when say tasks are gone and we try to open their
		 * /proc files or we try to open a WRONLY file like in sysfs
		 * we just send the errno to userspace since there isn't much
		 * else we can do.
		 */
		put_unused_fd(client_fd);
		client_fd = PTR_ERR(new_file);
	} else {
		/* fd_install transfers our reference on new_file to the table */
		fd_install(client_fd, new_file);
	}

	return client_fd;
}
104 | ||
105 | static ssize_t fill_event_metadata(struct fsnotify_group *group, | |
106 | struct fanotify_event_metadata *metadata, | |
107 | struct fsnotify_event *event) | |
108 | { | |
109 | pr_debug("%s: group=%p metadata=%p event=%p\n", __func__, | |
110 | group, metadata, event); | |
111 | ||
112 | metadata->event_len = FAN_EVENT_METADATA_LEN; | |
113 | metadata->vers = FANOTIFY_METADATA_VERSION; | |
33d3dfff | 114 | metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS; |
32c32632 | 115 | metadata->pid = pid_vnr(event->tgid); |
22aa425d | 116 | metadata->fd = create_fd(group, event); |
a1014f10 | 117 | |
22aa425d | 118 | return metadata->fd; |
a1014f10 EP |
119 | } |
120 | ||
b2d87909 EP |
121 | #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS |
122 | static struct fanotify_response_event *dequeue_re(struct fsnotify_group *group, | |
123 | __s32 fd) | |
124 | { | |
125 | struct fanotify_response_event *re, *return_re = NULL; | |
126 | ||
127 | mutex_lock(&group->fanotify_data.access_mutex); | |
128 | list_for_each_entry(re, &group->fanotify_data.access_list, list) { | |
129 | if (re->fd != fd) | |
130 | continue; | |
131 | ||
132 | list_del_init(&re->list); | |
133 | return_re = re; | |
134 | break; | |
135 | } | |
136 | mutex_unlock(&group->fanotify_data.access_mutex); | |
137 | ||
138 | pr_debug("%s: found return_re=%p\n", __func__, return_re); | |
139 | ||
140 | return return_re; | |
141 | } | |
142 | ||
143 | static int process_access_response(struct fsnotify_group *group, | |
144 | struct fanotify_response *response_struct) | |
145 | { | |
146 | struct fanotify_response_event *re; | |
147 | __s32 fd = response_struct->fd; | |
148 | __u32 response = response_struct->response; | |
149 | ||
150 | pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group, | |
151 | fd, response); | |
152 | /* | |
153 | * make sure the response is valid, if invalid we do nothing and either | |
154 | * userspace can send a valid responce or we will clean it up after the | |
155 | * timeout | |
156 | */ | |
157 | switch (response) { | |
158 | case FAN_ALLOW: | |
159 | case FAN_DENY: | |
160 | break; | |
161 | default: | |
162 | return -EINVAL; | |
163 | } | |
164 | ||
165 | if (fd < 0) | |
166 | return -EINVAL; | |
167 | ||
168 | re = dequeue_re(group, fd); | |
169 | if (!re) | |
170 | return -ENOENT; | |
171 | ||
172 | re->event->response = response; | |
173 | ||
174 | wake_up(&group->fanotify_data.access_waitq); | |
175 | ||
176 | kmem_cache_free(fanotify_response_event_cache, re); | |
177 | ||
178 | return 0; | |
179 | } | |
180 | ||
181 | static int prepare_for_access_response(struct fsnotify_group *group, | |
182 | struct fsnotify_event *event, | |
183 | __s32 fd) | |
184 | { | |
185 | struct fanotify_response_event *re; | |
186 | ||
187 | if (!(event->mask & FAN_ALL_PERM_EVENTS)) | |
188 | return 0; | |
189 | ||
190 | re = kmem_cache_alloc(fanotify_response_event_cache, GFP_KERNEL); | |
191 | if (!re) | |
192 | return -ENOMEM; | |
193 | ||
194 | re->event = event; | |
195 | re->fd = fd; | |
196 | ||
197 | mutex_lock(&group->fanotify_data.access_mutex); | |
198 | list_add_tail(&re->list, &group->fanotify_data.access_list); | |
199 | mutex_unlock(&group->fanotify_data.access_mutex); | |
200 | ||
201 | return 0; | |
202 | } | |
203 | ||
204 | static void remove_access_response(struct fsnotify_group *group, | |
205 | struct fsnotify_event *event, | |
206 | __s32 fd) | |
207 | { | |
208 | struct fanotify_response_event *re; | |
209 | ||
210 | if (!(event->mask & FAN_ALL_PERM_EVENTS)) | |
211 | return; | |
212 | ||
213 | re = dequeue_re(group, fd); | |
214 | if (!re) | |
215 | return; | |
216 | ||
217 | BUG_ON(re->event != event); | |
218 | ||
219 | kmem_cache_free(fanotify_response_event_cache, re); | |
220 | ||
221 | return; | |
222 | } | |
223 | #else | |
224 | static int prepare_for_access_response(struct fsnotify_group *group, | |
225 | struct fsnotify_event *event, | |
226 | __s32 fd) | |
227 | { | |
228 | return 0; | |
229 | } | |
230 | ||
231 | static void remove_access_response(struct fsnotify_group *group, | |
232 | struct fsnotify_event *event, | |
233 | __s32 fd) | |
234 | { | |
8860f060 | 235 | return; |
b2d87909 EP |
236 | } |
237 | #endif | |
238 | ||
a1014f10 EP |
/*
 * Turn one queued event into a fanotify_event_metadata record in the user
 * buffer: open an fd for the object, queue a response tracker for
 * permission events, then copy the fixed-size record out.  Returns bytes
 * copied or a negative errno (cleaning up the fd/tracker on failure).
 */
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *event,
				  char __user *buf)
{
	struct fanotify_event_metadata fanotify_event_metadata;
	int fd, ret;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	fd = fill_event_metadata(group, &fanotify_event_metadata, event);
	if (fd < 0)
		return fd;

	/* permission events must be findable later by this fd */
	ret = prepare_for_access_response(group, event, fd);
	if (ret)
		goto out_close_fd;

	ret = -EFAULT;
	if (copy_to_user(buf, &fanotify_event_metadata, FAN_EVENT_METADATA_LEN))
		goto out_kill_access_response;

	return FAN_EVENT_METADATA_LEN;

out_kill_access_response:
	remove_access_response(group, event, fd);
out_close_fd:
	sys_close(fd);
	return ret;
}
268 | ||
/* fanotify userspace file descriptor functions */
270 | static unsigned int fanotify_poll(struct file *file, poll_table *wait) | |
271 | { | |
272 | struct fsnotify_group *group = file->private_data; | |
273 | int ret = 0; | |
274 | ||
275 | poll_wait(file, &group->notification_waitq, wait); | |
276 | mutex_lock(&group->notification_mutex); | |
277 | if (!fsnotify_notify_queue_is_empty(group)) | |
278 | ret = POLLIN | POLLRDNORM; | |
279 | mutex_unlock(&group->notification_mutex); | |
280 | ||
281 | return ret; | |
282 | } | |
283 | ||
/*
 * Drain as many queued events as fit into the user buffer, blocking (unless
 * O_NONBLOCK) while the queue is empty and nothing has been copied yet.
 */
static ssize_t fanotify_read(struct file *file, char __user *buf,
			     size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	while (1) {
		/* must be on the waitqueue before checking for events to
		 * avoid missing a wakeup between the check and schedule() */
		prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);

		mutex_lock(&group->notification_mutex);
		kevent = get_one_event(group, count);
		mutex_unlock(&group->notification_mutex);

		if (kevent) {
			/* an ERR_PTR means the remaining buffer space can't
			 * hold even one event */
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			/* drop the queue's reference to the event */
			fsnotify_put_event(kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -EINTR;
		if (signal_pending(current))
			break;

		/* already copied at least one event: return what we have */
		if (start != buf)
			break;

		schedule();
	}

	finish_wait(&group->notification_waitq, &wait);
	/* partial progress beats most late errors, but -EFAULT still wins */
	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}
336 | ||
b2d87909 EP |
337 | static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos) |
338 | { | |
339 | #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS | |
340 | struct fanotify_response response = { .fd = -1, .response = -1 }; | |
341 | struct fsnotify_group *group; | |
342 | int ret; | |
343 | ||
344 | group = file->private_data; | |
345 | ||
346 | if (count > sizeof(response)) | |
347 | count = sizeof(response); | |
348 | ||
349 | pr_debug("%s: group=%p count=%zu\n", __func__, group, count); | |
350 | ||
351 | if (copy_from_user(&response, buf, count)) | |
352 | return -EFAULT; | |
353 | ||
354 | ret = process_access_response(group, &response); | |
355 | if (ret < 0) | |
356 | count = ret; | |
357 | ||
358 | return count; | |
359 | #else | |
360 | return -EINVAL; | |
361 | #endif | |
362 | } | |
363 | ||
52c923dd EP |
/* final fput on the fanotify fd: drop the group reference taken at init */
static int fanotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;

	pr_debug("%s: file=%p group=%p\n", __func__, file, group);

	/* matches the fanotify_init->fsnotify_alloc_group */
	fsnotify_put_group(group);

	return 0;
}
375 | ||
a1014f10 EP |
/* FIONREAD: report how many bytes a read() could currently return */
static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event_holder *holder;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;

	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		/* every queued event reads as one fixed-size metadata record */
		mutex_lock(&group->notification_mutex);
		list_for_each_entry(holder, &group->notification_list, event_list)
			send_len += FAN_EVENT_METADATA_LEN;
		mutex_unlock(&group->notification_mutex);
		/* NOTE(review): send_len is size_t but is stored through an
		 * int __user pointer, so values above INT_MAX would be
		 * truncated — confirm the queue can never grow that large */
		ret = put_user(send_len, (int __user *) p);
		break;
	}

	return ret;
}
400 | ||
/* file_operations backing the anonymous fd returned by fanotify_init() */
static const struct file_operations fanotify_fops = {
	.poll = fanotify_poll,
	.read = fanotify_read,
	.write = fanotify_write,
	.fasync = NULL,
	.release = fanotify_release,
	.unlocked_ioctl = fanotify_ioctl,
	.compat_ioctl = fanotify_ioctl,
};
410 | ||
2a3edf86 EP |
/* mark destructor handed to fsnotify_init_mark(): return it to the slab */
static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	kmem_cache_free(fanotify_mark_cache, fsn_mark);
}
415 | ||
/*
 * Resolve fanotify_mark()'s dfd/filename pair to a struct path.  A NULL
 * filename means "mark the object dfd refers to"; otherwise do a normal
 * (possibly dfd-relative) path lookup.  On success the caller owns a path
 * reference and must path_put() it.
 */
static int fanotify_find_path(int dfd, const char __user *filename,
			      struct path *path, unsigned int flags)
{
	int ret;

	pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__,
		 dfd, filename, flags);

	if (filename == NULL) {
		struct file *file;
		int fput_needed;

		ret = -EBADF;
		file = fget_light(dfd, &fput_needed);
		if (!file)
			goto out;

		/* FAN_MARK_ONLYDIR requires dfd itself to be a directory */
		ret = -ENOTDIR;
		if ((flags & FAN_MARK_ONLYDIR) &&
		    !(S_ISDIR(file->f_path.dentry->d_inode->i_mode))) {
			fput_light(file, fput_needed);
			goto out;
		}

		/* take our own reference before dropping the file */
		*path = file->f_path;
		path_get(path);
		fput_light(file, fput_needed);
	} else {
		unsigned int lookup_flags = 0;

		if (!(flags & FAN_MARK_DONT_FOLLOW))
			lookup_flags |= LOOKUP_FOLLOW;
		if (flags & FAN_MARK_ONLYDIR)
			lookup_flags |= LOOKUP_DIRECTORY;

		ret = user_path_at(dfd, filename, lookup_flags, path);
		if (ret)
			goto out;
	}

	/* you can only watch an inode if you have read permissions on it */
	ret = inode_permission(path->dentry->d_inode, MAY_READ);
	if (ret)
		path_put(path);
out:
	return ret;
}
463 | ||
b9e4e3bd EP |
464 | static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark, |
465 | __u32 mask, | |
466 | unsigned int flags) | |
088b09b0 AG |
467 | { |
468 | __u32 oldmask; | |
469 | ||
470 | spin_lock(&fsn_mark->lock); | |
b9e4e3bd EP |
471 | if (!(flags & FAN_MARK_IGNORED_MASK)) { |
472 | oldmask = fsn_mark->mask; | |
473 | fsnotify_set_mark_mask_locked(fsn_mark, (oldmask & ~mask)); | |
474 | } else { | |
475 | oldmask = fsn_mark->ignored_mask; | |
476 | fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask & ~mask)); | |
477 | } | |
088b09b0 AG |
478 | spin_unlock(&fsn_mark->lock); |
479 | ||
480 | if (!(oldmask & ~mask)) | |
481 | fsnotify_destroy_mark(fsn_mark); | |
482 | ||
483 | return mask & oldmask; | |
484 | } | |
485 | ||
/*
 * Remove mask bits from this group's mark on a vfsmount; recalculates the
 * aggregate masks if any relevant bit actually went away.  Returns
 * -ENOENT if the group has no mark on mnt.
 */
static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
					 struct vfsmount *mnt, __u32 mask,
					 unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;

	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!fsn_mark)
		return -ENOENT;

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
	/* drop the reference taken by fsnotify_find_vfsmount_mark() */
	fsnotify_put_mark(fsn_mark);
	if (removed & group->mask)
		fsnotify_recalc_group_mask(group);
	if (removed & mnt->mnt_fsnotify_mask)
		fsnotify_recalc_vfsmount_mask(mnt);

	return 0;
}
2a3edf86 | 506 | |
/*
 * Remove mask bits from this group's mark on an inode; recalculates the
 * aggregate masks if any relevant bit actually went away.  Returns
 * -ENOENT if the group has no mark on the inode.
 */
static int fanotify_remove_inode_mark(struct fsnotify_group *group,
				      struct inode *inode, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;

	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark)
		return -ENOENT;

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
	/* matches the fsnotify_find_inode_mark() */
	fsnotify_put_mark(fsn_mark);

	if (removed & group->mask)
		fsnotify_recalc_group_mask(group);
	if (removed & inode->i_fsnotify_mask)
		fsnotify_recalc_inode_mask(inode);

	return 0;
}
529 | ||
b9e4e3bd EP |
530 | static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark, |
531 | __u32 mask, | |
532 | unsigned int flags) | |
912ee394 AG |
533 | { |
534 | __u32 oldmask; | |
535 | ||
536 | spin_lock(&fsn_mark->lock); | |
b9e4e3bd EP |
537 | if (!(flags & FAN_MARK_IGNORED_MASK)) { |
538 | oldmask = fsn_mark->mask; | |
539 | fsnotify_set_mark_mask_locked(fsn_mark, (oldmask | mask)); | |
540 | } else { | |
541 | oldmask = fsn_mark->ignored_mask; | |
542 | fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask | mask)); | |
c9778a98 EP |
543 | if (flags & FAN_MARK_IGNORED_SURV_MODIFY) |
544 | fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY; | |
b9e4e3bd | 545 | } |
912ee394 AG |
546 | spin_unlock(&fsn_mark->lock); |
547 | ||
548 | return mask & ~oldmask; | |
549 | } | |
550 | ||
/*
 * Add (or extend) this group's mark on a vfsmount.  Allocates and attaches
 * a fresh mark if the group has none on mnt yet, then ORs in the requested
 * bits and recalculates the aggregate masks if anything new was set.
 */
static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
				      struct vfsmount *mnt, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;

	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!fsn_mark) {
		int ret;

		fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
		if (!fsn_mark)
			return -ENOMEM;

		fsnotify_init_mark(fsn_mark, fanotify_free_mark);
		ret = fsnotify_add_mark(fsn_mark, group, NULL, mnt, 0);
		if (ret) {
			/* never attached: free directly, no refcounting yet */
			fanotify_free_mark(fsn_mark);
			return ret;
		}
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
	/* drop our reference (from find or init above) */
	fsnotify_put_mark(fsn_mark);
	if (added) {
		/* only recalc when the new bits weren't already present */
		if (added & ~group->mask)
			fsnotify_recalc_group_mask(group);
		if (added & ~mnt->mnt_fsnotify_mask)
			fsnotify_recalc_vfsmount_mask(mnt);
	}
	return 0;
}
583 | ||
/*
 * Add (or extend) this group's mark on an inode.  Allocates and attaches a
 * fresh mark if the group has none on the inode yet, then ORs in the
 * requested bits and recalculates the aggregate masks if anything new was
 * set.
 */
static int fanotify_add_inode_mark(struct fsnotify_group *group,
				   struct inode *inode, __u32 mask,
				   unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;

	pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);

	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark) {
		int ret;

		fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
		if (!fsn_mark)
			return -ENOMEM;

		fsnotify_init_mark(fsn_mark, fanotify_free_mark);
		ret = fsnotify_add_mark(fsn_mark, group, inode, NULL, 0);
		if (ret) {
			/* never attached: free directly, no refcounting yet */
			fanotify_free_mark(fsn_mark);
			return ret;
		}
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
	/* drop our reference (from find or init above) */
	fsnotify_put_mark(fsn_mark);
	if (added) {
		/* only recalc when the new bits weren't already present */
		if (added & ~group->mask)
			fsnotify_recalc_group_mask(group);
		if (added & ~inode->i_fsnotify_mask)
			fsnotify_recalc_inode_mask(inode);
	}
	return 0;
}
2a3edf86 | 618 | |
52c923dd | 619 | /* fanotify syscalls */ |
/*
 * fanotify_init(2): create a new notification group and return an
 * anonymous fd for it.  Requires CAP_SYS_ADMIN.
 */
SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
{
	struct fsnotify_group *group;
	int f_flags, fd;

	pr_debug("%s: flags=%d event_f_flags=%d\n",
		 __func__, flags, event_f_flags);

	/* no per-event f_flags are supported yet */
	if (event_f_flags)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (flags & ~FAN_ALL_INIT_FLAGS)
		return -EINVAL;

	/* NOTE(review): FMODE_NONOTIFY presumably keeps operations on this
	 * fd from generating further fsnotify events — confirm in backend */
	f_flags = O_RDWR | FMODE_NONOTIFY;
	if (flags & FAN_CLOEXEC)
		f_flags |= O_CLOEXEC;
	if (flags & FAN_NONBLOCK)
		f_flags |= O_NONBLOCK;

	/* fsnotify_alloc_group takes a ref.  Dropped in fanotify_release */
	group = fsnotify_alloc_group(&fanotify_fsnotify_ops);
	if (IS_ERR(group))
		return PTR_ERR(group);

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	mutex_init(&group->fanotify_data.access_mutex);
	init_waitqueue_head(&group->fanotify_data.access_waitq);
	INIT_LIST_HEAD(&group->fanotify_data.access_list);
#endif

	fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
	if (fd < 0)
		goto out_put_group;

	return fd;

out_put_group:
	fsnotify_put_group(group);
	return fd;
}
bbaa4168 | 664 | |
9bbfc964 HC |
/*
 * fanotify_mark(2): add, remove, or flush marks on an inode or a whole
 * vfsmount (FAN_MARK_MOUNT) for the group behind fanotify_fd.
 */
SYSCALL_DEFINE(fanotify_mark)(int fanotify_fd, unsigned int flags,
			      __u64 mask, int dfd,
			      const char __user * pathname)
{
	struct inode *inode = NULL;
	struct vfsmount *mnt = NULL;
	struct fsnotify_group *group;
	struct file *filp;
	struct path path;
	int ret, fput_needed;

	pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
		 __func__, fanotify_fd, flags, dfd, pathname, mask);

	/* we only use the lower 32 bits as of right now. */
	if (mask & ((__u64)0xffffffff << 32))
		return -EINVAL;

	if (flags & ~FAN_ALL_MARK_FLAGS)
		return -EINVAL;
	/* exactly one of ADD/REMOVE/FLUSH may be set */
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
	case FAN_MARK_ADD:
	case FAN_MARK_REMOVE:
	case FAN_MARK_FLUSH:
		break;
	default:
		return -EINVAL;
	}
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (mask & ~(FAN_ALL_EVENTS | FAN_ALL_PERM_EVENTS | FAN_EVENT_ON_CHILD))
#else
	if (mask & ~(FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD))
#endif
		return -EINVAL;

	filp = fget_light(fanotify_fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an fanotify instance */
	ret = -EINVAL;
	if (unlikely(filp->f_op != &fanotify_fops))
		goto fput_and_out;

	ret = fanotify_find_path(dfd, pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	if (!(flags & FAN_MARK_MOUNT))
		inode = path.dentry->d_inode;
	else
		mnt = path.mnt;
	group = filp->private_data;

	/* create/update an inode mark */
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
	case FAN_MARK_ADD:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_add_inode_mark(group, inode, mask, flags);
		break;
	case FAN_MARK_REMOVE:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_remove_inode_mark(group, inode, mask, flags);
		break;
	case FAN_MARK_FLUSH:
		/* drop every mark this group holds on the object class */
		if (flags & FAN_MARK_MOUNT)
			fsnotify_clear_vfsmount_marks_by_group(group);
		else
			fsnotify_clear_inode_marks_by_group(group);
		fsnotify_recalc_group_mask(group);
		break;
	default:
		ret = -EINVAL;
	}

	path_put(&path);
fput_and_out:
	fput_light(filp, fput_needed);
	return ret;
}
750 | ||
9bbfc964 HC |
#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
/*
 * On architectures using syscall wrappers every scalar argument arrives as
 * a "long"; narrow them back to the real prototype before calling the
 * implementation above.
 */
asmlinkage long SyS_fanotify_mark(long fanotify_fd, long flags, __u64 mask,
				  long dfd, long pathname)
{
	return SYSC_fanotify_mark((int) fanotify_fd, (unsigned int) flags,
				  mask, (int) dfd,
				  (const char __user *) pathname);
}
SYSCALL_ALIAS(sys_fanotify_mark, SyS_fanotify_mark);
#endif
761 | ||
2a3edf86 EP |
/*
 * fanotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init fanotify_user_setup(void)
{
	/* SLAB_PANIC: a cache-creation failure at boot panics immediately */
	fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC);
	fanotify_response_event_cache = KMEM_CACHE(fanotify_response_event,
						   SLAB_PANIC);

	return 0;
}
device_initcall(fanotify_user_setup);