fs/fuse/inode.c
/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2006  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/parser.h>
#include <linux/statfs.h>
#include <linux/random.h>

MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>");
MODULE_DESCRIPTION("Filesystem in Userspace");
MODULE_LICENSE("GPL");

static struct kmem_cache *fuse_inode_cachep;
struct list_head fuse_conn_list;
DEFINE_MUTEX(fuse_mutex);

#define FUSE_SUPER_MAGIC 0x65735546

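/*
 * Mount options parsed from the option string passed in by the
 * userspace mount helper.  fd, rootmode, user_id and group_id are
 * mandatory; the *_present bits record which of them were actually
 * seen, so parse_fuse_opt() can reject an incomplete set.
 */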
struct fuse_mount_data {
        int fd;
        unsigned rootmode;
        unsigned user_id;
        unsigned group_id;
        unsigned fd_present : 1;
        unsigned rootmode_present : 1;
        unsigned user_id_present : 1;
        unsigned group_id_present : 1;
        unsigned flags;
        unsigned max_read;
        unsigned blksize;
};

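/*
 * Allocate a fuse inode together with the request that will later be
 * needed to tell userspace to forget the node.  Preallocating the
 * FORGET request here means inode eviction cannot fail on memory
 * allocation.
 */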
static struct inode *fuse_alloc_inode(struct super_block *sb)
{
        struct inode *inode;
        struct fuse_inode *fi;

        inode = kmem_cache_alloc(fuse_inode_cachep, GFP_KERNEL);
        if (!inode)
                return NULL;

        fi = get_fuse_inode(inode);
        fi->i_time = 0;
        fi->nodeid = 0;
        fi->nlookup = 0;
        fi->forget_req = fuse_request_alloc();
        if (!fi->forget_req) {
                kmem_cache_free(fuse_inode_cachep, inode);
                return NULL;
        }

        return inode;
}

static void fuse_destroy_inode(struct inode *inode)
{
        struct fuse_inode *fi = get_fuse_inode(inode);
        if (fi->forget_req)
                fuse_request_free(fi->forget_req);
        kmem_cache_free(fuse_inode_cachep, inode);
}

static void fuse_read_inode(struct inode *inode)
{
        /* No op */
}

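/*
 * FORGET tells the userspace filesystem that the kernel has dropped
 * all references to a node.  nlookup carries the number of lookups
 * the kernel performed on this nodeid, so userspace can balance its
 * own reference count.  No reply is expected.
 */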
void fuse_send_forget(struct fuse_conn *fc, struct fuse_req *req,
                      unsigned long nodeid, u64 nlookup)
{
        struct fuse_forget_in *inarg = &req->misc.forget_in;
        inarg->nlookup = nlookup;
        req->in.h.opcode = FUSE_FORGET;
        req->in.h.nodeid = nodeid;
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(struct fuse_forget_in);
        req->in.args[0].value = inarg;
        request_send_noreply(fc, req);
}

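/*
 * Send the FORGET for this inode using the preallocated request.  If
 * the super block is no longer MS_ACTIVE the whole connection is
 * being torn down, so individual FORGETs would be pointless and are
 * skipped.
 */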
static void fuse_clear_inode(struct inode *inode)
{
        if (inode->i_sb->s_flags & MS_ACTIVE) {
                struct fuse_conn *fc = get_fuse_conn(inode);
                struct fuse_inode *fi = get_fuse_inode(inode);
                fuse_send_forget(fc, fi->forget_req, fi->nodeid, fi->nlookup);
                fi->forget_req = NULL;
        }
}

static int fuse_remount_fs(struct super_block *sb, int *flags, char *data)
{
        if (*flags & MS_MANDLOCK)
                return -EINVAL;

        return 0;
}

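/*
 * Update the inode from attributes returned by userspace.  If the
 * size of a regular file changed behind the kernel's back, the page
 * cache for it can no longer be trusted and is invalidated.  i_size
 * is updated under fc->lock to serialize it against the other places
 * in fuse that modify the size.
 */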
void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr)
{
        struct fuse_conn *fc = get_fuse_conn(inode);
        if (S_ISREG(inode->i_mode) && i_size_read(inode) != attr->size)
                invalidate_mapping_pages(inode->i_mapping, 0, -1);

        inode->i_ino = attr->ino;
        inode->i_mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
        inode->i_nlink = attr->nlink;
        inode->i_uid = attr->uid;
        inode->i_gid = attr->gid;
        spin_lock(&fc->lock);
        i_size_write(inode, attr->size);
        spin_unlock(&fc->lock);
        inode->i_blocks = attr->blocks;
        inode->i_atime.tv_sec = attr->atime;
        inode->i_atime.tv_nsec = attr->atimensec;
        inode->i_mtime.tv_sec = attr->mtime;
        inode->i_mtime.tv_nsec = attr->mtimensec;
        inode->i_ctime.tv_sec = attr->ctime;
        inode->i_ctime.tv_nsec = attr->ctimensec;
}

static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr)
{
        inode->i_mode = attr->mode & S_IFMT;
        inode->i_size = attr->size;
        if (S_ISREG(inode->i_mode)) {
                fuse_init_common(inode);
                fuse_init_file_inode(inode);
        } else if (S_ISDIR(inode->i_mode))
                fuse_init_dir(inode);
        else if (S_ISLNK(inode->i_mode))
                fuse_init_symlink(inode);
        else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
                 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
                fuse_init_common(inode);
                init_special_inode(inode, inode->i_mode,
                                   new_decode_dev(attr->rdev));
        } else
                BUG();
}

static int fuse_inode_eq(struct inode *inode, void *_nodeidp)
{
        unsigned long nodeid = *(unsigned long *) _nodeidp;
        if (get_node_id(inode) == nodeid)
                return 1;
        else
                return 0;
}

static int fuse_inode_set(struct inode *inode, void *_nodeidp)
{
        unsigned long nodeid = *(unsigned long *) _nodeidp;
        get_fuse_inode(inode)->nodeid = nodeid;
        return 0;
}

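/*
 * Look up or create the inode for a nodeid.  The inode cache is keyed
 * on the fuse nodeid (see fuse_inode_eq/fuse_inode_set above).  If a
 * cached inode turns out to have changed file type, it is marked bad
 * so outstanding I/O fails, and the lookup is retried to get a fresh
 * inode.  Each successful call accounts for one lookup in nlookup,
 * to be given back later via FORGET.
 */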
struct inode *fuse_iget(struct super_block *sb, unsigned long nodeid,
                        int generation, struct fuse_attr *attr)
{
        struct inode *inode;
        struct fuse_inode *fi;
        struct fuse_conn *fc = get_fuse_conn_super(sb);

 retry:
        inode = iget5_locked(sb, nodeid, fuse_inode_eq, fuse_inode_set, &nodeid);
        if (!inode)
                return NULL;

        if ((inode->i_state & I_NEW)) {
                inode->i_flags |= S_NOATIME|S_NOCMTIME;
                inode->i_generation = generation;
                inode->i_data.backing_dev_info = &fc->bdi;
                fuse_init_inode(inode, attr);
                unlock_new_inode(inode);
        } else if ((inode->i_mode ^ attr->mode) & S_IFMT) {
                /* Inode has changed type, any I/O on the old should fail */
                make_bad_inode(inode);
                iput(inode);
                goto retry;
        }

        fi = get_fuse_inode(inode);
        spin_lock(&fc->lock);
        fi->nlookup++;
        spin_unlock(&fc->lock);
        fuse_change_attributes(inode, attr);
        return inode;
}

static void fuse_umount_begin(struct vfsmount *vfsmnt, int flags)
{
        if (flags & MNT_FORCE)
                fuse_abort_conn(get_fuse_conn_super(vfsmnt->mnt_sb));
}

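/*
 * DESTROY is only sent when destroy_req was preallocated, i.e. for
 * "fuseblk" mounts.  The synchronous, forced request gives the
 * userspace server a chance to flush its state to the block device
 * before the unmount completes.
 */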
static void fuse_send_destroy(struct fuse_conn *fc)
{
        struct fuse_req *req = fc->destroy_req;
        if (req && fc->conn_init) {
                fc->destroy_req = NULL;
                req->in.h.opcode = FUSE_DESTROY;
                req->force = 1;
                request_send(fc, req);
                fuse_put_request(fc, req);
        }
}

static void fuse_put_super(struct super_block *sb)
{
        struct fuse_conn *fc = get_fuse_conn_super(sb);

        fuse_send_destroy(fc);
        spin_lock(&fc->lock);
        fc->connected = 0;
        fc->blocked = 0;
        spin_unlock(&fc->lock);
        /* Flush all readers on this fs */
        kill_fasync(&fc->fasync, SIGIO, POLL_IN);
        wake_up_all(&fc->waitq);
        wake_up_all(&fc->blocked_waitq);
        mutex_lock(&fuse_mutex);
        list_del(&fc->entry);
        fuse_ctl_remove_conn(fc);
        mutex_unlock(&fuse_mutex);
        fuse_conn_put(fc);
}

static void convert_fuse_statfs(struct kstatfs *stbuf, struct fuse_kstatfs *attr)
{
        stbuf->f_type = FUSE_SUPER_MAGIC;
        stbuf->f_bsize = attr->bsize;
        stbuf->f_frsize = attr->frsize;
        stbuf->f_blocks = attr->blocks;
        stbuf->f_bfree = attr->bfree;
        stbuf->f_bavail = attr->bavail;
        stbuf->f_files = attr->files;
        stbuf->f_ffree = attr->ffree;
        stbuf->f_namelen = attr->namelen;
        /* fsid is left zero */
}

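/*
 * statfs is forwarded to userspace.  Servers speaking protocol
 * versions before 7.4 send a shorter fuse_kstatfs, so only
 * FUSE_COMPAT_STATFS_SIZE bytes are requested from them; the rest of
 * outarg stays zeroed from the memset below.
 */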
static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct super_block *sb = dentry->d_sb;
        struct fuse_conn *fc = get_fuse_conn_super(sb);
        struct fuse_req *req;
        struct fuse_statfs_out outarg;
        int err;

        req = fuse_get_req(fc);
        if (IS_ERR(req))
                return PTR_ERR(req);

        memset(&outarg, 0, sizeof(outarg));
        req->in.numargs = 0;
        req->in.h.opcode = FUSE_STATFS;
        req->in.h.nodeid = get_node_id(dentry->d_inode);
        req->out.numargs = 1;
        req->out.args[0].size =
                fc->minor < 4 ? FUSE_COMPAT_STATFS_SIZE : sizeof(outarg);
        req->out.args[0].value = &outarg;
        request_send(fc, req);
        err = req->out.h.error;
        if (!err)
                convert_fuse_statfs(buf, &outarg.st);
        fuse_put_request(fc, req);
        return err;
}

enum {
        OPT_FD,
        OPT_ROOTMODE,
        OPT_USER_ID,
        OPT_GROUP_ID,
        OPT_DEFAULT_PERMISSIONS,
        OPT_ALLOW_OTHER,
        OPT_MAX_READ,
        OPT_BLKSIZE,
        OPT_ERR
};

static match_table_t tokens = {
        {OPT_FD,                  "fd=%u"},
        {OPT_ROOTMODE,            "rootmode=%o"},
        {OPT_USER_ID,             "user_id=%u"},
        {OPT_GROUP_ID,            "group_id=%u"},
        {OPT_DEFAULT_PERMISSIONS, "default_permissions"},
        {OPT_ALLOW_OTHER,         "allow_other"},
        {OPT_MAX_READ,            "max_read=%u"},
        {OPT_BLKSIZE,             "blksize=%u"},
        {OPT_ERR,                 NULL}
};

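/*
 * Parse the mount option string.  Returns 1 on success and 0 on any
 * parse error or if one of the mandatory options (fd, rootmode,
 * user_id, group_id) is missing.  "blksize" is only meaningful for
 * block device based (fuseblk) mounts and is rejected otherwise.
 */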
static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
{
        char *p;
        memset(d, 0, sizeof(struct fuse_mount_data));
        d->max_read = ~0;
        d->blksize = 512;

        while ((p = strsep(&opt, ",")) != NULL) {
                int token;
                int value;
                substring_t args[MAX_OPT_ARGS];
                if (!*p)
                        continue;

                token = match_token(p, tokens, args);
                switch (token) {
                case OPT_FD:
                        if (match_int(&args[0], &value))
                                return 0;
                        d->fd = value;
                        d->fd_present = 1;
                        break;

                case OPT_ROOTMODE:
                        if (match_octal(&args[0], &value))
                                return 0;
                        if (!fuse_valid_type(value))
                                return 0;
                        d->rootmode = value;
                        d->rootmode_present = 1;
                        break;

                case OPT_USER_ID:
                        if (match_int(&args[0], &value))
                                return 0;
                        d->user_id = value;
                        d->user_id_present = 1;
                        break;

                case OPT_GROUP_ID:
                        if (match_int(&args[0], &value))
                                return 0;
                        d->group_id = value;
                        d->group_id_present = 1;
                        break;

                case OPT_DEFAULT_PERMISSIONS:
                        d->flags |= FUSE_DEFAULT_PERMISSIONS;
                        break;

                case OPT_ALLOW_OTHER:
                        d->flags |= FUSE_ALLOW_OTHER;
                        break;

                case OPT_MAX_READ:
                        if (match_int(&args[0], &value))
                                return 0;
                        d->max_read = value;
                        break;

                case OPT_BLKSIZE:
                        if (!is_bdev || match_int(&args[0], &value))
                                return 0;
                        d->blksize = value;
                        break;

                default:
                        return 0;
                }
        }

        if (!d->fd_present || !d->rootmode_present ||
            !d->user_id_present || !d->group_id_present)
                return 0;

        return 1;
}

static int fuse_show_options(struct seq_file *m, struct vfsmount *mnt)
{
        struct fuse_conn *fc = get_fuse_conn_super(mnt->mnt_sb);

        seq_printf(m, ",user_id=%u", fc->user_id);
        seq_printf(m, ",group_id=%u", fc->group_id);
        if (fc->flags & FUSE_DEFAULT_PERMISSIONS)
                seq_puts(m, ",default_permissions");
        if (fc->flags & FUSE_ALLOW_OTHER)
                seq_puts(m, ",allow_other");
        if (fc->max_read != ~0)
                seq_printf(m, ",max_read=%u", fc->max_read);
        return 0;
}

static struct fuse_conn *new_conn(void)
{
        struct fuse_conn *fc;

        fc = kzalloc(sizeof(*fc), GFP_KERNEL);
        if (fc) {
                spin_lock_init(&fc->lock);
                mutex_init(&fc->inst_mutex);
                atomic_set(&fc->count, 1);
                init_waitqueue_head(&fc->waitq);
                init_waitqueue_head(&fc->blocked_waitq);
                INIT_LIST_HEAD(&fc->pending);
                INIT_LIST_HEAD(&fc->processing);
                INIT_LIST_HEAD(&fc->io);
                INIT_LIST_HEAD(&fc->interrupts);
                atomic_set(&fc->num_waiting, 0);
                fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
                fc->bdi.unplug_io_fn = default_unplug_io_fn;
                fc->reqctr = 0;
                fc->blocked = 1;
                get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key));
        }
        return fc;
}

void fuse_conn_put(struct fuse_conn *fc)
{
        if (atomic_dec_and_test(&fc->count)) {
                if (fc->destroy_req)
                        fuse_request_free(fc->destroy_req);
                mutex_destroy(&fc->inst_mutex);
                kfree(fc);
        }
}

struct fuse_conn *fuse_conn_get(struct fuse_conn *fc)
{
        atomic_inc(&fc->count);
        return fc;
}

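/*
 * The root inode is special: it always has nodeid FUSE_ROOT_ID and is
 * instantiated from just the rootmode mount option, since no LOOKUP
 * has been done yet when the super block is being filled in.
 */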
static struct inode *get_root_inode(struct super_block *sb, unsigned mode)
{
        struct fuse_attr attr;
        memset(&attr, 0, sizeof(attr));

        attr.mode = mode;
        attr.ino = FUSE_ROOT_ID;
        return fuse_iget(sb, FUSE_ROOT_ID, 0, &attr);
}

static const struct super_operations fuse_super_operations = {
        .alloc_inode    = fuse_alloc_inode,
        .destroy_inode  = fuse_destroy_inode,
        .read_inode     = fuse_read_inode,
        .clear_inode    = fuse_clear_inode,
        .remount_fs     = fuse_remount_fs,
        .put_super      = fuse_put_super,
        .umount_begin   = fuse_umount_begin,
        .statfs         = fuse_statfs,
        .show_options   = fuse_show_options,
};

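/*
 * Completion handler for the INIT request.  This negotiates the
 * protocol: a major version mismatch makes the connection unusable,
 * while the minor version decides which features (asynchronous reads,
 * POSIX locks, server-supplied max_readahead and max_write) can be
 * relied upon.  Requests were queued up behind fc->blocked until this
 * reply arrives.
 */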
static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
{
        struct fuse_init_out *arg = &req->misc.init_out;

        if (req->out.h.error || arg->major != FUSE_KERNEL_VERSION)
                fc->conn_error = 1;
        else {
                unsigned long ra_pages;

                if (arg->minor >= 6) {
                        ra_pages = arg->max_readahead / PAGE_CACHE_SIZE;
                        if (arg->flags & FUSE_ASYNC_READ)
                                fc->async_read = 1;
                        if (!(arg->flags & FUSE_POSIX_LOCKS))
                                fc->no_lock = 1;
                } else {
                        ra_pages = fc->max_read / PAGE_CACHE_SIZE;
                        fc->no_lock = 1;
                }

                fc->bdi.ra_pages = min(fc->bdi.ra_pages, ra_pages);
                fc->minor = arg->minor;
                fc->max_write = arg->minor < 5 ? 4096 : arg->max_write;
                fc->conn_init = 1;
        }
        fuse_put_request(fc, req);
        fc->blocked = 0;
        wake_up_all(&fc->blocked_waitq);
}

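/*
 * Send the INIT request in the background; process_init_reply() runs
 * via req->end once the userspace server answers.
 */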
static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
{
        struct fuse_init_in *arg = &req->misc.init_in;

        arg->major = FUSE_KERNEL_VERSION;
        arg->minor = FUSE_KERNEL_MINOR_VERSION;
        arg->max_readahead = fc->bdi.ra_pages * PAGE_CACHE_SIZE;
        arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS;
        req->in.h.opcode = FUSE_INIT;
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(*arg);
        req->in.args[0].value = arg;
        req->out.numargs = 1;
        /* Variable length argument used for backward compatibility
           with interface version < 7.5.  Rest of init_out is zeroed
           by do_get_request(), so a short reply is not a problem */
        req->out.argvar = 1;
        req->out.args[0].size = sizeof(struct fuse_init_out);
        req->out.args[0].value = &req->misc.init_out;
        req->end = process_init_reply;
        request_send_background(fc, req);
}

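/*
 * Hand out connection IDs.  Not atomic on its own: the single caller,
 * fuse_fill_super(), already holds fuse_mutex.
 */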
static u64 conn_id(void)
{
        static u64 ctr = 1;
        return ctr++;
}

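/*
 * Fill in the super block at mount time.  The "fd" mount option is a
 * file descriptor for an open /dev/fuse, typically passed in by the
 * fusermount helper; that struct file becomes the kernel's channel to
 * the userspace server.  The connection only becomes fully usable
 * once the INIT handshake sent at the end of this function completes.
 */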
static int fuse_fill_super(struct super_block *sb, void *data, int silent)
{
        struct fuse_conn *fc;
        struct inode *root;
        struct fuse_mount_data d;
        struct file *file;
        struct dentry *root_dentry;
        struct fuse_req *init_req;
        int err;
        int is_bdev = sb->s_bdev != NULL;

        if (sb->s_flags & MS_MANDLOCK)
                return -EINVAL;

        if (!parse_fuse_opt((char *) data, &d, is_bdev))
                return -EINVAL;

        if (is_bdev) {
#ifdef CONFIG_BLOCK
                if (!sb_set_blocksize(sb, d.blksize))
                        return -EINVAL;
#endif
        } else {
                sb->s_blocksize = PAGE_CACHE_SIZE;
                sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
        }
        sb->s_magic = FUSE_SUPER_MAGIC;
        sb->s_op = &fuse_super_operations;
        sb->s_maxbytes = MAX_LFS_FILESIZE;

        file = fget(d.fd);
        if (!file)
                return -EINVAL;

        if (file->f_op != &fuse_dev_operations) {
                fput(file);
                return -EINVAL;
        }

        fc = new_conn();
        if (!fc) {
                fput(file);
                return -ENOMEM;
        }

        fc->flags = d.flags;
        fc->user_id = d.user_id;
        fc->group_id = d.group_id;
        fc->max_read = d.max_read;

        /* Used by get_root_inode() */
        sb->s_fs_info = fc;

        err = -ENOMEM;
        root = get_root_inode(sb, d.rootmode);
        if (!root)
                goto err;

        root_dentry = d_alloc_root(root);
        if (!root_dentry) {
                iput(root);
                goto err;
        }

        init_req = fuse_request_alloc();
        if (!init_req)
                goto err_put_root;

        if (is_bdev) {
                fc->destroy_req = fuse_request_alloc();
                if (!fc->destroy_req)
                        goto err_free_init_req;
        }

        mutex_lock(&fuse_mutex);
        err = -EINVAL;
        if (file->private_data)
                goto err_unlock;

        fc->id = conn_id();
        err = fuse_ctl_add_conn(fc);
        if (err)
                goto err_unlock;

        list_add_tail(&fc->entry, &fuse_conn_list);
        sb->s_root = root_dentry;
        fc->connected = 1;
        file->private_data = fuse_conn_get(fc);
        mutex_unlock(&fuse_mutex);
        /*
         * atomic_dec_and_test() in fput() provides the necessary
         * memory barrier for file->private_data to be visible on all
         * CPUs after this
         */
        fput(file);

        fuse_send_init(fc, init_req);

        return 0;

 err_unlock:
        mutex_unlock(&fuse_mutex);
 err_free_init_req:
        fuse_request_free(init_req);
 err_put_root:
        dput(root_dentry);
 err:
        fput(file);
        fuse_conn_put(fc);
        return err;
}

static int fuse_get_sb(struct file_system_type *fs_type,
                       int flags, const char *dev_name,
                       void *raw_data, struct vfsmount *mnt)
{
        return get_sb_nodev(fs_type, flags, raw_data, fuse_fill_super, mnt);
}

static struct file_system_type fuse_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "fuse",
        .fs_flags       = FS_HAS_SUBTYPE,
        .get_sb         = fuse_get_sb,
        .kill_sb        = kill_anon_super,
};

#ifdef CONFIG_BLOCK
static int fuse_get_sb_blk(struct file_system_type *fs_type,
                           int flags, const char *dev_name,
                           void *raw_data, struct vfsmount *mnt)
{
        return get_sb_bdev(fs_type, flags, dev_name, raw_data, fuse_fill_super,
                           mnt);
}

static struct file_system_type fuseblk_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "fuseblk",
        .fs_flags       = FS_HAS_SUBTYPE | FS_REQUIRES_DEV,
        .get_sb         = fuse_get_sb_blk,
        .kill_sb        = kill_block_super,
};

static inline int register_fuseblk(void)
{
        return register_filesystem(&fuseblk_fs_type);
}

static inline void unregister_fuseblk(void)
{
        unregister_filesystem(&fuseblk_fs_type);
}
#else
static inline int register_fuseblk(void)
{
        return 0;
}

static inline void unregister_fuseblk(void)
{
}
#endif

static decl_subsys(fuse, NULL, NULL);
static decl_subsys(connections, NULL, NULL);

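/*
 * Slab constructor.  The SLAB_CTOR_CONSTRUCTOR check was required by
 * the slab interface of this era: inode_init_once() must only run on
 * freshly allocated objects.
 */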
static void fuse_inode_init_once(void *foo, struct kmem_cache *cachep,
                                 unsigned long flags)
{
        struct inode *inode = foo;

        if (flags & SLAB_CTOR_CONSTRUCTOR)
                inode_init_once(inode);
}

static int __init fuse_fs_init(void)
{
        int err;

        err = register_filesystem(&fuse_fs_type);
        if (err)
                goto out;

        err = register_fuseblk();
        if (err)
                goto out_unreg;

        fuse_inode_cachep = kmem_cache_create("fuse_inode",
                                              sizeof(struct fuse_inode),
                                              0, SLAB_HWCACHE_ALIGN,
                                              fuse_inode_init_once, NULL);
        err = -ENOMEM;
        if (!fuse_inode_cachep)
                goto out_unreg2;

        return 0;

 out_unreg2:
        unregister_fuseblk();
 out_unreg:
        unregister_filesystem(&fuse_fs_type);
 out:
        return err;
}

static void fuse_fs_cleanup(void)
{
        unregister_filesystem(&fuse_fs_type);
        unregister_fuseblk();
        kmem_cache_destroy(fuse_inode_cachep);
}

static int fuse_sysfs_init(void)
{
        int err;

        kobj_set_kset_s(&fuse_subsys, fs_subsys);
        err = subsystem_register(&fuse_subsys);
        if (err)
                goto out_err;

        kobj_set_kset_s(&connections_subsys, fuse_subsys);
        err = subsystem_register(&connections_subsys);
        if (err)
                goto out_fuse_unregister;

        return 0;

 out_fuse_unregister:
        subsystem_unregister(&fuse_subsys);
 out_err:
        return err;
}

static void fuse_sysfs_cleanup(void)
{
        subsystem_unregister(&connections_subsys);
        subsystem_unregister(&fuse_subsys);
}

static int __init fuse_init(void)
{
        int res;

        printk(KERN_INFO "fuse init (API version %i.%i)\n",
               FUSE_KERNEL_VERSION, FUSE_KERNEL_MINOR_VERSION);

        INIT_LIST_HEAD(&fuse_conn_list);
        res = fuse_fs_init();
        if (res)
                goto err;

        res = fuse_dev_init();
        if (res)
                goto err_fs_cleanup;

        res = fuse_sysfs_init();
        if (res)
                goto err_dev_cleanup;

        res = fuse_ctl_init();
        if (res)
                goto err_sysfs_cleanup;

        return 0;

 err_sysfs_cleanup:
        fuse_sysfs_cleanup();
 err_dev_cleanup:
        fuse_dev_cleanup();
 err_fs_cleanup:
        fuse_fs_cleanup();
 err:
        return res;
}

static void __exit fuse_exit(void)
{
        printk(KERN_DEBUG "fuse exit\n");

        fuse_ctl_cleanup();
        fuse_sysfs_cleanup();
        fuse_fs_cleanup();
        fuse_dev_cleanup();
}

module_init(fuse_init);
module_exit(fuse_exit);