/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2006  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);

static kmem_cache_t *fuse_req_cachep;

static struct fuse_conn *fuse_get_conn(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private_data is set
	 * once during mount and is valid until the file is released.
	 */
	return file->private_data;
}

static void fuse_request_init(struct fuse_req *req)
{
	memset(req, 0, sizeof(*req));
	INIT_LIST_HEAD(&req->list);
	init_waitqueue_head(&req->waitq);
	atomic_set(&req->count, 1);
}

struct fuse_req *fuse_request_alloc(void)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, SLAB_KERNEL);
	if (req)
		fuse_request_init(req);
	return req;
}

void fuse_request_free(struct fuse_req *req)
{
	kmem_cache_free(fuse_req_cachep, req);
}

static void block_sigs(sigset_t *oldset)
{
	sigset_t mask;

	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

/*
 * Reset request, so that it can be reused
 *
 * The caller must be _very_ careful to make sure that it is holding
 * the only reference to req
 */
void fuse_reset_request(struct fuse_req *req)
{
	BUG_ON(atomic_read(&req->count) != 1);
	fuse_request_init(req);
}

static void __fuse_get_request(struct fuse_req *req)
{
	atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	BUG_ON(atomic_read(&req->count) < 2);
	atomic_dec(&req->count);
}

struct fuse_req *fuse_get_req(struct fuse_conn *fc)
{
	struct fuse_req *req;
	sigset_t oldset;
	int err;

	block_sigs(&oldset);
	err = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
	restore_sigs(&oldset);
	if (err)
		return ERR_PTR(-EINTR);

	req = fuse_request_alloc();
	if (!req)
		return ERR_PTR(-ENOMEM);

	atomic_inc(&fc->num_waiting);
	fuse_request_init(req);
	req->in.h.uid = current->fsuid;
	req->in.h.gid = current->fsgid;
	req->in.h.pid = current->pid;
	return req;
}

void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (atomic_dec_and_test(&req->count)) {
		atomic_dec(&fc->num_waiting);
		fuse_request_free(req);
	}
}

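/*
 * For illustration, the refcount of a plain synchronous request moves
 * roughly like this (a sketch of the common path, not an exhaustive
 * state machine):
 *
 *	fuse_get_req()			count == 1
 *	request_send()
 *	  __fuse_get_request()		count == 2 (extra ref for the reply)
 *	request_end()
 *	  fuse_put_request()		count == 1
 *	caller reads req->out, then
 *	fuse_put_request()		count == 0, request freed
 */
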
void fuse_release_background(struct fuse_conn *fc, struct fuse_req *req)
{
	iput(req->inode);
	iput(req->inode2);
	if (req->file)
		fput(req->file);
	spin_lock(&fc->lock);
	list_del(&req->bg_entry);
	if (fc->num_background == FUSE_MAX_BACKGROUND) {
		fc->blocked = 0;
		wake_up_all(&fc->blocked_waitq);
	}
	fc->num_background--;
	spin_unlock(&fc->lock);
}

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was interrupted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  In case of a background request the references to the
 * stored objects are released.  The requester thread is woken up (if
 * still waiting), the 'end' callback is called if given, else the
 * reference to the request is released
 *
 * Releasing the extra reference for foreground requests must be done
 * within the same locked region as setting state to finished.  This
 * is because fuse_reset_request() may be called after the request is
 * finished and it must be the sole possessor.  If the request is
 * interrupted and put in the background, it will return with an error
 * and hence never be reset and reused.
 *
 * Called with fc->lock, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
{
	list_del(&req->list);
	req->state = FUSE_REQ_FINISHED;
	if (!req->background) {
		spin_unlock(&fc->lock);
		wake_up(&req->waitq);
		fuse_put_request(fc, req);
	} else {
		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
		req->end = NULL;
		spin_unlock(&fc->lock);
		down_read(&fc->sbput_sem);
		if (fc->mounted)
			fuse_release_background(fc, req);
		up_read(&fc->sbput_sem);
		if (end)
			end(fc, req);
		else
			fuse_put_request(fc, req);
	}
}

/*
 * Unfortunately request interruption does not just solve the deadlock
 * problem; it causes problems too.  These stem from the fact that an
 * interrupted request continues to be processed in userspace, while
 * all the locks and object references (inode and file) held during
 * the operation are released.
 *
 * To release the locks is exactly why there's a need to interrupt the
 * request, so there's not a lot that can be done about this, except
 * introduce additional locking in userspace.
 *
 * More important is to keep inode and file references until userspace
 * has replied, otherwise FORGET and RELEASE could be sent while the
 * inode/file is still used by the filesystem.
 *
 * For this reason the concept of "background" request is introduced.
 * An interrupted request is backgrounded if it has already been sent
 * to userspace.  Backgrounding involves getting an extra reference to
 * the inode(s) or file used in the request, and adding the request to
 * the fc->background list.  When a reply is received for a background
 * request, the object references are released, and the request is
 * removed from the list.  If the filesystem is unmounted while there
 * are still background requests, the list is walked and references
 * are released as if a reply was received.
 *
 * There's one more use for a background request.  The RELEASE message is
 * always sent as background, since it doesn't return an error or
 * inode.
 */
static void background_request(struct fuse_conn *fc, struct fuse_req *req)
{
	req->background = 1;
	list_add(&req->bg_entry, &fc->background);
	fc->num_background++;
	if (fc->num_background == FUSE_MAX_BACKGROUND)
		fc->blocked = 1;
	if (req->inode)
		req->inode = igrab(req->inode);
	if (req->inode2)
		req->inode2 = igrab(req->inode2);
	if (req->file)
		get_file(req->file);
}

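/*
 * Note the throttle implemented by fc->blocked above: once
 * FUSE_MAX_BACKGROUND requests are in the background, fc->blocked is
 * set and fuse_get_req() sleeps on fc->blocked_waitq until
 * fuse_release_background() clears the flag again.  This bounds the
 * number of outstanding backgrounded requests per connection.
 */
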
/* Called with fc->lock held.  Releases, and then reacquires it. */
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
	sigset_t oldset;

	spin_unlock(&fc->lock);
	block_sigs(&oldset);
	wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
	restore_sigs(&oldset);
	spin_lock(&fc->lock);
	if (req->state == FUSE_REQ_FINISHED && !req->interrupted)
		return;

	if (!req->interrupted) {
		req->out.h.error = -EINTR;
		req->interrupted = 1;
	}
	if (req->locked) {
		/* This is uninterruptible sleep, because data is
		   being copied to/from the buffers of req.  During
		   locked state, there mustn't be any filesystem
		   operation (e.g. page fault), since that could lead
		   to deadlock */
		spin_unlock(&fc->lock);
		wait_event(req->waitq, !req->locked);
		spin_lock(&fc->lock);
	}
	if (req->state == FUSE_REQ_PENDING) {
		list_del(&req->list);
		__fuse_put_request(req);
	} else if (req->state == FUSE_REQ_SENT)
		background_request(fc, req);
}

static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}

static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
	fc->reqctr++;
	/* zero is special */
	if (fc->reqctr == 0)
		fc->reqctr = 1;
	req->in.h.unique = fc->reqctr;
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	list_add_tail(&req->list, &fc->pending);
	req->state = FUSE_REQ_PENDING;
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

/*
 * This can only be interrupted by a SIGKILL
 */
void request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	spin_lock(&fc->lock);
	if (!fc->connected)
		req->out.h.error = -ENOTCONN;
	else if (fc->conn_error)
		req->out.h.error = -ECONNREFUSED;
	else {
		queue_request(fc, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);

		request_wait_answer(fc, req);
	}
	spin_unlock(&fc->lock);
}

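/*
 * A minimal sketch of how a filesystem operation drives request_send()
 * (illustrative only; the real callers live in dir.c, file.c and
 * inode.c, and FUSE_STATFS is just an example opcode):
 *
 *	struct fuse_req *req = fuse_get_req(fc);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	req->in.h.opcode = FUSE_STATFS;
 *	req->out.numargs = 1;
 *	req->out.args[0].size = sizeof(outarg);
 *	req->out.args[0].value = &outarg;
 *	request_send(fc, req);
 *	err = req->out.h.error;
 *	fuse_put_request(fc, req);
 */
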
static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
	spin_lock(&fc->lock);
	background_request(fc, req);
	if (fc->connected) {
		queue_request(fc, req);
		spin_unlock(&fc->lock);
	} else {
		req->out.h.error = -ENOTCONN;
		request_end(fc, req);
	}
}

void request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 0;
	request_send_nowait(fc, req);
}

void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	request_send_nowait(fc, req);
}

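/*
 * To summarize the three send variants: request_send() queues the
 * request and sleeps for the reply; request_send_noreply() queues it
 * with isreply cleared, so nobody ever waits (used for FORGET);
 * request_send_background() expects a reply but returns immediately,
 * with completion handled via req->end (RELEASE is always sent this
 * way, per the comment above background_request()).
 */
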
/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * interrupted bail out.
 */
static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&fc->lock);
		if (req->interrupted)
			err = -ENOENT;
		else
			req->locked = 1;
		spin_unlock(&fc->lock);
	}
	return err;
}

/*
 * Unlock request.  If it was interrupted during being locked, the
 * requester thread is currently waiting for it to be unlocked, so
 * wake it up.
 */
static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (req) {
		spin_lock(&fc->lock);
		req->locked = 0;
		if (req->interrupted)
			wake_up(&req->waitq);
		spin_unlock(&fc->lock);
	}
}

struct fuse_copy_state {
	struct fuse_conn *fc;
	int write;
	struct fuse_req *req;
	const struct iovec *iov;
	unsigned long nr_segs;
	unsigned long seglen;
	unsigned long addr;
	struct page *pg;
	void *mapaddr;
	void *buf;
	unsigned len;
};

static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
			   int write, struct fuse_req *req,
			   const struct iovec *iov, unsigned long nr_segs)
{
	memset(cs, 0, sizeof(*cs));
	cs->fc = fc;
	cs->write = write;
	cs->req = req;
	cs->iov = iov;
	cs->nr_segs = nr_segs;
}

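/*
 * The fuse_copy_* helpers below form a small pull-based state machine:
 * cs->buf/cs->len describe the currently mapped chunk of the user
 * iovec, fuse_copy_fill() advances to the next page when the chunk is
 * exhausted, and fuse_copy_finish() drops the mapping.  A caller
 * therefore looks roughly like this (sketch of the pattern used by
 * fuse_dev_readv() below):
 *
 *	fuse_copy_init(&cs, fc, write, req, iov, nr_segs);
 *	err = fuse_copy_one(&cs, &hdr, sizeof(hdr));
 *	if (!err)
 *		err = fuse_copy_args(&cs, numargs, argpages, args, zeroing);
 *	fuse_copy_finish(&cs);
 */
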
/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->mapaddr) {
		kunmap_atomic(cs->mapaddr, KM_USER0);
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
		cs->mapaddr = NULL;
	}
}

/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	unsigned long offset;
	int err;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);
	if (!cs->seglen) {
		BUG_ON(!cs->nr_segs);
		cs->seglen = cs->iov[0].iov_len;
		cs->addr = (unsigned long) cs->iov[0].iov_base;
		cs->iov++;
		cs->nr_segs--;
	}
	down_read(&current->mm->mmap_sem);
	err = get_user_pages(current, current->mm, cs->addr, 1, cs->write, 0,
			     &cs->pg, NULL);
	up_read(&current->mm->mmap_sem);
	if (err < 0)
		return err;
	BUG_ON(err != 1);
	offset = cs->addr % PAGE_SIZE;
	cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
	cs->buf = cs->mapaddr + offset;
	cs->len = min(PAGE_SIZE - offset, cs->seglen);
	cs->seglen -= cs->len;
	cs->addr += cs->len;

	return lock_request(cs->fc, cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		if (cs->write)
			memcpy(cs->buf, *val, ncpy);
		else
			memcpy(*val, cs->buf, ncpy);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->buf += ncpy;
	return ncpy;
}

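/*
 * Worked example (illustrative numbers): copying a 6000 byte argument
 * with 4096 byte pages takes at least two fills.  If the user buffer
 * starts at offset 0x100 into a page, the first fuse_copy_do() can
 * move at most PAGE_SIZE - 0x100 = 3840 bytes; the next
 * fuse_copy_fill() maps the following page and the remaining 2160
 * bytes are copied from there.
 */
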
/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
			  unsigned offset, unsigned count, int zeroing)
{
	if (page && zeroing && count < PAGE_SIZE) {
		void *mapaddr = kmap_atomic(page, KM_USER1);
		memset(mapaddr, 0, PAGE_SIZE);
		kunmap_atomic(mapaddr, KM_USER1);
	}
	while (count) {
		int err;
		if (!cs->len && (err = fuse_copy_fill(cs)))
			return err;
		if (page) {
			void *mapaddr = kmap_atomic(page, KM_USER1);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr, KM_USER1);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;
	unsigned offset = req->page_offset;
	unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		struct page *page = req->pages[i];
		int err = fuse_copy_page(cs, page, offset, count, zeroing);
		if (err)
			return err;

		nbytes -= count;
		count = min(nbytes, (unsigned) PAGE_SIZE);
		offset = 0;
	}
	return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		int err;
		if (!cs->len && (err = fuse_copy_fill(cs)))
			return err;
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++) {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}

/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(&fc->waitq, &wait);
	while (fc->connected && list_empty(&fc->pending)) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			break;

		spin_unlock(&fc->lock);
		schedule();
		spin_lock(&fc->lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&fc->waitq, &wait);
}

/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to the userspace buffer.  If
 * no reply is needed (FORGET) or the request has been interrupted or
 * there was an error during the copying then it's finished by calling
 * request_end().  Otherwise add it to the processing list, and set
 * the 'sent' state.
 */
static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
			      unsigned long nr_segs, loff_t *off)
{
	int err;
	struct fuse_req *req;
	struct fuse_in *in;
	struct fuse_copy_state cs;
	unsigned reqsize;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

 restart:
	spin_lock(&fc->lock);
	err = -EAGAIN;
	if ((file->f_flags & O_NONBLOCK) && fc->connected &&
	    list_empty(&fc->pending))
		goto err_unlock;

	request_wait(fc);
	err = -ENODEV;
	if (!fc->connected)
		goto err_unlock;
	err = -ERESTARTSYS;
	if (list_empty(&fc->pending))
		goto err_unlock;

	req = list_entry(fc->pending.next, struct fuse_req, list);
	req->state = FUSE_REQ_READING;
	list_move(&req->list, &fc->io);

	in = &req->in;
	reqsize = in->h.len;
	/* If request is too large, reply with an error and restart the read */
	if (iov_length(iov, nr_segs) < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	spin_unlock(&fc->lock);
	fuse_copy_init(&cs, fc, 1, req, iov, nr_segs);
	err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(&cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(&cs);
	spin_lock(&fc->lock);
	req->locked = 0;
	if (!err && req->interrupted)
		err = -ENOENT;
	if (err) {
		if (!req->interrupted)
			req->out.h.error = -EIO;
		request_end(fc, req);
		return err;
	}
	if (!req->isreply)
		request_end(fc, req);
	else {
		req->state = FUSE_REQ_SENT;
		list_move_tail(&req->list, &fc->processing);
		spin_unlock(&fc->lock);
	}
	return reqsize;

 err_unlock:
	spin_unlock(&fc->lock);
	return err;
}

static ssize_t fuse_dev_read(struct file *file, char __user *buf,
			     size_t nbytes, loff_t *off)
{
	struct iovec iov;
	iov.iov_len = nbytes;
	iov.iov_base = buf;
	return fuse_dev_readv(file, &iov, 1, off);
}

/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
{
	struct list_head *entry;

	list_for_each(entry, &fc->processing) {
		struct fuse_req *req;
		req = list_entry(entry, struct fuse_req, list);
		if (req->in.h.unique == unique)
			return req;
	}
	return NULL;
}

static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	if (out->h.error)
		return nbytes != reqsize ? -EINVAL : 0;

	reqsize += len_args(out->numargs, out->args);

	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &out->args[out->numargs-1];
		unsigned diffsize = reqsize - nbytes;
		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
			      out->page_zeroing);
}

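/*
 * The argvar case above allows a shorter-than-maximum last argument:
 * e.g. for a READ reply the filesystem may return fewer bytes than
 * requested, so a reply with nbytes < reqsize simply shrinks
 * lastarg->size by the difference instead of being rejected.
 */
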
/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling request_end()
 */
static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
			       unsigned long nr_segs, loff_t *off)
{
	int err;
	unsigned nbytes = iov_length(iov, nr_segs);
	struct fuse_req *req;
	struct fuse_out_header oh;
	struct fuse_copy_state cs;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

	fuse_copy_init(&cs, fc, 0, NULL, iov, nr_segs);
	if (nbytes < sizeof(struct fuse_out_header))
		return -EINVAL;

	err = fuse_copy_one(&cs, &oh, sizeof(oh));
	if (err)
		goto err_finish;
	err = -EINVAL;
	if (!oh.unique || oh.error <= -1000 || oh.error > 0 ||
	    oh.len != nbytes)
		goto err_finish;

	spin_lock(&fc->lock);
	err = -ENOENT;
	if (!fc->connected)
		goto err_unlock;

	req = request_find(fc, oh.unique);
	err = -EINVAL;
	if (!req)
		goto err_unlock;

	if (req->interrupted) {
		spin_unlock(&fc->lock);
		fuse_copy_finish(&cs);
		spin_lock(&fc->lock);
		request_end(fc, req);
		return -ENOENT;
	}
	list_move(&req->list, &fc->io);
	req->out.h = oh;
	req->locked = 1;
	cs.req = req;
	spin_unlock(&fc->lock);

	err = copy_out_args(&cs, &req->out, nbytes);
	fuse_copy_finish(&cs);

	spin_lock(&fc->lock);
	req->locked = 0;
	if (!err) {
		if (req->interrupted)
			err = -ENOENT;
	} else if (!req->interrupted)
		req->out.h.error = -EIO;
	request_end(fc, req);

	return err ? err : nbytes;

 err_unlock:
	spin_unlock(&fc->lock);
 err_finish:
	fuse_copy_finish(&cs);
	return err;
}

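/*
 * For reference, the reply that userspace writes starts with a
 * struct fuse_out_header (an illustrative sketch):
 *
 *	struct fuse_out_header oh = {
 *		.unique = <copied from the request's fuse_in_header>,
 *		.error  = 0,		// or a negated errno
 *		.len    = sizeof(oh) + <size of reply body>,
 *	};
 *
 * oh.len must equal the number of bytes written, and oh.error must be
 * zero or in (-1000, 0), matching the checks above.
 */
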
static ssize_t fuse_dev_write(struct file *file, const char __user *buf,
			      size_t nbytes, loff_t *off)
{
	struct iovec iov;
	iov.iov_len = nbytes;
	iov.iov_base = (char __user *) buf;
	return fuse_dev_writev(file, &iov, 1, off);
}

static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
	unsigned mask = POLLOUT | POLLWRNORM;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return POLLERR;

	poll_wait(file, &fc->waitq, wait);

	spin_lock(&fc->lock);
	if (!fc->connected)
		mask = POLLERR;
	else if (!list_empty(&fc->pending))
		mask |= POLLIN | POLLRDNORM;
	spin_unlock(&fc->lock);

	return mask;
}

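/*
 * This lets a daemon multiplex the device with poll()/select(): the
 * descriptor is always writable (replies never block on the kernel
 * side), and becomes readable once a request is queued on
 * fc->pending.  Sketched from the userspace side:
 *
 *	struct pollfd pfd = { .fd = fuse_fd, .events = POLLIN };
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		read(fuse_fd, buf, bufsize);
 */
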
/*
 * Abort all requests on the given list (pending or processing)
 *
 * This function releases and reacquires fc->lock
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		request_end(fc, req);
		spin_lock(&fc->lock);
	}
}

/*
 * Abort requests under I/O
 *
 * The requests are set to interrupted and finished, and the request
 * waiter is woken up.  This will make request_wait_answer() wait
 * until the request is unlocked and then return.
 *
 * If the request is asynchronous, then the end function needs to be
 * called after waiting for the request to be unlocked (if it was
 * locked).
 */
static void end_io_requests(struct fuse_conn *fc)
{
	while (!list_empty(&fc->io)) {
		struct fuse_req *req =
			list_entry(fc->io.next, struct fuse_req, list);
		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;

		req->interrupted = 1;
		req->out.h.error = -ECONNABORTED;
		req->state = FUSE_REQ_FINISHED;
		list_del_init(&req->list);
		wake_up(&req->waitq);
		if (end) {
			req->end = NULL;
			/* The end function will consume this reference */
			__fuse_get_request(req);
			spin_unlock(&fc->lock);
			wait_event(req->waitq, !req->locked);
			end(fc, req);
			spin_lock(&fc->lock);
		}
	}
}

/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or
 * just a hung filesystem.
 *
 * The same effect is usually achievable through killing the
 * filesystem daemon and all users of the filesystem.  The exception
 * is the combination of an asynchronous request and the tricky
 * deadlock (see Documentation/filesystems/fuse.txt).
 *
 * During the aborting, progression of requests from the pending and
 * processing lists onto the io list, and progression of new requests
 * onto the pending list is prevented by fc->connected being false.
 *
 * Progression of requests under I/O to the processing list is
 * prevented by the req->interrupted flag being true for these
 * requests.  For this reason requests on the io list must be aborted
 * first.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		fc->connected = 0;
		end_io_requests(fc);
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		wake_up_all(&fc->waitq);
		kill_fasync(&fc->fasync, SIGIO, POLL_IN);
	}
	spin_unlock(&fc->lock);
}

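/*
 * The ordering above matters: end_io_requests() runs first so that
 * requests currently being copied are flagged interrupted before the
 * pending and processing lists are flushed; otherwise a request could
 * migrate from the io list onto the processing list mid-abort.
 * (fuse_abort_conn() itself is invoked from outside this file, via
 * the connection's sysfs abort attribute in this kernel version.)
 */
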
static int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (fc) {
		spin_lock(&fc->lock);
		fc->connected = 0;
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		spin_unlock(&fc->lock);
		fasync_helper(-1, file, 0, &fc->fasync);
		kobject_put(&fc->kobj);
	}

	return 0;
}

static int fuse_dev_fasync(int fd, struct file *file, int on)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

	/* No locking - fasync_helper does its own locking */
	return fasync_helper(fd, file, on, &fc->fasync);
}

const struct file_operations fuse_dev_operations = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= fuse_dev_read,
	.readv		= fuse_dev_readv,
	.write		= fuse_dev_write,
	.writev		= fuse_dev_writev,
	.poll		= fuse_dev_poll,
	.release	= fuse_dev_release,
	.fasync		= fuse_dev_fasync,
};

static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops = &fuse_dev_operations,
};

int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

 out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
 out:
	return err;
}

void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}