/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>
#include <linux/splice.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");
static struct kmem_cache *fuse_req_cachep;

static struct fuse_conn *fuse_get_conn(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private_data is set
	 * once during mount and is valid until the file is released.
	 */
	return file->private_data;
}
static void fuse_request_init(struct fuse_req *req, struct page **pages,
			      struct fuse_page_desc *page_descs,
			      unsigned npages)
{
	memset(req, 0, sizeof(*req));
	memset(pages, 0, sizeof(*pages) * npages);
	memset(page_descs, 0, sizeof(*page_descs) * npages);
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->intr_entry);
	init_waitqueue_head(&req->waitq);
	atomic_set(&req->count, 1);
	req->pages = pages;
	req->page_descs = page_descs;
	req->max_pages = npages;
}
static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, flags);
	if (req) {
		struct page **pages;
		struct fuse_page_desc *page_descs;

		if (npages <= FUSE_REQ_INLINE_PAGES) {
			pages = req->inline_pages;
			page_descs = req->inline_page_descs;
		} else {
			pages = kmalloc(sizeof(struct page *) * npages, flags);
			page_descs = kmalloc(sizeof(struct fuse_page_desc) *
					     npages, flags);
		}

		if (!pages || !page_descs) {
			kfree(pages);
			kfree(page_descs);
			kmem_cache_free(fuse_req_cachep, req);
			return NULL;
		}

		fuse_request_init(req, pages, page_descs, npages);
	}
	return req;
}
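/*
 * Note on the allocation above: struct fuse_req embeds
 * FUSE_REQ_INLINE_PAGES page pointers and descriptors, so the common
 * small-request case avoids both kmalloc() calls entirely.  A rough
 * sketch of the effect:
 *
 *	fuse_request_alloc(1);	// fits inline, no extra allocation
 *	fuse_request_alloc(32);	// kmallocs 32 page pointers + descs
 */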
struct fuse_req *fuse_request_alloc(unsigned npages)
{
	return __fuse_request_alloc(npages, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(fuse_request_alloc);

struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
{
	return __fuse_request_alloc(npages, GFP_NOFS);
}
void fuse_request_free(struct fuse_req *req)
{
	if (req->pages != req->inline_pages) {
		kfree(req->pages);
		kfree(req->page_descs);
	}
	kmem_cache_free(fuse_req_cachep, req);
}
static void block_sigs(sigset_t *oldset)
{
	sigset_t mask;

	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}
void __fuse_get_request(struct fuse_req *req)
{
	atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	BUG_ON(atomic_read(&req->count) < 2);
	atomic_dec(&req->count);
}
static void fuse_req_init_context(struct fuse_req *req)
{
	req->in.h.uid = from_kuid_munged(&init_user_ns, current_fsuid());
	req->in.h.gid = from_kgid_munged(&init_user_ns, current_fsgid());
	req->in.h.pid = current->pid;
}
void fuse_set_initialized(struct fuse_conn *fc)
{
	/* Make sure stores before this are seen on another CPU */
	smp_wmb();
	fc->initialized = 1;
}
static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
{
	return !fc->initialized || (for_background && fc->blocked);
}
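/*
 * Allocation throttling in short: every allocation blocks until the
 * INIT reply has arrived (fc->initialized), and background allocations
 * additionally block while fc->blocked is set, i.e. while the number
 * of background requests has reached fc->max_background.  Foreground
 * requests are never throttled against the background limit.
 */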
static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
				       bool for_background)
{
	struct fuse_req *req;
	int err;
	atomic_inc(&fc->num_waiting);

	if (fuse_block_alloc(fc, for_background)) {
		sigset_t oldset;
		int intr;

		block_sigs(&oldset);
		intr = wait_event_interruptible_exclusive(fc->blocked_waitq,
				!fuse_block_alloc(fc, for_background));
		restore_sigs(&oldset);
		err = -EINTR;
		if (intr)
			goto out;
	}
	/* Matches smp_wmb() in fuse_set_initialized() */
	smp_rmb();

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	req = fuse_request_alloc(npages);
	err = -ENOMEM;
	if (!req) {
		if (for_background)
			wake_up(&fc->blocked_waitq);
		goto out;
	}

	fuse_req_init_context(req);
	req->waiting = 1;
	req->background = for_background;
	return req;

 out:
	atomic_dec(&fc->num_waiting);
	return ERR_PTR(err);
}
struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
{
	return __fuse_get_req(fc, npages, false);
}
EXPORT_SYMBOL_GPL(fuse_get_req);

struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
					     unsigned npages)
{
	return __fuse_get_req(fc, npages, true);
}
EXPORT_SYMBOL_GPL(fuse_get_req_for_background);
/*
 * Return request in fuse_file->reserved_req.  However that may
 * currently be in use.  If that is the case, wait for it to become
 * available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
					 struct file *file)
{
	struct fuse_req *req = NULL;
	struct fuse_file *ff = file->private_data;

	do {
		wait_event(fc->reserved_req_waitq, ff->reserved_req);
		spin_lock(&fc->lock);
		if (ff->reserved_req) {
			req = ff->reserved_req;
			ff->reserved_req = NULL;
			req->stolen_file = get_file(file);
		}
		spin_unlock(&fc->lock);
	} while (!req);

	return req;
}
/*
 * Put stolen request back into fuse_file->reserved_req
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct file *file = req->stolen_file;
	struct fuse_file *ff = file->private_data;

	spin_lock(&fc->lock);
	fuse_request_init(req, req->pages, req->page_descs, req->max_pages);
	BUG_ON(ff->reserved_req);
	ff->reserved_req = req;
	wake_up_all(&fc->reserved_req_waitq);
	spin_unlock(&fc->lock);
	fput(file);
}
/*
 * Gets a request for a file operation, always succeeds
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open.  If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
					     struct file *file)
{
	struct fuse_req *req;

	atomic_inc(&fc->num_waiting);
	wait_event(fc->blocked_waitq, fc->initialized);
	/* Matches smp_wmb() in fuse_set_initialized() */
	smp_rmb();
	req = fuse_request_alloc(0);
	if (!req)
		req = get_reserved_req(fc, file);

	fuse_req_init_context(req);
	req->waiting = 1;
	return req;
}
void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (atomic_dec_and_test(&req->count)) {
		if (unlikely(req->background)) {
			/*
			 * We get here in the unlikely case that a background
			 * request was allocated but not sent
			 */
			spin_lock(&fc->lock);
			if (!fc->blocked)
				wake_up(&fc->blocked_waitq);
			spin_unlock(&fc->lock);
		}

		if (req->waiting)
			atomic_dec(&fc->num_waiting);

		if (req->stolen_file)
			put_reserved_req(fc, req);
		else
			fuse_request_free(req);
	}
}
EXPORT_SYMBOL_GPL(fuse_put_request);
static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}
static u64 fuse_get_unique(struct fuse_conn *fc)
{
	fc->reqctr++;
	/* zero is special */
	if (fc->reqctr == 0)
		fc->reqctr = 1;

	return fc->reqctr;
}
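/*
 * The unique ID is what ties a userspace reply back to its request:
 * the daemon echoes fuse_in_header.unique in fuse_out_header.unique,
 * and fuse_dev_do_write() finds the request again via request_find().
 * Interrupt requests get a separate ID (req->intr_unique), so a reply
 * may legitimately match either value.
 */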
static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	list_add_tail(&req->list, &fc->pending);
	req->state = FUSE_REQ_PENDING;
	if (!req->waiting) {
		req->waiting = 1;
		atomic_inc(&fc->num_waiting);
	}
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}
void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
		       u64 nodeid, u64 nlookup)
{
	forget->forget_one.nodeid = nodeid;
	forget->forget_one.nlookup = nlookup;

	spin_lock(&fc->lock);
	if (fc->connected) {
		fc->forget_list_tail->next = forget;
		fc->forget_list_tail = forget;
		wake_up(&fc->waitq);
		kill_fasync(&fc->fasync, SIGIO, POLL_IN);
	} else {
		kfree(forget);
	}
	spin_unlock(&fc->lock);
}
static void flush_bg_queue(struct fuse_conn *fc)
{
	while (fc->active_background < fc->max_background &&
	       !list_empty(&fc->bg_queue)) {
		struct fuse_req *req;

		req = list_entry(fc->bg_queue.next, struct fuse_req, list);
		list_del(&req->list);
		fc->active_background++;
		req->in.h.unique = fuse_get_unique(fc);
		queue_request(fc, req);
	}
}
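/*
 * Background requests thus take a two-stage path: callers append them
 * to fc->bg_queue, and this function promotes at most
 * fc->max_background of them at a time onto fc->pending, where the
 * daemon picks them up like any other request.
 */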
/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released
 *
 * Called with fc->lock, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
__releases(fc->lock)
{
	void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
	req->end = NULL;
	list_del(&req->list);
	list_del(&req->intr_entry);
	req->state = FUSE_REQ_FINISHED;
	if (req->background) {
		req->background = 0;

		if (fc->num_background == fc->max_background)
			fc->blocked = 0;

		/* Wake up next waiter, if any */
		if (!fc->blocked && waitqueue_active(&fc->blocked_waitq))
			wake_up(&fc->blocked_waitq);

		if (fc->num_background == fc->congestion_threshold &&
		    fc->connected && fc->bdi_initialized) {
			clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);
			clear_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
		}
		fc->num_background--;
		fc->active_background--;
		flush_bg_queue(fc);
	}
	spin_unlock(&fc->lock);
	wake_up(&req->waitq);
	if (end)
		end(fc, req);
	fuse_put_request(fc, req);
}
static void wait_answer_interruptible(struct fuse_conn *fc,
				      struct fuse_req *req)
__releases(fc->lock)
__acquires(fc->lock)
{
	if (signal_pending(current))
		return;

	spin_unlock(&fc->lock);
	wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
	spin_lock(&fc->lock);
}
static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
{
	list_add_tail(&req->intr_entry, &fc->interrupts);
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
__releases(fc->lock)
__acquires(fc->lock)
{
	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		wait_answer_interruptible(fc, req);

		if (req->aborted)
			goto aborted;
		if (req->state == FUSE_REQ_FINISHED)
			return;

		req->interrupted = 1;
		if (req->state == FUSE_REQ_SENT)
			queue_interrupt(fc, req);
	}

	if (!req->force) {
		sigset_t oldset;

		/* Only fatal signals may interrupt this */
		block_sigs(&oldset);
		wait_answer_interruptible(fc, req);
		restore_sigs(&oldset);

		if (req->aborted)
			goto aborted;
		if (req->state == FUSE_REQ_FINISHED)
			return;

		/* Request is not yet in userspace, bail out */
		if (req->state == FUSE_REQ_PENDING) {
			list_del(&req->list);
			__fuse_put_request(req);
			req->out.h.error = -EINTR;
			return;
		}
	}

	/*
	 * Either request is already in userspace, or it was forced.
	 * Wait it out.
	 */
	spin_unlock(&fc->lock);
	wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
	spin_lock(&fc->lock);

	if (!req->aborted)
		return;

 aborted:
	BUG_ON(req->state != FUSE_REQ_FINISHED);
	if (req->locked) {
		/* This is uninterruptible sleep, because data is
		   being copied to/from the buffers of req.  During
		   locked state, there mustn't be any filesystem
		   operation (e.g. page fault), since that could lead
		   to deadlock */
		spin_unlock(&fc->lock);
		wait_event(req->waitq, !req->locked);
		spin_lock(&fc->lock);
	}
}
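/*
 * Summary of the waiting strategy above: first an interruptible wait
 * (any signal lets a FUSE_INTERRUPT be queued), then a wait that only
 * fatal signals may break (with an -EINTR bail-out while the request
 * is still pending), and finally an uninterruptible wait once the
 * request has been read by userspace or was forced.
 */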
static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	BUG_ON(req->background);
	spin_lock(&fc->lock);
	if (!fc->connected)
		req->out.h.error = -ENOTCONN;
	else if (fc->conn_error)
		req->out.h.error = -ECONNREFUSED;
	else {
		req->in.h.unique = fuse_get_unique(fc);
		queue_request(fc, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);

		request_wait_answer(fc, req);
	}
	spin_unlock(&fc->lock);
}
void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	__fuse_request_send(fc, req);
}
EXPORT_SYMBOL_GPL(fuse_request_send);
static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
{
	if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS)
		args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE;

	if (fc->minor < 9) {
		switch (args->in.h.opcode) {
		case FUSE_LOOKUP:
		case FUSE_CREATE:
		case FUSE_MKNOD:
		case FUSE_MKDIR:
		case FUSE_SYMLINK:
		case FUSE_LINK:
			args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
			break;
		case FUSE_GETATTR:
		case FUSE_SETATTR:
			args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
			break;
		}
	}
	if (fc->minor < 12) {
		switch (args->in.h.opcode) {
		case FUSE_CREATE:
			args->in.args[0].size = sizeof(struct fuse_open_in);
			break;
		case FUSE_MKNOD:
			args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
			break;
		}
	}
}
ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
{
	struct fuse_req *req;
	ssize_t ret;

	req = fuse_get_req(fc, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* Needs to be done after fuse_get_req() so that fc->minor is valid */
	fuse_adjust_compat(fc, args);

	req->in.h.opcode = args->in.h.opcode;
	req->in.h.nodeid = args->in.h.nodeid;
	req->in.numargs = args->in.numargs;
	memcpy(req->in.args, args->in.args,
	       args->in.numargs * sizeof(struct fuse_in_arg));
	req->out.argvar = args->out.argvar;
	req->out.numargs = args->out.numargs;
	memcpy(req->out.args, args->out.args,
	       args->out.numargs * sizeof(struct fuse_arg));
	fuse_request_send(fc, req);
	ret = req->out.h.error;
	if (!ret && args->out.argvar) {
		BUG_ON(args->out.numargs != 1);
		ret = req->out.args[0].size;
	}
	fuse_put_request(fc, req);

	return ret;
}
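/*
 * A typical caller pattern, as a rough sketch only (modelled on the
 * attribute plumbing elsewhere in fs/fuse/, not a verbatim copy of any
 * one caller):
 *
 *	FUSE_ARGS(args);
 *	struct fuse_getattr_in inarg;
 *	struct fuse_attr_out outarg;
 *
 *	memset(&inarg, 0, sizeof(inarg));
 *	args.in.h.opcode = FUSE_GETATTR;
 *	args.in.h.nodeid = get_node_id(inode);
 *	args.in.numargs = 1;
 *	args.in.args[0].size = sizeof(inarg);
 *	args.in.args[0].value = &inarg;
 *	args.out.numargs = 1;
 *	args.out.args[0].size = sizeof(outarg);
 *	args.out.args[0].value = &outarg;
 *	err = fuse_simple_request(fc, &args);
 */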
static void fuse_request_send_nowait_locked(struct fuse_conn *fc,
					    struct fuse_req *req)
{
	BUG_ON(!req->background);
	fc->num_background++;
	if (fc->num_background == fc->max_background)
		fc->blocked = 1;
	if (fc->num_background == fc->congestion_threshold &&
	    fc->bdi_initialized) {
		set_bdi_congested(&fc->bdi, BLK_RW_SYNC);
		set_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
	}
	list_add_tail(&req->list, &fc->bg_queue);
	flush_bg_queue(fc);
}
static void fuse_request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		fuse_request_send_nowait_locked(fc, req);
		spin_unlock(&fc->lock);
	} else {
		req->out.h.error = -ENOTCONN;
		request_end(fc, req);
	}
}

void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	fuse_request_send_nowait(fc, req);
}
EXPORT_SYMBOL_GPL(fuse_request_send_background);
static int fuse_request_send_notify_reply(struct fuse_conn *fc,
					  struct fuse_req *req, u64 unique)
{
	int err = -ENODEV;

	req->isreply = 0;
	req->in.h.unique = unique;
	spin_lock(&fc->lock);
	if (fc->connected) {
		queue_request(fc, req);
		err = 0;
	}
	spin_unlock(&fc->lock);

	return err;
}
/*
 * Called under fc->lock
 *
 * fc->connected must have been checked previously
 */
void fuse_request_send_background_locked(struct fuse_conn *fc,
					 struct fuse_req *req)
{
	fuse_request_send_nowait_locked(fc, req);
}
void fuse_force_forget(struct file *file, u64 nodeid)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_forget_in inarg;

	memset(&inarg, 0, sizeof(inarg));
	inarg.nlookup = 1;
	req = fuse_get_req_nofail_nopages(fc, file);
	req->in.h.opcode = FUSE_FORGET;
	req->in.h.nodeid = nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->isreply = 0;
	__fuse_request_send(fc, req);
	/* ignore errors */
	fuse_put_request(fc, req);
}
/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted bail out.
 */
static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&fc->lock);
		if (req->aborted)
			err = -ENOENT;
		else
			req->locked = 1;
		spin_unlock(&fc->lock);
	}
	return err;
}

/*
 * Unlock request.  If it was aborted during being locked, the
 * requester thread is currently waiting for it to be unlocked, so
 * wake it up.
 */
static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (req) {
		spin_lock(&fc->lock);
		req->locked = 0;
		if (req->aborted)
			wake_up(&req->waitq);
		spin_unlock(&fc->lock);
	}
}
struct fuse_copy_state {
	struct fuse_conn *fc;
	int write;
	struct fuse_req *req;
	struct iov_iter *iter;
	struct pipe_buffer *pipebufs;
	struct pipe_buffer *currbuf;
	struct pipe_inode_info *pipe;
	unsigned long nr_segs;
	struct page *pg;
	unsigned len;
	unsigned offset;
	unsigned move_pages:1;
};
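/*
 * A fuse_copy_state is backed by exactly one of two sources: a caller
 * supplied iov_iter (plain read()/write() on the device) or an array
 * of pipe_buffers (the splice paths).  pg/offset/len always describe
 * the current window into whichever source is active.
 */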
static void fuse_copy_init(struct fuse_copy_state *cs,
			   struct fuse_conn *fc,
			   int write,
			   struct iov_iter *iter)
{
	memset(cs, 0, sizeof(*cs));
	cs->fc = fc;
	cs->write = write;
	cs->iter = iter;
}
/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->currbuf) {
		struct pipe_buffer *buf = cs->currbuf;

		if (cs->write)
			buf->len = PAGE_SIZE - cs->len;
		cs->currbuf = NULL;
	} else if (cs->pg) {
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
	}

	cs->pg = NULL;
}
/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	struct page *page;
	int err;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);
	if (cs->pipebufs) {
		struct pipe_buffer *buf = cs->pipebufs;

		if (!cs->write) {
			err = buf->ops->confirm(cs->pipe, buf);
			if (err)
				return err;

			BUG_ON(!cs->nr_segs);
			cs->currbuf = buf;
			cs->pg = buf->page;
			cs->offset = buf->offset;
			cs->len = buf->len;
			cs->pipebufs++;
			cs->nr_segs--;
		} else {
			if (cs->nr_segs == cs->pipe->buffers)
				return -EIO;

			page = alloc_page(GFP_HIGHUSER);
			if (!page)
				return -ENOMEM;

			buf->page = page;
			buf->offset = 0;
			buf->len = 0;

			cs->currbuf = buf;
			cs->pg = page;
			cs->offset = 0;
			cs->len = 0;
			cs->pipebufs++;
			cs->nr_segs++;
		}
	} else {
		size_t off;
		err = iov_iter_get_pages(cs->iter, &page, PAGE_SIZE, 1, &off);
		if (err < 0)
			return err;
		BUG_ON(!err);
		cs->len = err;
		cs->offset = off;
		cs->pg = page;
		iov_iter_advance(cs->iter, err);
	}

	return lock_request(cs->fc, cs->req);
}
/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		void *pgaddr = kmap_atomic(cs->pg);
		void *buf = pgaddr + cs->offset;

		if (cs->write)
			memcpy(buf, *val, ncpy);
		else
			memcpy(*val, buf, ncpy);

		kunmap_atomic(pgaddr);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->offset += ncpy;
	return ncpy;
}
static int fuse_check_page(struct page *page)
{
	if (page_mapcount(page) ||
	    page->mapping != NULL ||
	    page_count(page) != 1 ||
	    (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
	     ~(1 << PG_locked |
	       1 << PG_referenced |
	       1 << PG_uptodate |
	       1 << PG_lru |
	       1 << PG_active |
	       1 << PG_reclaim))) {
		printk(KERN_WARNING "fuse: trying to steal weird page\n");
		printk(KERN_WARNING "  page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
		return 1;
	}
	return 0;
}
static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
{
	int err;
	struct page *oldpage = *pagep;
	struct page *newpage;
	struct pipe_buffer *buf = cs->pipebufs;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);

	err = buf->ops->confirm(cs->pipe, buf);
	if (err)
		return err;

	BUG_ON(!cs->nr_segs);
	cs->currbuf = buf;
	cs->len = buf->len;
	cs->pipebufs++;
	cs->nr_segs--;

	if (cs->len != PAGE_SIZE)
		goto out_fallback;

	if (buf->ops->steal(cs->pipe, buf) != 0)
		goto out_fallback;

	newpage = buf->page;

	if (!PageUptodate(newpage))
		SetPageUptodate(newpage);

	ClearPageMappedToDisk(newpage);

	if (fuse_check_page(newpage) != 0)
		goto out_fallback_unlock;

	/*
	 * This is a new and locked page, it shouldn't be mapped or
	 * have any special flags on it
	 */
	if (WARN_ON(page_mapped(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(page_has_private(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageMlocked(oldpage)))
		goto out_fallback_unlock;

	err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
	if (err) {
		unlock_page(newpage);
		return err;
	}

	page_cache_get(newpage);

	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
		lru_cache_add_file(newpage);

	err = 0;
	spin_lock(&cs->fc->lock);
	if (cs->req->aborted)
		err = -ENOENT;
	else
		*pagep = newpage;
	spin_unlock(&cs->fc->lock);

	if (err) {
		unlock_page(newpage);
		page_cache_release(newpage);
		return err;
	}

	unlock_page(oldpage);
	page_cache_release(oldpage);
	cs->len = 0;

	return 0;

out_fallback_unlock:
	unlock_page(newpage);
out_fallback:
	cs->pg = buf->page;
	cs->offset = buf->offset;

	err = lock_request(cs->fc, cs->req);
	if (err)
		return err;

	return 1;
}
static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
			 unsigned offset, unsigned count)
{
	struct pipe_buffer *buf;

	if (cs->nr_segs == cs->pipe->buffers)
		return -EIO;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);

	buf = cs->pipebufs;
	page_cache_get(page);
	buf->page = page;
	buf->offset = offset;
	buf->len = count;

	cs->pipebufs++;
	cs->nr_segs++;
	cs->len = 0;

	return 0;
}
/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
			  unsigned offset, unsigned count, int zeroing)
{
	int err;
	struct page *page = *pagep;

	if (page && zeroing && count < PAGE_SIZE)
		clear_highpage(page);

	while (count) {
		if (cs->write && cs->pipebufs && page) {
			return fuse_ref_page(cs, page, offset, count);
		} else if (!cs->len) {
			if (cs->move_pages && page &&
			    offset == 0 && count == PAGE_SIZE) {
				err = fuse_try_move_page(cs, pagep);
				if (err <= 0)
					return err;
			} else {
				err = fuse_copy_fill(cs);
				if (err)
					return err;
			}
		}
		if (page) {
			void *mapaddr = kmap_atomic(page);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}
/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		int err;
		unsigned offset = req->page_descs[i].offset;
		unsigned count = min(nbytes, req->page_descs[i].length);

		err = fuse_copy_page(cs, &req->pages[i], offset, count,
				     zeroing);
		if (err)
			return err;

		nbytes -= count;
	}
	return 0;
}
/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		if (!cs->len) {
			int err = fuse_copy_fill(cs);
			if (err)
				return err;
		}
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}
/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++) {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}
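/*
 * Wire format note: a request goes out as a fuse_in_header followed by
 * each argument back to back, with the optional last argument carried
 * as page payload (argpages).  A FUSE_WRITE request, for instance, is
 * header + fuse_write_in + the data pages.
 */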
static int forget_pending(struct fuse_conn *fc)
{
	return fc->forget_list_head.next != NULL;
}

static int request_pending(struct fuse_conn *fc)
{
	return !list_empty(&fc->pending) || !list_empty(&fc->interrupts) ||
		forget_pending(fc);
}
/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
__releases(fc->lock)
__acquires(fc->lock)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(&fc->waitq, &wait);
	while (fc->connected && !request_pending(fc)) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			break;

		spin_unlock(&fc->lock);
		schedule();
		spin_lock(&fc->lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&fc->waitq, &wait);
}
/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fc->lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs,
			       size_t nbytes, struct fuse_req *req)
__releases(fc->lock)
{
	struct fuse_in_header ih;
	struct fuse_interrupt_in arg;
	unsigned reqsize = sizeof(ih) + sizeof(arg);
	int err;

	list_del_init(&req->intr_entry);
	req->intr_unique = fuse_get_unique(fc);
	memset(&ih, 0, sizeof(ih));
	memset(&arg, 0, sizeof(arg));
	ih.len = reqsize;
	ih.opcode = FUSE_INTERRUPT;
	ih.unique = req->intr_unique;
	arg.unique = req->in.h.unique;

	spin_unlock(&fc->lock);
	if (nbytes < reqsize)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	return err ? err : reqsize;
}
static struct fuse_forget_link *dequeue_forget(struct fuse_conn *fc,
					       unsigned max,
					       unsigned *countp)
{
	struct fuse_forget_link *head = fc->forget_list_head.next;
	struct fuse_forget_link **newhead = &head;
	unsigned count;

	for (count = 0; *newhead != NULL && count < max; count++)
		newhead = &(*newhead)->next;

	fc->forget_list_head.next = *newhead;
	*newhead = NULL;
	if (fc->forget_list_head.next == NULL)
		fc->forget_list_tail = &fc->forget_list_head;

	if (countp != NULL)
		*countp = count;

	return head;
}
static int fuse_read_single_forget(struct fuse_conn *fc,
				   struct fuse_copy_state *cs,
				   size_t nbytes)
__releases(fc->lock)
{
	int err;
	struct fuse_forget_link *forget = dequeue_forget(fc, 1, NULL);
	struct fuse_forget_in arg = {
		.nlookup = forget->forget_one.nlookup,
	};
	struct fuse_in_header ih = {
		.opcode = FUSE_FORGET,
		.nodeid = forget->forget_one.nodeid,
		.unique = fuse_get_unique(fc),
		.len = sizeof(ih) + sizeof(arg),
	};

	spin_unlock(&fc->lock);
	kfree(forget);
	if (nbytes < ih.len)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}
static int fuse_read_batch_forget(struct fuse_conn *fc,
				  struct fuse_copy_state *cs, size_t nbytes)
__releases(fc->lock)
{
	int err;
	unsigned max_forgets;
	unsigned count;
	struct fuse_forget_link *head;
	struct fuse_batch_forget_in arg = { .count = 0 };
	struct fuse_in_header ih = {
		.opcode = FUSE_BATCH_FORGET,
		.unique = fuse_get_unique(fc),
		.len = sizeof(ih) + sizeof(arg),
	};

	if (nbytes < ih.len) {
		spin_unlock(&fc->lock);
		return -EINVAL;
	}

	max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
	head = dequeue_forget(fc, max_forgets, &count);
	spin_unlock(&fc->lock);

	arg.count = count;
	ih.len += count * sizeof(struct fuse_forget_one);
	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));

	while (head) {
		struct fuse_forget_link *forget = head;

		if (!err) {
			err = fuse_copy_one(cs, &forget->forget_one,
					    sizeof(forget->forget_one));
		}
		head = forget->next;
		kfree(forget);
	}

	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}
static int fuse_read_forget(struct fuse_conn *fc, struct fuse_copy_state *cs,
			    size_t nbytes)
__releases(fc->lock)
{
	if (fc->minor < 16 || fc->forget_list_head.next->next == NULL)
		return fuse_read_single_forget(fc, cs, nbytes);
	else
		return fuse_read_batch_forget(fc, cs, nbytes);
}
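/*
 * Forgets are batched opportunistically: a daemon speaking protocol
 * minor 16 or later receives one FUSE_BATCH_FORGET carrying as many
 * fuse_forget_one entries as fit in its buffer; older daemons get a
 * single FUSE_FORGET per read.
 */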
/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to userspace buffer.  If
 * no reply is needed (FORGET) or request has been aborted or there
 * was an error during the copying then it's finished by calling
 * request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
				struct fuse_copy_state *cs, size_t nbytes)
{
	ssize_t err;
	struct fuse_req *req;
	struct fuse_in *in;
	unsigned reqsize;

 restart:
	spin_lock(&fc->lock);
	err = -EAGAIN;
	if ((file->f_flags & O_NONBLOCK) && fc->connected &&
	    !request_pending(fc))
		goto err_unlock;

	request_wait(fc);
	err = -ENODEV;
	if (!fc->connected)
		goto err_unlock;
	err = -ERESTARTSYS;
	if (!request_pending(fc))
		goto err_unlock;

	if (!list_empty(&fc->interrupts)) {
		req = list_entry(fc->interrupts.next, struct fuse_req,
				 intr_entry);
		return fuse_read_interrupt(fc, cs, nbytes, req);
	}

	if (forget_pending(fc)) {
		if (list_empty(&fc->pending) || fc->forget_batch-- > 0)
			return fuse_read_forget(fc, cs, nbytes);

		if (fc->forget_batch <= -8)
			fc->forget_batch = 16;
	}

	req = list_entry(fc->pending.next, struct fuse_req, list);
	req->state = FUSE_REQ_READING;
	list_move(&req->list, &fc->io);

	in = &req->in;
	reqsize = in->h.len;
	/* If request is too large, reply with an error and restart the read */
	if (nbytes < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	spin_unlock(&fc->lock);
	cs->req = req;
	err = fuse_copy_one(cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(cs);
	spin_lock(&fc->lock);
	req->locked = 0;
	if (req->aborted) {
		request_end(fc, req);
		return -ENODEV;
	}
	if (err) {
		req->out.h.error = -EIO;
		request_end(fc, req);
		return err;
	}
	if (!req->isreply)
		request_end(fc, req);
	else {
		req->state = FUSE_REQ_SENT;
		list_move_tail(&req->list, &fc->processing);
		if (req->interrupted)
			queue_interrupt(fc, req);
		spin_unlock(&fc->lock);
	}
	return reqsize;

 err_unlock:
	spin_unlock(&fc->lock);
	return err;
}
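/*
 * This device read is what a userspace daemon's event loop sits on.
 * A minimal sketch of the other side (error handling omitted; "fd" is
 * the mounted /dev/fuse file descriptor):
 *
 *	char buf[FUSE_MIN_READ_BUFFER];
 *	for (;;) {
 *		ssize_t n = read(fd, buf, sizeof(buf));
 *		struct fuse_in_header *in = (struct fuse_in_header *) buf;
 *		// dispatch on in->opcode, then write() a reply carrying
 *		// in->unique (unless the opcode expects no reply)
 *	}
 */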
static int fuse_dev_open(struct inode *inode, struct file *file)
{
	/*
	 * The fuse device's file's private_data is used to hold
	 * the fuse_conn(ection) when it is mounted, and is used to
	 * keep track of whether the file has been mounted already.
	 */
	file->private_data = NULL;
	return 0;
}
static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct fuse_copy_state cs;
	struct file *file = iocb->ki_filp;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

	if (!iter_is_iovec(to))
		return -EINVAL;

	fuse_copy_init(&cs, fc, 1, to);

	return fuse_dev_do_read(fc, file, &cs, iov_iter_count(to));
}
static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
				    struct pipe_inode_info *pipe,
				    size_t len, unsigned int flags)
{
	int ret;
	int page_nr = 0;
	int do_wakeup = 0;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_conn *fc = fuse_get_conn(in);
	if (!fc)
		return -EPERM;

	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	fuse_copy_init(&cs, fc, 1, NULL);
	cs.pipebufs = bufs;
	cs.pipe = pipe;
	ret = fuse_dev_do_read(fc, in, &cs, len);
	if (ret < 0)
		goto out;

	ret = 0;
	pipe_lock(pipe);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		if (!ret)
			ret = -EPIPE;
		goto out_unlock;
	}

	if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
		ret = -EIO;
		goto out_unlock;
	}

	while (page_nr < cs.nr_segs) {
		int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
		struct pipe_buffer *buf = pipe->bufs + newbuf;

		buf->page = bufs[page_nr].page;
		buf->offset = bufs[page_nr].offset;
		buf->len = bufs[page_nr].len;
		/*
		 * Need to be careful about this. Having buf->ops in module
		 * code can Oops if the buffer persists after module unload.
		 */
		buf->ops = &nosteal_pipe_buf_ops;

		pipe->nrbufs++;
		page_nr++;
		ret += buf->len;

		if (pipe->files)
			do_wakeup = 1;
	}

out_unlock:
	pipe_unlock(pipe);

	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}

out:
	for (; page_nr < cs.nr_segs; page_nr++)
		page_cache_release(bufs[page_nr].page);

	kfree(bufs);
	return ret;
}
static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
			    struct fuse_copy_state *cs)
{
	struct fuse_notify_poll_wakeup_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	fuse_copy_finish(cs);
	return fuse_notify_poll_wakeup(fc, &outarg);

err:
	fuse_copy_finish(cs);
	return err;
}
static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_inode_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;
	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
					       outarg.off, outarg.len);
	}
	up_read(&fc->killsb);
	return err;

err:
	fuse_copy_finish(cs);
	return err;
}
static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_entry_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;
	name.hash = full_name_hash(name.name, name.len);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb)
		err = fuse_reverse_inval_entry(fc->sb, outarg.parent, 0, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}
static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
			      struct fuse_copy_state *cs)
{
	struct fuse_notify_delete_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;
	name.hash = full_name_hash(name.name, name.len);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb)
		err = fuse_reverse_inval_entry(fc->sb, outarg.parent,
					       outarg.child, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}
static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
			     struct fuse_copy_state *cs)
{
	struct fuse_notify_store_out outarg;
	struct inode *inode;
	struct address_space *mapping;
	u64 nodeid;
	int err;
	pgoff_t index;
	unsigned int offset;
	unsigned int num;
	loff_t file_size;
	loff_t end;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto out_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto out_finish;

	err = -EINVAL;
	if (size - sizeof(outarg) != outarg.size)
		goto out_finish;

	nodeid = outarg.nodeid;

	down_read(&fc->killsb);

	err = -ENOENT;
	if (!fc->sb)
		goto out_up_killsb;

	inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
	if (!inode)
		goto out_up_killsb;

	mapping = inode->i_mapping;
	index = outarg.offset >> PAGE_CACHE_SHIFT;
	offset = outarg.offset & ~PAGE_CACHE_MASK;
	file_size = i_size_read(inode);
	end = outarg.offset + outarg.size;
	if (end > file_size) {
		file_size = end;
		fuse_write_update_size(inode, file_size);
	}

	num = outarg.size;
	while (num) {
		struct page *page;
		unsigned int this_num;

		err = -ENOMEM;
		page = find_or_create_page(mapping, index,
					   mapping_gfp_mask(mapping));
		if (!page)
			goto out_iput;

		this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
		err = fuse_copy_page(cs, &page, offset, this_num, 0);
		if (!err && offset == 0 &&
		    (this_num == PAGE_CACHE_SIZE || file_size == end))
			SetPageUptodate(page);
		unlock_page(page);
		page_cache_release(page);

		if (err)
			goto out_iput;

		num -= this_num;
		offset = 0;
		index++;
	}

	err = 0;

out_iput:
	iput(inode);
out_up_killsb:
	up_read(&fc->killsb);
out_finish:
	fuse_copy_finish(cs);
	return err;
}
static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
{
	release_pages(req->pages, req->num_pages, false);
}
static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
			 struct fuse_notify_retrieve_out *outarg)
{
	int err;
	struct address_space *mapping = inode->i_mapping;
	struct fuse_req *req;
	pgoff_t index;
	loff_t file_size;
	unsigned int num;
	unsigned int offset;
	size_t total_len = 0;
	int num_pages;

	offset = outarg->offset & ~PAGE_CACHE_MASK;
	file_size = i_size_read(inode);

	num = outarg->size;
	if (outarg->offset > file_size)
		num = 0;
	else if (outarg->offset + num > file_size)
		num = file_size - outarg->offset;

	num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	num_pages = min(num_pages, FUSE_MAX_PAGES_PER_REQ);

	req = fuse_get_req(fc, num_pages);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->in.h.opcode = FUSE_NOTIFY_REPLY;
	req->in.h.nodeid = outarg->nodeid;
	req->in.numargs = 2;
	req->in.argpages = 1;
	req->page_descs[0].offset = offset;
	req->end = fuse_retrieve_end;

	index = outarg->offset >> PAGE_CACHE_SHIFT;

	while (num && req->num_pages < num_pages) {
		struct page *page;
		unsigned int this_num;

		page = find_get_page(mapping, index);
		if (!page)
			break;

		this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
		req->pages[req->num_pages] = page;
		req->page_descs[req->num_pages].length = this_num;
		req->num_pages++;

		offset = 0;
		num -= this_num;
		total_len += this_num;
		index++;
	}
	req->misc.retrieve_in.offset = outarg->offset;
	req->misc.retrieve_in.size = total_len;
	req->in.args[0].size = sizeof(req->misc.retrieve_in);
	req->in.args[0].value = &req->misc.retrieve_in;
	req->in.args[1].size = total_len;

	err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
	if (err)
		fuse_retrieve_end(fc, req);

	return err;
}
*fc
, unsigned int size
,
1765 struct fuse_copy_state
*cs
)
1767 struct fuse_notify_retrieve_out outarg
;
1768 struct inode
*inode
;
1772 if (size
!= sizeof(outarg
))
1775 err
= fuse_copy_one(cs
, &outarg
, sizeof(outarg
));
1779 fuse_copy_finish(cs
);
1781 down_read(&fc
->killsb
);
1784 u64 nodeid
= outarg
.nodeid
;
1786 inode
= ilookup5(fc
->sb
, nodeid
, fuse_inode_eq
, &nodeid
);
1788 err
= fuse_retrieve(fc
, inode
, &outarg
);
1792 up_read(&fc
->killsb
);
1797 fuse_copy_finish(cs
);
static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
		       unsigned int size, struct fuse_copy_state *cs)
{
	/* Don't try to move pages (yet) */
	cs->move_pages = 0;

	switch (code) {
	case FUSE_NOTIFY_POLL:
		return fuse_notify_poll(fc, size, cs);

	case FUSE_NOTIFY_INVAL_INODE:
		return fuse_notify_inval_inode(fc, size, cs);

	case FUSE_NOTIFY_INVAL_ENTRY:
		return fuse_notify_inval_entry(fc, size, cs);

	case FUSE_NOTIFY_STORE:
		return fuse_notify_store(fc, size, cs);

	case FUSE_NOTIFY_RETRIEVE:
		return fuse_notify_retrieve(fc, size, cs);

	case FUSE_NOTIFY_DELETE:
		return fuse_notify_delete(fc, size, cs);

	default:
		fuse_copy_finish(cs);
		return -EINVAL;
	}
}
/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
{
	struct fuse_req *req;

	list_for_each_entry(req, &fc->processing, list) {
		if (req->in.h.unique == unique || req->intr_unique == unique)
			return req;
	}
	return NULL;
}
static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	if (out->h.error)
		return nbytes != reqsize ? -EINVAL : 0;

	reqsize += len_args(out->numargs, out->args);

	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &out->args[out->numargs-1];
		unsigned diffsize = reqsize - nbytes;
		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
			      out->page_zeroing);
}
/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling request_end()
 */
static ssize_t fuse_dev_do_write(struct fuse_conn *fc,
				 struct fuse_copy_state *cs, size_t nbytes)
{
	int err;
	struct fuse_req *req;
	struct fuse_out_header oh;

	if (nbytes < sizeof(struct fuse_out_header))
		return -EINVAL;

	err = fuse_copy_one(cs, &oh, sizeof(oh));
	if (err)
		goto err_finish;

	err = -EINVAL;
	if (oh.len != nbytes)
		goto err_finish;

	/*
	 * Zero oh.unique indicates unsolicited notification message
	 * and error contains notification code.
	 */
	if (!oh.unique) {
		err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
		return err ? err : nbytes;
	}

	err = -EINVAL;
	if (oh.error <= -1000 || oh.error > 0)
		goto err_finish;

	spin_lock(&fc->lock);
	err = -ENOENT;
	if (!fc->connected)
		goto err_unlock;

	req = request_find(fc, oh.unique);
	if (!req)
		goto err_unlock;

	if (req->aborted) {
		spin_unlock(&fc->lock);
		fuse_copy_finish(cs);
		spin_lock(&fc->lock);
		request_end(fc, req);
		return -ENOENT;
	}
	/* Is it an interrupt reply? */
	if (req->intr_unique == oh.unique) {
		err = -EINVAL;
		if (nbytes != sizeof(struct fuse_out_header))
			goto err_unlock;

		if (oh.error == -ENOSYS)
			fc->no_interrupt = 1;
		else if (oh.error == -EAGAIN)
			queue_interrupt(fc, req);

		spin_unlock(&fc->lock);
		fuse_copy_finish(cs);
		return nbytes;
	}

	req->state = FUSE_REQ_WRITING;
	list_move(&req->list, &fc->io);
	req->out.h = oh;
	req->locked = 1;
	cs->req = req;
	if (!req->out.page_replace)
		cs->move_pages = 0;
	spin_unlock(&fc->lock);

	err = copy_out_args(cs, &req->out, nbytes);
	fuse_copy_finish(cs);

	spin_lock(&fc->lock);
	req->locked = 0;
	if (!err) {
		if (req->aborted)
			err = -ENOENT;
	} else if (!req->aborted)
		req->out.h.error = -EIO;
	request_end(fc, req);

	return err ? err : nbytes;

 err_unlock:
	spin_unlock(&fc->lock);
 err_finish:
	fuse_copy_finish(cs);
	return err;
}
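/*
 * The matching daemon-side reply is a fuse_out_header followed by the
 * opcode-specific out arguments.  A rough sketch of replying to the
 * request read in the daemon loop sketched earlier (error handling
 * omitted; "outarg" is the hypothetical reply body):
 *
 *	struct fuse_out_header oh = {
 *		.len = sizeof(oh) + sizeof(outarg),
 *		.error = 0,
 *		.unique = in->unique,
 *	};
 *	struct iovec iov[2] = {
 *		{ &oh, sizeof(oh) },
 *		{ &outarg, sizeof(outarg) },
 *	};
 *	writev(fd, iov, 2);
 */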
static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct fuse_copy_state cs;
	struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
	if (!fc)
		return -EPERM;

	if (!iter_is_iovec(from))
		return -EINVAL;

	fuse_copy_init(&cs, fc, 0, from);

	return fuse_dev_do_write(fc, &cs, iov_iter_count(from));
}
static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
				     struct file *out, loff_t *ppos,
				     size_t len, unsigned int flags)
{
	unsigned nbuf;
	unsigned idx;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_conn *fc;
	size_t rem;
	ssize_t ret;

	fc = fuse_get_conn(out);
	if (!fc)
		return -EPERM;

	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	pipe_lock(pipe);
	nbuf = 0;
	rem = 0;
	for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
		rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;

	ret = -EINVAL;
	if (rem < len) {
		pipe_unlock(pipe);
		goto out;
	}

	rem = len;
	while (rem) {
		struct pipe_buffer *ibuf;
		struct pipe_buffer *obuf;

		BUG_ON(nbuf >= pipe->buffers);
		BUG_ON(!pipe->nrbufs);
		ibuf = &pipe->bufs[pipe->curbuf];
		obuf = &bufs[nbuf];

		if (rem >= ibuf->len) {
			*obuf = *ibuf;
			ibuf->ops = NULL;
			pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
			pipe->nrbufs--;
		} else {
			ibuf->ops->get(pipe, ibuf);
			*obuf = *ibuf;
			obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
			obuf->len = rem;
			ibuf->offset += obuf->len;
			ibuf->len -= obuf->len;
		}
		nbuf++;
		rem -= obuf->len;
	}
	pipe_unlock(pipe);

	fuse_copy_init(&cs, fc, 0, NULL);
	cs.pipebufs = bufs;
	cs.nr_segs = nbuf;
	cs.pipe = pipe;

	if (flags & SPLICE_F_MOVE)
		cs.move_pages = 1;

	ret = fuse_dev_do_write(fc, &cs, len);

	for (idx = 0; idx < nbuf; idx++) {
		struct pipe_buffer *buf = &bufs[idx];
		buf->ops->release(pipe, buf);
	}
out:
	kfree(bufs);
	return ret;
}
static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
	unsigned mask = POLLOUT | POLLWRNORM;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return POLLERR;

	poll_wait(file, &fc->waitq, wait);

	spin_lock(&fc->lock);
	if (!fc->connected)
		mask = POLLERR;
	else if (request_pending(fc))
		mask |= POLLIN | POLLRDNORM;
	spin_unlock(&fc->lock);

	return mask;
}
/*
 * Abort all requests on the given list (pending or processing)
 *
 * This function releases and reacquires fc->lock
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
__releases(fc->lock)
__acquires(fc->lock)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		request_end(fc, req);
		spin_lock(&fc->lock);
	}
}
/*
 * Abort requests under I/O
 *
 * The requests are set to aborted and finished, and the request
 * waiter is woken up.  This will make request_wait_answer() wait
 * until the request is unlocked and then return.
 *
 * If the request is asynchronous, then the end function needs to be
 * called after waiting for the request to be unlocked (if it was
 * locked).
 */
static void end_io_requests(struct fuse_conn *fc)
__releases(fc->lock)
__acquires(fc->lock)
{
	while (!list_empty(&fc->io)) {
		struct fuse_req *req =
			list_entry(fc->io.next, struct fuse_req, list);
		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;

		req->aborted = 1;
		req->out.h.error = -ECONNABORTED;
		req->state = FUSE_REQ_FINISHED;
		list_del_init(&req->list);
		wake_up(&req->waitq);
		if (end) {
			req->end = NULL;
			__fuse_get_request(req);
			spin_unlock(&fc->lock);
			wait_event(req->waitq, !req->locked);
			end(fc, req);
			fuse_put_request(fc, req);
			spin_lock(&fc->lock);
		}
	}
}
static void end_queued_requests(struct fuse_conn *fc)
__releases(fc->lock)
__acquires(fc->lock)
{
	fc->max_background = UINT_MAX;
	flush_bg_queue(fc);
	end_requests(fc, &fc->pending);
	end_requests(fc, &fc->processing);
	while (forget_pending(fc))
		kfree(dequeue_forget(fc, 1, NULL));
}
static void end_polls(struct fuse_conn *fc)
{
	struct rb_node *p;

	p = rb_first(&fc->polled_files);

	while (p) {
		struct fuse_file *ff;
		ff = rb_entry(p, struct fuse_file, polled_node);
		wake_up_interruptible_all(&ff->poll_wait);

		p = rb_next(p);
	}
}
/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or
 * just a hung filesystem.
 *
 * The same effect is usually achievable through killing the
 * filesystem daemon and all users of the filesystem.  The exception
 * is the combination of an asynchronous request and the tricky
 * deadlock (see Documentation/filesystems/fuse.txt).
 *
 * During the aborting, progression of requests from the pending and
 * processing lists onto the io list, and progression of new requests
 * onto the pending list is prevented by fc->connected being false.
 *
 * Progression of requests under I/O to the processing list is
 * prevented by the req->aborted flag being true for these requests.
 * For this reason requests on the io list must be aborted first.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		fc->connected = 0;
		fc->blocked = 0;
		fuse_set_initialized(fc);
		end_io_requests(fc);
		end_queued_requests(fc);
		end_polls(fc);
		wake_up_all(&fc->waitq);
		wake_up_all(&fc->blocked_waitq);
		kill_fasync(&fc->fasync, SIGIO, POLL_IN);
	}
	spin_unlock(&fc->lock);
}
EXPORT_SYMBOL_GPL(fuse_abort_conn);
int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (fc) {
		spin_lock(&fc->lock);
		fc->connected = 0;
		fc->blocked = 0;
		fuse_set_initialized(fc);
		end_queued_requests(fc);
		end_polls(fc);
		wake_up_all(&fc->blocked_waitq);
		spin_unlock(&fc->lock);
		fuse_conn_put(fc);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(fuse_dev_release);
static int fuse_dev_fasync(int fd, struct file *file, int on)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

	/* No locking - fasync_helper does its own locking */
	return fasync_helper(fd, file, on, &fc->fasync);
}
const struct file_operations fuse_dev_operations = {
	.owner		= THIS_MODULE,
	.open		= fuse_dev_open,
	.llseek		= no_llseek,
	.read_iter	= fuse_dev_read,
	.splice_read	= fuse_dev_splice_read,
	.write_iter	= fuse_dev_write,
	.splice_write	= fuse_dev_splice_write,
	.poll		= fuse_dev_poll,
	.release	= fuse_dev_release,
	.fasync		= fuse_dev_fasync,
};
EXPORT_SYMBOL_GPL(fuse_dev_operations);
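/*
 * Everything in this file is reached through these file operations:
 * libfuse (or any daemon) open()s /dev/fuse, hands the descriptor to
 * mount(2) via the "fd=" option, then services requests over
 * read()/write() or the splice-based variants, using poll() to wait
 * for work.
 */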
static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops = &fuse_dev_operations,
};
int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

 out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
 out:
	return err;
}
void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}