/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/compat.h>
static const struct file_operations fuse_direct_io_file_operations;
static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
			  int opcode, struct fuse_open_out *outargp)
{
	struct fuse_open_in inarg;
	struct fuse_req *req;
	int err;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
	if (!fc->atomic_o_trunc)
		inarg.flags &= ~O_TRUNC;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(*outargp);
	req->out.args[0].value = outargp;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	return err;
}
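
/*
 * Allocate a fuse_file together with the request that will later carry
 * its RELEASE, so that releasing the file cannot fail for lack of
 * memory.
 */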
struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
{
	struct fuse_file *ff;

	ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
	if (unlikely(!ff))
		return NULL;

	ff->fc = fc;
	ff->reserved_req = fuse_request_alloc();
	if (unlikely(!ff->reserved_req)) {
		kfree(ff);
		return NULL;
	}

	INIT_LIST_HEAD(&ff->write_entry);
	atomic_set(&ff->count, 0);
	RB_CLEAR_NODE(&ff->polled_node);
	init_waitqueue_head(&ff->poll_wait);

	spin_lock(&fc->lock);
	ff->kh = ++fc->khctr;
	spin_unlock(&fc->lock);

	return ff;
}
void fuse_file_free(struct fuse_file *ff)
{
	fuse_request_free(ff->reserved_req);
	kfree(ff);
}
struct fuse_file *fuse_file_get(struct fuse_file *ff)
{
	atomic_inc(&ff->count);
	return ff;
}
static void fuse_release_async(struct work_struct *work)
{
	struct fuse_req *req;
	struct fuse_conn *fc;
	struct path path;

	req = container_of(work, struct fuse_req, misc.release.work);
	path = req->misc.release.path;
	fc = get_fuse_conn(path.dentry->d_inode);

	fuse_put_request(fc, req);
	path_put(&path);
}
static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
{
	if (fc->destroy_req) {
		/*
		 * If this is a fuseblk mount, then it's possible that
		 * releasing the path will result in releasing the
		 * super block and sending the DESTROY request.  If
		 * the server is single threaded, this would hang.
		 * For this reason do the path_put() in a separate
		 * thread.
		 */
		atomic_inc(&req->count);
		INIT_WORK(&req->misc.release.work, fuse_release_async);
		schedule_work(&req->misc.release.work);
	} else {
		path_put(&req->misc.release.path);
	}
}
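
/*
 * Drop a reference to the fuse_file.  When the last reference goes away
 * the reserved RELEASE request is sent: synchronously if @sync is set,
 * otherwise in the background, with fuse_release_end() doing the final
 * path_put().
 */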
static void fuse_file_put(struct fuse_file *ff, bool sync)
{
	if (atomic_dec_and_test(&ff->count)) {
		struct fuse_req *req = ff->reserved_req;

		if (sync) {
			fuse_request_send(ff->fc, req);
			path_put(&req->misc.release.path);
			fuse_put_request(ff->fc, req);
		} else {
			req->end = fuse_release_end;
			fuse_request_send_background(ff->fc, req);
		}
		kfree(ff);
	}
}
int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
		 bool isdir)
{
	struct fuse_open_out outarg;
	struct fuse_file *ff;
	int err;
	int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;

	ff = fuse_file_alloc(fc);
	if (!ff)
		return -ENOMEM;

	err = fuse_send_open(fc, nodeid, file, opcode, &outarg);
	if (err) {
		fuse_file_free(ff);
		return err;
	}

	if (isdir)
		outarg.open_flags &= ~FOPEN_DIRECT_IO;

	ff->fh = outarg.fh;
	ff->nodeid = nodeid;
	ff->open_flags = outarg.open_flags;
	file->private_data = fuse_file_get(ff);

	return 0;
}
EXPORT_SYMBOL_GPL(fuse_do_open);
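
/*
 * Apply the FOPEN_* flags the server returned in fuse_open_out, and for
 * atomic O_TRUNC zero i_size under fc->lock so the cached attributes
 * stay consistent.
 */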
void fuse_finish_open(struct inode *inode, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (ff->open_flags & FOPEN_DIRECT_IO)
		file->f_op = &fuse_direct_io_file_operations;
	if (!(ff->open_flags & FOPEN_KEEP_CACHE))
		invalidate_inode_pages2(inode->i_mapping);
	if (ff->open_flags & FOPEN_NONSEEKABLE)
		nonseekable_open(inode, file);
	if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
		struct fuse_inode *fi = get_fuse_inode(inode);

		spin_lock(&fc->lock);
		fi->attr_version = ++fc->attr_version;
		i_size_write(inode, 0);
		spin_unlock(&fc->lock);
		fuse_invalidate_attr(inode);
	}
}
int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	/* VFS checks this, but only _after_ ->open() */
	if (file->f_flags & O_DIRECT)
		return -EINVAL;

	err = generic_file_open(inode, file);
	if (err)
		return err;

	err = fuse_do_open(fc, get_node_id(inode), file, isdir);
	if (err)
		return err;

	fuse_finish_open(inode, file);

	return 0;
}
static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode)
{
	struct fuse_conn *fc = ff->fc;
	struct fuse_req *req = ff->reserved_req;
	struct fuse_release_in *inarg = &req->misc.release.in;

	spin_lock(&fc->lock);
	list_del(&ff->write_entry);
	if (!RB_EMPTY_NODE(&ff->polled_node))
		rb_erase(&ff->polled_node, &fc->polled_files);
	spin_unlock(&fc->lock);

	wake_up_interruptible_all(&ff->poll_wait);

	inarg->fh = ff->fh;
	inarg->flags = flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_release_in);
	req->in.args[0].value = inarg;
}
void fuse_release_common(struct file *file, int opcode)
{
	struct fuse_file *ff;
	struct fuse_req *req;

	ff = file->private_data;
	if (unlikely(!ff))
		return;

	req = ff->reserved_req;
	fuse_prepare_release(ff, file->f_flags, opcode);

	if (ff->flock) {
		struct fuse_release_in *inarg = &req->misc.release.in;
		inarg->release_flags |= FUSE_RELEASE_FLOCK_UNLOCK;
		inarg->lock_owner = fuse_lock_owner_id(ff->fc,
						       (fl_owner_t) file);
	}
	/* Hold vfsmount and dentry until release is finished */
	path_get(&file->f_path);
	req->misc.release.path = file->f_path;

	/*
	 * Normally this will send the RELEASE request, however if
	 * some asynchronous READ or WRITE requests are outstanding,
	 * the sending will be delayed.
	 *
	 * Make the release synchronous if this is a fuseblk mount,
	 * synchronous RELEASE is allowed (and desirable) in this case
	 * because the server can be trusted not to screw up.
	 */
	fuse_file_put(ff, ff->fc->destroy_req != NULL);
}
static int fuse_open(struct inode *inode, struct file *file)
{
	return fuse_open_common(inode, file, false);
}
static int fuse_release(struct inode *inode, struct file *file)
{
	fuse_release_common(file, FUSE_RELEASE);

	/* return value is ignored by VFS */
	return 0;
}
void fuse_sync_release(struct fuse_file *ff, int flags)
{
	WARN_ON(atomic_read(&ff->count) > 1);
	fuse_prepare_release(ff, flags, FUSE_RELEASE);
	ff->reserved_req->force = 1;
	fuse_request_send(ff->fc, ff->reserved_req);
	fuse_put_request(ff->fc, ff->reserved_req);
	kfree(ff);
}
EXPORT_SYMBOL_GPL(fuse_sync_release);
/*
 * Scramble the ID space with XTEA, so that the value of the files_struct
 * pointer is not exposed to userspace.
 */
u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
{
	u32 *k = fc->scramble_key;
	u64 v = (unsigned long) id;
	u32 v0 = v;
	u32 v1 = v >> 32;
	u32 sum = 0;
	int i;

	for (i = 0; i < 32; i++) {
		v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
		sum += 0x9E3779B9;
		v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
	}

	return (u64) v0 + ((u64) v1 << 32);
}
/*
 * Check if page is under writeback
 *
 * This is currently done by walking the list of writepage requests
 * for the inode, which can be pretty inefficient.
 */
static bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;
	bool found = false;

	spin_lock(&fc->lock);
	list_for_each_entry(req, &fi->writepages, writepages_entry) {
		pgoff_t curr_index;

		BUG_ON(req->inode != inode);
		curr_index = req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
		if (curr_index == index) {
			found = true;
			break;
		}
	}
	spin_unlock(&fc->lock);

	return found;
}
/*
 * Wait for page writeback to be completed.
 *
 * Since fuse doesn't rely on the VM writeback tracking, this has to
 * use some other means.
 */
static int fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
	return 0;
}
static int fuse_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_flush_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	if (fc->no_flush)
		return 0;

	req = fuse_get_req_nofail(fc, file);
	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.lock_owner = fuse_lock_owner_id(fc, id);
	req->in.h.opcode = FUSE_FLUSH;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->force = 1;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		fc->no_flush = 1;
		err = 0;
	}
	return err;
}
/*
 * Wait for all pending writepages on the inode to finish.
 *
 * This is currently done by blocking further writes with FUSE_NOWRITE
 * and waiting for all sent writes to complete.
 *
 * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage
 * could conflict with truncation.
 */
static void fuse_sync_writes(struct inode *inode)
{
	fuse_set_nowrite(inode);
	fuse_release_nowrite(inode);
}
int fuse_fsync_common(struct file *file, int datasync, int isdir)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_fsync_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
		return 0;

	/*
	 * Start writeback against all dirty pages of the inode, then
	 * wait for all outstanding writes, before sending the FSYNC
	 * request.
	 */
	err = write_inode_now(inode, 0);
	if (err)
		return err;

	fuse_sync_writes(inode);

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.fsync_flags = datasync ? 1 : 0;
	req->in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		if (isdir)
			fc->no_fsyncdir = 1;
		else
			fc->no_fsync = 1;
		err = 0;
	}
	return err;
}
static int fuse_fsync(struct file *file, int datasync)
{
	return fuse_fsync_common(file, datasync, 0);
}
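
/*
 * Fill in the common parts of a read request.  The reply is declared
 * variable sized (out.argvar), so the server may return fewer than
 * @count bytes.
 */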
void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos,
		    size_t count, int opcode)
{
	struct fuse_read_in *inarg = &req->misc.read.in;
	struct fuse_file *ff = file->private_data;

	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	inarg->flags = file->f_flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_read_in);
	req->in.args[0].value = inarg;
	req->out.argvar = 1;
	req->out.numargs = 1;
	req->out.args[0].size = count;
}
static size_t fuse_send_read(struct fuse_req *req, struct file *file,
			     loff_t pos, size_t count, fl_owner_t owner)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;

	fuse_read_fill(req, file, pos, count, FUSE_READ);
	if (owner != NULL) {
		struct fuse_read_in *inarg = &req->misc.read.in;

		inarg->read_flags |= FUSE_READ_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}
	fuse_request_send(fc, req);
	return req->out.args[0].size;
}
static void fuse_read_update_size(struct inode *inode, loff_t size,
				  u64 attr_ver)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fc->lock);
	if (attr_ver == fi->attr_version && size < inode->i_size) {
		fi->attr_version = ++fc->attr_version;
		i_size_write(inode, size);
	}
	spin_unlock(&fc->lock);
}
static int fuse_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	size_t num_read;
	loff_t pos = page_offset(page);
	size_t count = PAGE_CACHE_SIZE;
	u64 attr_ver;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	/*
	 * Page writeback can extend beyond the lifetime of the
	 * page-cache page, so make sure we read a properly synced
	 * page.
	 */
	fuse_wait_on_page_writeback(inode, page->index);

	req = fuse_get_req(fc);
	err = PTR_ERR(req);
	if (IS_ERR(req))
		goto out;

	attr_ver = fuse_get_attr_version(fc);

	req->out.page_zeroing = 1;
	req->out.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = page;
	num_read = fuse_send_read(req, file, pos, count, NULL);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	if (!err) {
		/*
		 * Short read means EOF.  If file size is larger, truncate it
		 */
		if (num_read < count)
			fuse_read_update_size(inode, pos + num_read, attr_ver);

		SetPageUptodate(page);
	}

	fuse_invalidate_attr(inode); /* atime changed */
 out:
	unlock_page(page);
	return err;
}
static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
{
	int i;
	size_t count = req->misc.read.in.size;
	size_t num_read = req->out.args[0].size;
	struct address_space *mapping = NULL;

	for (i = 0; mapping == NULL && i < req->num_pages; i++)
		mapping = req->pages[i]->mapping;

	if (mapping) {
		struct inode *inode = mapping->host;

		/*
		 * Short read means EOF. If file size is larger, truncate it
		 */
		if (!req->out.h.error && num_read < count) {
			loff_t pos;

			pos = page_offset(req->pages[0]) + num_read;
			fuse_read_update_size(inode, pos,
					      req->misc.read.attr_ver);
		}

		fuse_invalidate_attr(inode); /* atime changed */
	}

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (!req->out.h.error)
			SetPageUptodate(page);
		else
			SetPageError(page);
		unlock_page(page);
		page_cache_release(page);
	}
	if (req->ff)
		fuse_file_put(req->ff, false);
}
static void fuse_send_readpages(struct fuse_req *req, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	loff_t pos = page_offset(req->pages[0]);
	size_t count = req->num_pages << PAGE_CACHE_SHIFT;

	req->out.argpages = 1;
	req->out.page_zeroing = 1;
	req->out.page_replace = 1;
	fuse_read_fill(req, file, pos, count, FUSE_READ);
	req->misc.read.attr_ver = fuse_get_attr_version(fc);
	if (fc->async_read) {
		req->ff = fuse_file_get(ff);
		req->end = fuse_readpages_end;
		fuse_request_send_background(fc, req);
	} else {
		fuse_request_send(fc, req);
		fuse_readpages_end(fc, req);
		fuse_put_request(fc, req);
	}
}
struct fuse_fill_data {
	struct fuse_req *req;
	struct file *file;
	struct inode *inode;
};
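
/*
 * Add a page to the request being built, first flushing the request
 * when it is already full, would exceed fc->max_read, or the page is
 * not contiguous with the previous one.
 */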
static int fuse_readpages_fill(void *_data, struct page *page)
{
	struct fuse_fill_data *data = _data;
	struct fuse_req *req = data->req;
	struct inode *inode = data->inode;
	struct fuse_conn *fc = get_fuse_conn(inode);

	fuse_wait_on_page_writeback(inode, page->index);

	if (req->num_pages &&
	    (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
	     (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read ||
	     req->pages[req->num_pages - 1]->index + 1 != page->index)) {
		fuse_send_readpages(req, data->file);
		data->req = req = fuse_get_req(fc);
		if (IS_ERR(req)) {
			unlock_page(page);
			return PTR_ERR(req);
		}
	}
	page_cache_get(page);
	req->pages[req->num_pages] = page;
	req->num_pages++;
	return 0;
}
static int fuse_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_fill_data data;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	data.file = file;
	data.inode = inode;
	data.req = fuse_get_req(fc);
	err = PTR_ERR(data.req);
	if (IS_ERR(data.req))
		goto out;

	err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
	if (!err) {
		if (data.req->num_pages)
			fuse_send_readpages(data.req, file);
		else
			fuse_put_request(fc, data.req);
	}
out:
	return err;
}
static ssize_t fuse_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
				  unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;

	if (pos + iov_length(iov, nr_segs) > i_size_read(inode)) {
		int err;
		/*
		 * If trying to read past EOF, make sure the i_size
		 * attribute is up-to-date.
		 */
		err = fuse_update_attributes(inode, NULL, iocb->ki_filp, NULL);
		if (err)
			return err;
	}

	return generic_file_aio_read(iocb, iov, nr_segs, pos);
}
static void fuse_write_fill(struct fuse_req *req, struct fuse_file *ff,
			    loff_t pos, size_t count)
{
	struct fuse_write_in *inarg = &req->misc.write.in;
	struct fuse_write_out *outarg = &req->misc.write.out;

	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	req->in.h.opcode = FUSE_WRITE;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 2;
	if (ff->fc->minor < 9)
		req->in.args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
	else
		req->in.args[0].size = sizeof(struct fuse_write_in);
	req->in.args[0].value = inarg;
	req->in.args[1].size = count;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(struct fuse_write_out);
	req->out.args[0].value = outarg;
}
static size_t fuse_send_write(struct fuse_req *req, struct file *file,
			      loff_t pos, size_t count, fl_owner_t owner)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_write_in *inarg = &req->misc.write.in;

	fuse_write_fill(req, ff, pos, count);
	inarg->flags = file->f_flags;
	if (owner != NULL) {
		inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}
	fuse_request_send(fc, req);
	return req->misc.write.out.size;
}
static int fuse_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;

	*pagep = grab_cache_page_write_begin(mapping, index, flags);
	if (!*pagep)
		return -ENOMEM;
	return 0;
}
void fuse_write_update_size(struct inode *inode, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fc->lock);
	fi->attr_version = ++fc->attr_version;
	if (pos > inode->i_size)
		i_size_write(inode, pos);
	spin_unlock(&fc->lock);
}
static int fuse_buffered_write(struct file *file, struct inode *inode,
			       loff_t pos, unsigned count, struct page *page)
{
	int err;
	size_t nres;
	struct fuse_conn *fc = get_fuse_conn(inode);
	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
	struct fuse_req *req;

	if (is_bad_inode(inode))
		return -EIO;

	/*
	 * Make sure writepages on the same page are not mixed up with
	 * plain writes.
	 */
	fuse_wait_on_page_writeback(inode, page->index);

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->in.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = page;
	req->page_offset = offset;
	nres = fuse_send_write(req, file, pos, count, NULL);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err && !nres)
		err = -EIO;
	if (!err) {
		pos += nres;
		fuse_write_update_size(inode, pos);
		if (count == PAGE_CACHE_SIZE)
			SetPageUptodate(page);
	}
	fuse_invalidate_attr(inode);
	return err ? err : nres;
}
static int fuse_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	int res = 0;

	if (copied)
		res = fuse_buffered_write(file, inode, pos, copied, page);

	unlock_page(page);
	page_cache_release(page);
	return res;
}
static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
				    struct inode *inode, loff_t pos,
				    size_t count)
{
	size_t res;
	unsigned offset;
	unsigned i;

	for (i = 0; i < req->num_pages; i++)
		fuse_wait_on_page_writeback(inode, req->pages[i]->index);

	res = fuse_send_write(req, file, pos, count, NULL);

	offset = req->page_offset;
	count = res;
	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];

		if (!req->out.h.error && !offset && count >= PAGE_CACHE_SIZE)
			SetPageUptodate(page);

		if (count > PAGE_CACHE_SIZE - offset)
			count -= PAGE_CACHE_SIZE - offset;
		else
			count = 0;
		offset = 0;

		unlock_page(page);
		page_cache_release(page);
	}

	return res;
}
static ssize_t fuse_fill_write_pages(struct fuse_req *req,
				     struct address_space *mapping,
				     struct iov_iter *ii, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(mapping->host);
	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
	size_t count = 0;
	int err;

	req->in.argpages = 1;
	req->page_offset = offset;

	do {
		size_t tmp;
		struct page *page;
		pgoff_t index = pos >> PAGE_CACHE_SHIFT;
		size_t bytes = min_t(size_t, PAGE_CACHE_SIZE - offset,
				     iov_iter_count(ii));

		bytes = min_t(size_t, bytes, fc->max_write - count);

 again:
		err = -EFAULT;
		if (iov_iter_fault_in_readable(ii, bytes))
			break;

		err = -ENOMEM;
		page = grab_cache_page_write_begin(mapping, index, 0);
		if (!page)
			break;

		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		pagefault_disable();
		tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
		pagefault_enable();
		flush_dcache_page(page);

		if (!tmp) {
			unlock_page(page);
			page_cache_release(page);
			bytes = min(bytes, iov_iter_single_seg_count(ii));
			goto again;
		}

		err = 0;
		req->pages[req->num_pages] = page;
		req->num_pages++;

		iov_iter_advance(ii, tmp);
		count += tmp;
		pos += tmp;
		offset += tmp;
		if (offset == PAGE_CACHE_SIZE)
			offset = 0;

		if (!fc->big_writes)
			break;
	} while (iov_iter_count(ii) && count < fc->max_write &&
		 req->num_pages < FUSE_MAX_PAGES_PER_REQ && offset == 0);

	return count > 0 ? count : err;
}
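
/*
 * Cached write path: repeatedly fill a request with pages copied from
 * the iovec and send it, until the iterator is exhausted, the server
 * returns a short write, or an error occurs.
 */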
static ssize_t fuse_perform_write(struct file *file,
				  struct address_space *mapping,
				  struct iov_iter *ii, loff_t pos)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err = 0;
	ssize_t res = 0;

	if (is_bad_inode(inode))
		return -EIO;

	do {
		struct fuse_req *req;
		ssize_t count;

		req = fuse_get_req(fc);
		if (IS_ERR(req)) {
			err = PTR_ERR(req);
			break;
		}

		count = fuse_fill_write_pages(req, mapping, ii, pos);
		if (count <= 0) {
			err = count;
		} else {
			size_t num_written;

			num_written = fuse_send_write_pages(req, file, inode,
							    pos, count);
			err = req->out.h.error;
			if (!err) {
				res += num_written;
				pos += num_written;

				/* break out of the loop on short write */
				if (num_written != count)
					err = -EIO;
			}
		}
		fuse_put_request(fc, req);
	} while (!err && iov_iter_count(ii));

	if (res > 0)
		fuse_write_update_size(inode, pos);

	fuse_invalidate_attr(inode);

	return res > 0 ? res : err;
}
static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				   unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t count = 0;
	ssize_t written = 0;
	struct inode *inode = mapping->host;
	ssize_t err;
	struct iov_iter i;

	WARN_ON(iocb->ki_pos != pos);

	err = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
	if (err)
		return err;

	mutex_lock(&inode->i_mutex);
	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;

	if (count == 0)
		goto out;

	err = file_remove_suid(file);
	if (err)
		goto out;

	file_update_time(file);

	iov_iter_init(&i, iov, nr_segs, count, 0);
	written = fuse_perform_write(file, mapping, &i, pos);
	if (written >= 0)
		iocb->ki_pos = pos + written;

out:
	current->backing_dev_info = NULL;
	mutex_unlock(&inode->i_mutex);

	return written ? written : err;
}
static void fuse_release_user_pages(struct fuse_req *req, int write)
{
	unsigned i;

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (write)
			set_page_dirty_lock(page);
		put_page(page);
	}
}
static int fuse_get_user_pages(struct fuse_req *req, const char __user *buf,
			       size_t *nbytesp, int write)
{
	size_t nbytes = *nbytesp;
	unsigned long user_addr = (unsigned long) buf;
	unsigned offset = user_addr & ~PAGE_MASK;
	int npages;

	/* Special case for kernel I/O: can copy directly into the buffer */
	if (segment_eq(get_fs(), KERNEL_DS)) {
		if (write)
			req->in.args[1].value = (void *) user_addr;
		else
			req->out.args[0].value = (void *) user_addr;

		return 0;
	}

	nbytes = min_t(size_t, nbytes, FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
	npages = (nbytes + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	npages = clamp(npages, 1, FUSE_MAX_PAGES_PER_REQ);
	npages = get_user_pages_fast(user_addr, npages, !write, req->pages);
	if (npages < 0)
		return npages;

	req->num_pages = npages;
	req->page_offset = offset;

	if (write)
		req->in.argpages = 1;
	else
		req->out.argpages = 1;

	nbytes = (req->num_pages << PAGE_SHIFT) - req->page_offset;
	*nbytesp = min(*nbytesp, nbytes);

	return 0;
}
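
/*
 * Do direct I/O in fc->max_write/max_read sized chunks, pinning the
 * caller's pages with get_user_pages_fast() for each request.  A short
 * count from the server ends the loop early.
 */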
ssize_t fuse_direct_io(struct file *file, const char __user *buf,
		       size_t count, loff_t *ppos, int write)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	size_t nmax = write ? fc->max_write : fc->max_read;
	loff_t pos = *ppos;
	ssize_t res = 0;
	struct fuse_req *req;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	while (count) {
		size_t nres;
		fl_owner_t owner = current->files;
		size_t nbytes = min(count, nmax);
		int err = fuse_get_user_pages(req, buf, &nbytes, write);
		if (err) {
			res = err;
			break;
		}

		if (write)
			nres = fuse_send_write(req, file, pos, nbytes, owner);
		else
			nres = fuse_send_read(req, file, pos, nbytes, owner);

		fuse_release_user_pages(req, !write);
		if (req->out.h.error) {
			if (!res)
				res = req->out.h.error;
			break;
		} else if (nres > nbytes) {
			res = -EIO;
			break;
		}
		count -= nres;
		res += nres;
		pos += nres;
		buf += nres;
		if (nres != nbytes)
			break;
		if (count) {
			fuse_put_request(fc, req);
			req = fuse_get_req(fc);
			if (IS_ERR(req))
				break;
		}
	}
	if (!IS_ERR(req))
		fuse_put_request(fc, req);
	if (res > 0)
		*ppos = pos;

	return res;
}
EXPORT_SYMBOL_GPL(fuse_direct_io);
static ssize_t fuse_direct_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	ssize_t res;
	struct inode *inode = file->f_path.dentry->d_inode;

	if (is_bad_inode(inode))
		return -EIO;

	res = fuse_direct_io(file, buf, count, ppos, 0);

	fuse_invalidate_attr(inode);

	return res;
}
static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	ssize_t res;

	if (is_bad_inode(inode))
		return -EIO;

	/* Don't allow parallel writes to the same file */
	mutex_lock(&inode->i_mutex);
	res = generic_write_checks(file, ppos, &count, 0);
	if (!res) {
		res = fuse_direct_io(file, buf, count, ppos, 1);
		if (res > 0)
			fuse_write_update_size(inode, *ppos);
	}
	mutex_unlock(&inode->i_mutex);

	fuse_invalidate_attr(inode);

	return res;
}
static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
{
	__free_page(req->pages[0]);
	fuse_file_put(req->ff, false);
}
static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
{
	struct inode *inode = req->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;

	list_del(&req->writepages_entry);
	dec_bdi_stat(bdi, BDI_WRITEBACK);
	dec_zone_page_state(req->pages[0], NR_WRITEBACK_TEMP);
	bdi_writeout_inc(bdi);
	wake_up(&fi->page_waitq);
}
/* Called under fc->lock, may release and reacquire it */
static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req)
__releases(fc->lock)
__acquires(fc->lock)
{
	struct fuse_inode *fi = get_fuse_inode(req->inode);
	loff_t size = i_size_read(req->inode);
	struct fuse_write_in *inarg = &req->misc.write.in;

	if (!fc->connected)
		goto out_free;

	if (inarg->offset + PAGE_CACHE_SIZE <= size) {
		inarg->size = PAGE_CACHE_SIZE;
	} else if (inarg->offset < size) {
		inarg->size = size & (PAGE_CACHE_SIZE - 1);
	} else {
		/* Got truncated off completely */
		goto out_free;
	}

	req->in.args[1].size = inarg->size;
	fi->writectr++;
	fuse_request_send_background_locked(fc, req);
	return;

 out_free:
	fuse_writepage_finish(fc, req);
	spin_unlock(&fc->lock);
	fuse_writepage_free(fc, req);
	fuse_put_request(fc, req);
	spin_lock(&fc->lock);
}
/*
 * If fi->writectr is positive (no truncate or fsync going on) send
 * all queued writepage requests.
 *
 * Called with fc->lock
 */
void fuse_flush_writepages(struct inode *inode)
__releases(fc->lock)
__acquires(fc->lock)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;

	while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
		req = list_entry(fi->queued_writes.next, struct fuse_req, list);
		list_del_init(&req->list);
		fuse_send_writepage(fc, req);
	}
}
static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
{
	struct inode *inode = req->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);

	mapping_set_error(inode->i_mapping, req->out.h.error);
	spin_lock(&fc->lock);
	fi->writectr--;
	fuse_writepage_finish(fc, req);
	spin_unlock(&fc->lock);
	fuse_writepage_free(fc, req);
}
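
/*
 * Write back a page through a temporary page: the data is copied so the
 * original page can leave writeback immediately and a misbehaving
 * server cannot block memory reclaim.  The copy is accounted as
 * NR_WRITEBACK_TEMP until the request completes.
 */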
static int fuse_writepage_locked(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;
	struct fuse_file *ff;
	struct page *tmp_page;

	set_page_writeback(page);

	req = fuse_request_alloc_nofs();
	if (!req)
		goto err;

	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!tmp_page)
		goto err_free;

	spin_lock(&fc->lock);
	BUG_ON(list_empty(&fi->write_files));
	ff = list_entry(fi->write_files.next, struct fuse_file, write_entry);
	req->ff = fuse_file_get(ff);
	spin_unlock(&fc->lock);

	fuse_write_fill(req, ff, page_offset(page), 0);

	copy_highpage(tmp_page, page);
	req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
	req->in.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = tmp_page;
	req->page_offset = 0;
	req->end = fuse_writepage_end;
	req->inode = inode;

	inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK);
	inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
	end_page_writeback(page);

	spin_lock(&fc->lock);
	list_add(&req->writepages_entry, &fi->writepages);
	list_add_tail(&req->list, &fi->queued_writes);
	fuse_flush_writepages(inode);
	spin_unlock(&fc->lock);

	return 0;

err_free:
	fuse_request_free(req);
err:
	end_page_writeback(page);
	return -ENOMEM;
}
static int fuse_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;

	err = fuse_writepage_locked(page);
	unlock_page(page);

	return err;
}
*page
)
1337 if (clear_page_dirty_for_io(page
)) {
1338 struct inode
*inode
= page
->mapping
->host
;
1339 err
= fuse_writepage_locked(page
);
1341 fuse_wait_on_page_writeback(inode
, page
->index
);
/*
 * Write back dirty pages now, because there may not be any suitable
 * open file later
 */
static void fuse_vma_close(struct vm_area_struct *vma)
{
	filemap_write_and_wait(vma->vm_file->f_mapping);
}
/*
 * Wait for writeback against this page to complete before allowing it
 * to be marked dirty again, and hence written back again, possibly
 * before the previous writepage completed.
 *
 * Block here, instead of in ->writepage(), so that the userspace fs
 * can only block processes actually operating on the filesystem.
 *
 * Otherwise unprivileged userspace fs would be able to block
 * unrelated:
 *
 * - page migration
 * - sync(2)
 * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
 */
static int fuse_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	/*
	 * Don't use page->mapping as it may become NULL from a
	 * concurrent truncate.
	 */
	struct inode *inode = vma->vm_file->f_mapping->host;

	fuse_wait_on_page_writeback(inode, page->index);
	return 0;
}
static const struct vm_operations_struct fuse_file_vm_ops = {
	.close		= fuse_vma_close,
	.fault		= filemap_fault,
	.page_mkwrite	= fuse_page_mkwrite,
};
static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) {
		struct inode *inode = file->f_dentry->d_inode;
		struct fuse_conn *fc = get_fuse_conn(inode);
		struct fuse_inode *fi = get_fuse_inode(inode);
		struct fuse_file *ff = file->private_data;
		/*
		 * file may be written through mmap, so chain it onto the
		 * inode's write_files list
		 */
		spin_lock(&fc->lock);
		if (list_empty(&ff->write_entry))
			list_add(&ff->write_entry, &fi->write_files);
		spin_unlock(&fc->lock);
	}
	file_accessed(file);
	vma->vm_ops = &fuse_file_vm_ops;
	return 0;
}
static int fuse_direct_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Can't provide the coherency needed for MAP_SHARED */
	if (vma->vm_flags & VM_MAYSHARE)
		return -ENODEV;

	invalidate_inode_pages2(file->f_mapping);

	return generic_file_mmap(file, vma);
}
static int convert_fuse_file_lock(const struct fuse_file_lock *ffl,
				  struct file_lock *fl)
{
	switch (ffl->type) {
	case F_UNLCK:
		break;

	case F_RDLCK:
	case F_WRLCK:
		if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
		    ffl->end < ffl->start)
			return -EIO;

		fl->fl_start = ffl->start;
		fl->fl_end = ffl->end;
		fl->fl_pid = ffl->pid;
		break;

	default:
		return -EIO;
	}
	fl->fl_type = ffl->type;
	return 0;
}
static void fuse_lk_fill(struct fuse_req *req, struct file *file,
			 const struct file_lock *fl, int opcode, pid_t pid,
			 int flock)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_lk_in *arg = &req->misc.lk_in;

	arg->fh = ff->fh;
	arg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
	arg->lk.start = fl->fl_start;
	arg->lk.end = fl->fl_end;
	arg->lk.type = fl->fl_type;
	arg->lk.pid = pid;
	if (flock)
		arg->lk_flags |= FUSE_LK_FLOCK;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(*arg);
	req->in.args[0].value = arg;
}
static int fuse_getlk(struct file *file, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_lk_out outarg;
	int err;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	fuse_lk_fill(req, file, fl, FUSE_GETLK, 0, 0);
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err)
		err = convert_fuse_file_lock(&outarg.lk, fl);

	return err;
}
static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
	pid_t pid = fl->fl_type != F_UNLCK ? current->tgid : 0;
	int err;

	if (fl->fl_lmops && fl->fl_lmops->fl_grant) {
		/* NLM needs asynchronous locks, which we don't support yet */
		return -ENOLCK;
	}

	/* Unlock on close is handled by the flush method */
	if (fl->fl_flags & FL_CLOSE)
		return 0;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	fuse_lk_fill(req, file, fl, opcode, pid, flock);
	fuse_request_send(fc, req);
	err = req->out.h.error;
	/* locking is restartable */
	if (err == -EINTR)
		err = -ERESTARTSYS;

	fuse_put_request(fc, req);
	return err;
}
static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (cmd == F_CANCELLK) {
		err = 0;
	} else if (cmd == F_GETLK) {
		if (fc->no_lock) {
			posix_test_lock(file, fl);
			err = 0;
		} else
			err = fuse_getlk(file, fl);
	} else {
		if (fc->no_lock)
			err = posix_lock_file(file, fl, NULL);
		else
			err = fuse_setlk(file, fl, 0);
	}
	return err;
}
static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (fc->no_flock) {
		err = flock_lock_file_wait(file, fl);
	} else {
		struct fuse_file *ff = file->private_data;

		/* emulate flock with POSIX locks */
		fl->fl_owner = (fl_owner_t) file;
		ff->flock = true;
		err = fuse_setlk(file, fl, 1);
	}

	return err;
}
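
/*
 * FUSE_BMAP is only meaningful for fuseblk-style servers; without a
 * backing block device, or once the server has answered -ENOSYS, just
 * return 0.
 */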
static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_bmap_in inarg;
	struct fuse_bmap_out outarg;
	int err;

	if (!inode->i_sb->s_bdev || fc->no_bmap)
		return 0;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return 0;

	memset(&inarg, 0, sizeof(inarg));
	inarg.block = block;
	inarg.blocksize = inode->i_sb->s_blocksize;
	req->in.h.opcode = FUSE_BMAP;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS)
		fc->no_bmap = 1;

	return err ? 0 : outarg.block;
}
static loff_t fuse_file_llseek(struct file *file, loff_t offset, int origin)
{
	loff_t retval;
	struct inode *inode = file->f_path.dentry->d_inode;

	mutex_lock(&inode->i_mutex);
	switch (origin) {
	case SEEK_END:
		retval = fuse_update_attributes(inode, NULL, file, NULL);
		if (retval)
			goto exit;
		offset += i_size_read(inode);
		break;
	case SEEK_CUR:
		offset += file->f_pos;
		break;
	}
	retval = -EINVAL;
	if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
		}
		retval = offset;
	}
exit:
	mutex_unlock(&inode->i_mutex);
	return retval;
}
static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov,
				unsigned int nr_segs, size_t bytes, bool to_user)
{
	struct iov_iter ii;
	int page_idx = 0;

	if (!bytes)
		return 0;

	iov_iter_init(&ii, iov, nr_segs, bytes, 0);

	while (iov_iter_count(&ii)) {
		struct page *page = pages[page_idx++];
		size_t todo = min_t(size_t, PAGE_SIZE, iov_iter_count(&ii));
		char *kaddr;

		kaddr = kmap(page);

		while (todo) {
			char __user *uaddr = ii.iov->iov_base + ii.iov_offset;
			size_t iov_len = ii.iov->iov_len - ii.iov_offset;
			size_t copy = min(todo, iov_len);
			size_t left;

			if (!to_user)
				left = copy_from_user(kaddr, uaddr, copy);
			else
				left = copy_to_user(uaddr, kaddr, copy);

			if (unlikely(left)) {
				kunmap(page);
				return -EFAULT;
			}

			iov_iter_advance(&ii, copy);
			todo -= copy;
			kaddr += copy;
		}

		kunmap(page);
	}

	return 0;
}
/*
 * CUSE servers compiled on 32bit broke on 64bit kernels because the
 * ABI was defined to be 'struct iovec' which is different on 32bit
 * and 64bit.  Fortunately we can determine which structure the server
 * used from the size of the reply.
 */
static int fuse_copy_ioctl_iovec_old(struct iovec *dst, void *src,
				     size_t transferred, unsigned count,
				     bool is_compat)
{
#ifdef CONFIG_COMPAT
	if (count * sizeof(struct compat_iovec) == transferred) {
		struct compat_iovec *ciov = src;
		unsigned i;

		/*
		 * With this interface a 32bit server cannot support
		 * non-compat (i.e. ones coming from 64bit apps) ioctl
		 * requests
		 */
		if (!is_compat)
			return -EINVAL;

		for (i = 0; i < count; i++) {
			dst[i].iov_base = compat_ptr(ciov[i].iov_base);
			dst[i].iov_len = ciov[i].iov_len;
		}
		return 0;
	}
#endif

	if (count * sizeof(struct iovec) != transferred)
		return -EIO;

	memcpy(dst, src, transferred);
	return 0;
}
/* Make sure iov_length() won't overflow */
static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count)
{
	size_t n;
	u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT;

	for (n = 0; n < count; n++, iov++) {
		if (iov->iov_len > (size_t) max)
			return -ENOMEM;
		max -= iov->iov_len;
	}
	return 0;
}
static int fuse_copy_ioctl_iovec(struct fuse_conn *fc, struct iovec *dst,
				 void *src, size_t transferred, unsigned count,
				 bool is_compat)
{
	unsigned i;
	struct fuse_ioctl_iovec *fiov = src;

	if (fc->minor < 16) {
		return fuse_copy_ioctl_iovec_old(dst, src, transferred,
						 count, is_compat);
	}

	if (count * sizeof(struct fuse_ioctl_iovec) != transferred)
		return -EIO;

	for (i = 0; i < count; i++) {
		/* Did the server supply an inappropriate value? */
		if (fiov[i].base != (unsigned long) fiov[i].base ||
		    fiov[i].len != (unsigned long) fiov[i].len)
			return -EIO;

		dst[i].iov_base = (void __user *)(unsigned long) fiov[i].base;
		dst[i].iov_len = (size_t) fiov[i].len;

#ifdef CONFIG_COMPAT
		if (is_compat &&
		    (ptr_to_compat(dst[i].iov_base) != fiov[i].base ||
		     (compat_size_t) dst[i].iov_len != fiov[i].len))
			return -EIO;
#endif
	}

	return 0;
}
/*
 * For ioctls, there is no generic way to determine how much memory
 * needs to be read and/or written.  Furthermore, ioctls are allowed
 * to dereference the passed pointer, so the parameter requires deep
 * copying but FUSE has no idea whatsoever about what to copy in or
 * out.
 *
 * This is solved by allowing FUSE server to retry ioctl with
 * necessary in/out iovecs.  Let's assume the ioctl implementation
 * needs to read in the following structure.
 *
 * struct a {
 *	char	*buf;
 *	size_t	buflen;
 * };
 *
 * On the first callout to FUSE server, inarg->in_size and
 * inarg->out_size will be zero; then, the server completes the ioctl
 * with FUSE_IOCTL_RETRY set in out->flags, out->in_iovs set to 1 and
 * the actual iov array to
 *
 * { { .iov_base = inarg.arg, .iov_len = sizeof(struct a) } }
 *
 * which tells FUSE to copy in the requested area and retry the ioctl.
 * On the second round, the server has access to the structure and
 * from that it can tell what to look for next, so on the invocation,
 * it sets FUSE_IOCTL_RETRY, out->in_iovs to 2 and iov array to
 *
 * { { .iov_base = inarg.arg, .iov_len = sizeof(struct a) },
 *   { .iov_base = a.buf,     .iov_len = a.buflen } }
 *
 * FUSE will copy both struct a and the pointed buffer from the
 * process doing the ioctl and retry ioctl with both struct a and the
 * buffer.
 *
 * This time, FUSE server has everything it needs and completes ioctl
 * without FUSE_IOCTL_RETRY which finishes the ioctl call.
 *
 * Copying data out works the same way.
 *
 * Note that if FUSE_IOCTL_UNRESTRICTED is clear, the kernel
 * automatically initializes in and out iovs by decoding @cmd with
 * _IOC_* macros and the server is not allowed to request RETRY.  This
 * limits ioctl data transfers to well-formed ioctls and is the forced
 * behavior for all FUSE servers.
 */
long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
		   unsigned int flags)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_ioctl_in inarg = {
		.fh = ff->fh,
		.cmd = cmd,
		.arg = arg,
		.flags = flags
	};
	struct fuse_ioctl_out outarg;
	struct fuse_req *req = NULL;
	struct page **pages = NULL;
	struct iovec *iov_page = NULL;
	struct iovec *in_iov = NULL, *out_iov = NULL;
	unsigned int in_iovs = 0, out_iovs = 0, num_pages = 0, max_pages;
	size_t in_size, out_size, transferred;
	int err;

#if BITS_PER_LONG == 32
	inarg.flags |= FUSE_IOCTL_32BIT;
#else
	if (flags & FUSE_IOCTL_COMPAT)
		inarg.flags |= FUSE_IOCTL_32BIT;
#endif

	/* assume all the iovs returned by client always fit in a page */
	BUILD_BUG_ON(sizeof(struct fuse_ioctl_iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE);

	err = -ENOMEM;
	pages = kzalloc(sizeof(pages[0]) * FUSE_MAX_PAGES_PER_REQ, GFP_KERNEL);
	iov_page = (struct iovec *) __get_free_page(GFP_KERNEL);
	if (!pages || !iov_page)
		goto out;

	/*
	 * If restricted, initialize IO parameters as encoded in @cmd.
	 * RETRY from server is not allowed.
	 */
	if (!(flags & FUSE_IOCTL_UNRESTRICTED)) {
		struct iovec *iov = iov_page;

		iov->iov_base = (void __user *)arg;
		iov->iov_len = _IOC_SIZE(cmd);

		if (_IOC_DIR(cmd) & _IOC_WRITE) {
			in_iov = iov;
			in_iovs = 1;
		}

		if (_IOC_DIR(cmd) & _IOC_READ) {
			out_iov = iov;
			out_iovs = 1;
		}
	}

 retry:
	inarg.in_size = in_size = iov_length(in_iov, in_iovs);
	inarg.out_size = out_size = iov_length(out_iov, out_iovs);

	/*
	 * Out data can be used either for actual out data or iovs,
	 * make sure there always is at least one page.
	 */
	out_size = max_t(size_t, out_size, PAGE_SIZE);
	max_pages = DIV_ROUND_UP(max(in_size, out_size), PAGE_SIZE);

	/* make sure there are enough buffer pages and init request with them */
	err = -ENOMEM;
	if (max_pages > FUSE_MAX_PAGES_PER_REQ)
		goto out;
	while (num_pages < max_pages) {
		pages[num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
		if (!pages[num_pages])
			goto out;
		num_pages++;
	}

	req = fuse_get_req(fc);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		req = NULL;
		goto out;
	}
	memcpy(req->pages, pages, sizeof(req->pages[0]) * num_pages);
	req->num_pages = num_pages;

	/* okay, let's send it to the client */
	req->in.h.opcode = FUSE_IOCTL;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	if (in_size) {
		req->in.numargs++;
		req->in.args[1].size = in_size;
		req->in.argpages = 1;

		err = fuse_ioctl_copy_user(pages, in_iov, in_iovs, in_size,
					   false);
		if (err)
			goto out;
	}

	req->out.numargs = 2;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	req->out.args[1].size = out_size;
	req->out.argpages = 1;
	req->out.argvar = 1;

	fuse_request_send(fc, req);
	err = req->out.h.error;
	transferred = req->out.args[1].size;
	fuse_put_request(fc, req);
	req = NULL;
	if (err)
		goto out;

	/* did it ask for retry? */
	if (outarg.flags & FUSE_IOCTL_RETRY) {
		void *vaddr;

		/* no retry if in restricted mode */
		err = -EIO;
		if (!(flags & FUSE_IOCTL_UNRESTRICTED))
			goto out;

		in_iovs = outarg.in_iovs;
		out_iovs = outarg.out_iovs;

		/*
		 * Make sure things are in boundary, separate checks
		 * are to protect against overflow.
		 */
		err = -ENOMEM;
		if (in_iovs > FUSE_IOCTL_MAX_IOV ||
		    out_iovs > FUSE_IOCTL_MAX_IOV ||
		    in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV)
			goto out;

		vaddr = kmap_atomic(pages[0], KM_USER0);
		err = fuse_copy_ioctl_iovec(fc, iov_page, vaddr,
					    transferred, in_iovs + out_iovs,
					    (flags & FUSE_IOCTL_COMPAT) != 0);
		kunmap_atomic(vaddr, KM_USER0);
		if (err)
			goto out;

		in_iov = iov_page;
		out_iov = in_iov + in_iovs;

		err = fuse_verify_ioctl_iov(in_iov, in_iovs);
		if (err)
			goto out;

		err = fuse_verify_ioctl_iov(out_iov, out_iovs);
		if (err)
			goto out;

		goto retry;
	}

	err = -EIO;
	if (transferred > inarg.out_size)
		goto out;

	err = fuse_ioctl_copy_user(pages, out_iov, out_iovs, transferred, true);
 out:
	if (req)
		fuse_put_request(fc, req);
	free_page((unsigned long) iov_page);
	while (num_pages)
		__free_page(pages[--num_pages]);
	kfree(pages);

	return err ? err : outarg.result;
}
EXPORT_SYMBOL_GPL(fuse_do_ioctl);
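
/*
 * Illustrative sketch (not part of this file): how a userspace server
 * might answer the first FUSE_IOCTL callout for the "struct a" example
 * above with a RETRY reply.  Only the fuse_ioctl_out/fuse_ioctl_iovec
 * members are real; the reply helper and its signature are hypothetical.
 *
 *	struct fuse_ioctl_out out = {
 *		.flags   = FUSE_IOCTL_RETRY,
 *		.in_iovs = 1,
 *	};
 *	struct fuse_ioctl_iovec iov = {
 *		.base = inarg.arg,		// userspace address of struct a
 *		.len  = sizeof(struct a),
 *	};
 *	send_reply(unique, &out, &iov, sizeof(iov));	// hypothetical transport
 */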
static long fuse_file_ioctl_common(struct file *file, unsigned int cmd,
				   unsigned long arg, unsigned int flags)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (!fuse_allow_task(fc, current))
		return -EACCES;

	if (is_bad_inode(inode))
		return -EIO;

	return fuse_do_ioctl(file, cmd, arg, flags);
}
static long fuse_file_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	return fuse_file_ioctl_common(file, cmd, arg, 0);
}
static long fuse_file_compat_ioctl(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	return fuse_file_ioctl_common(file, cmd, arg, FUSE_IOCTL_COMPAT);
}
/*
 * All files which have been polled are linked to RB tree
 * fuse_conn->polled_files which is indexed by kh.  Walk the tree and
 * find the matching one.
 */
static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh,
					      struct rb_node **parent_out)
{
	struct rb_node **link = &fc->polled_files.rb_node;
	struct rb_node *last = NULL;

	while (*link) {
		struct fuse_file *ff;

		last = *link;
		ff = rb_entry(last, struct fuse_file, polled_node);

		if (kh < ff->kh)
			link = &last->rb_left;
		else if (kh > ff->kh)
			link = &last->rb_right;
		else
			return link;
	}

	if (parent_out)
		*parent_out = last;
	return link;
}
/*
 * The file is about to be polled.  Make sure it's on the polled_files
 * RB tree.  Note that files once added to the polled_files tree are
 * not removed before the file is released.  This is because a file
 * polled once is likely to be polled again.
 */
static void fuse_register_polled_file(struct fuse_conn *fc,
				      struct fuse_file *ff)
{
	spin_lock(&fc->lock);
	if (RB_EMPTY_NODE(&ff->polled_node)) {
		struct rb_node **link, *parent;

		link = fuse_find_polled_node(fc, ff->kh, &parent);
		BUG_ON(*link);
		rb_link_node(&ff->polled_node, parent, link);
		rb_insert_color(&ff->polled_node, &fc->polled_files);
	}
	spin_unlock(&fc->lock);
}
unsigned fuse_file_poll(struct file *file, poll_table *wait)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh };
	struct fuse_poll_out outarg;
	struct fuse_req *req;
	int err;

	if (fc->no_poll)
		return DEFAULT_POLLMASK;

	poll_wait(file, &ff->poll_wait, wait);

	/*
	 * Ask for notification iff there's someone waiting for it.
	 * The client may ignore the flag and always notify.
	 */
	if (waitqueue_active(&ff->poll_wait)) {
		inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY;
		fuse_register_polled_file(fc, ff);
	}

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return POLLERR;

	req->in.h.opcode = FUSE_POLL;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	if (!err)
		return outarg.revents;
	if (err == -ENOSYS) {
		fc->no_poll = 1;
		return DEFAULT_POLLMASK;
	}
	return POLLERR;
}
EXPORT_SYMBOL_GPL(fuse_file_poll);
/*
 * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and
 * wakes up the poll waiters.
 */
int fuse_notify_poll_wakeup(struct fuse_conn *fc,
			    struct fuse_notify_poll_wakeup_out *outarg)
{
	u64 kh = outarg->kh;
	struct rb_node **link;

	spin_lock(&fc->lock);

	link = fuse_find_polled_node(fc, kh, NULL);
	if (*link) {
		struct fuse_file *ff;

		ff = rb_entry(*link, struct fuse_file, polled_node);
		wake_up_interruptible_sync(&ff->poll_wait);
	}

	spin_unlock(&fc->lock);
	return 0;
}
static const struct file_operations fuse_file_operations = {
	.llseek		= fuse_file_llseek,
	.read		= do_sync_read,
	.aio_read	= fuse_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= fuse_file_aio_write,
	.mmap		= fuse_file_mmap,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.flock		= fuse_file_flock,
	.splice_read	= generic_file_splice_read,
	.unlocked_ioctl	= fuse_file_ioctl,
	.compat_ioctl	= fuse_file_compat_ioctl,
	.poll		= fuse_file_poll,
};
static const struct file_operations fuse_direct_io_file_operations = {
	.llseek		= fuse_file_llseek,
	.read		= fuse_direct_read,
	.write		= fuse_direct_write,
	.mmap		= fuse_direct_mmap,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.flock		= fuse_file_flock,
	.unlocked_ioctl	= fuse_file_ioctl,
	.compat_ioctl	= fuse_file_compat_ioctl,
	.poll		= fuse_file_poll,
	/* no splice_read */
};
static const struct address_space_operations fuse_file_aops = {
	.readpage	= fuse_readpage,
	.writepage	= fuse_writepage,
	.launder_page	= fuse_launder_page,
	.write_begin	= fuse_write_begin,
	.write_end	= fuse_write_end,
	.readpages	= fuse_readpages,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.bmap		= fuse_bmap,
};
void fuse_init_file_inode(struct inode *inode)
{
	inode->i_fop = &fuse_file_operations;
	inode->i_data.a_ops = &fuse_file_aops;
}