/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/compat.h>
#include <linux/swap.h>

static const struct file_operations fuse_direct_io_file_operations;

static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
			  int opcode, struct fuse_open_out *outargp)
{
	struct fuse_open_in inarg;
	struct fuse_req *req;
	int err;

	req = fuse_get_req_nopages(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
	if (!fc->atomic_o_trunc)
		inarg.flags &= ~O_TRUNC;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(*outargp);
	req->out.args[0].value = outargp;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	return err;
}

struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
{
	struct fuse_file *ff;

	ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
	if (unlikely(!ff))
		return NULL;

	ff->fc = fc;
	ff->reserved_req = fuse_request_alloc(0);
	if (unlikely(!ff->reserved_req)) {
		kfree(ff);
		return NULL;
	}

	INIT_LIST_HEAD(&ff->write_entry);
	atomic_set(&ff->count, 0);
	RB_CLEAR_NODE(&ff->polled_node);
	init_waitqueue_head(&ff->poll_wait);

	spin_lock(&fc->lock);
	ff->kh = ++fc->khctr;
	spin_unlock(&fc->lock);

	return ff;
}

void fuse_file_free(struct fuse_file *ff)
{
	fuse_request_free(ff->reserved_req);
	kfree(ff);
}

struct fuse_file *fuse_file_get(struct fuse_file *ff)
{
	atomic_inc(&ff->count);
	return ff;
}

static void fuse_release_async(struct work_struct *work)
{
	struct fuse_req *req;
	struct fuse_conn *fc;
	struct path path;

	req = container_of(work, struct fuse_req, misc.release.work);
	path = req->misc.release.path;
	fc = get_fuse_conn(path.dentry->d_inode);

	fuse_put_request(fc, req);
	path_put(&path);
}

static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
{
	if (fc->destroy_req) {
		/*
		 * If this is a fuseblk mount, then it's possible that
		 * releasing the path will result in releasing the
		 * super block and sending the DESTROY request.  If
		 * the server is single threaded, this would hang.
		 * For this reason do the path_put() in a separate
		 * thread.
		 */
		atomic_inc(&req->count);
		INIT_WORK(&req->misc.release.work, fuse_release_async);
		schedule_work(&req->misc.release.work);
	} else {
		path_put(&req->misc.release.path);
	}
}

static void fuse_file_put(struct fuse_file *ff, bool sync)
{
	if (atomic_dec_and_test(&ff->count)) {
		struct fuse_req *req = ff->reserved_req;

		if (sync) {
			fuse_request_send(ff->fc, req);
			path_put(&req->misc.release.path);
			fuse_put_request(ff->fc, req);
		} else {
			req->end = fuse_release_end;
			fuse_request_send_background(ff->fc, req);
		}
		kfree(ff);
	}
}

int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
		 bool isdir)
{
	struct fuse_open_out outarg;
	struct fuse_file *ff;
	int err;
	int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;

	ff = fuse_file_alloc(fc);
	if (!ff)
		return -ENOMEM;

	err = fuse_send_open(fc, nodeid, file, opcode, &outarg);
	if (err) {
		fuse_file_free(ff);
		return err;
	}

	if (isdir)
		outarg.open_flags &= ~FOPEN_DIRECT_IO;

	ff->fh = outarg.fh;
	ff->nodeid = nodeid;
	ff->open_flags = outarg.open_flags;
	file->private_data = fuse_file_get(ff);

	return 0;
}
EXPORT_SYMBOL_GPL(fuse_do_open);

void fuse_finish_open(struct inode *inode, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (ff->open_flags & FOPEN_DIRECT_IO)
		file->f_op = &fuse_direct_io_file_operations;
	if (!(ff->open_flags & FOPEN_KEEP_CACHE))
		invalidate_inode_pages2(inode->i_mapping);
	if (ff->open_flags & FOPEN_NONSEEKABLE)
		nonseekable_open(inode, file);
	if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
		struct fuse_inode *fi = get_fuse_inode(inode);

		spin_lock(&fc->lock);
		fi->attr_version = ++fc->attr_version;
		i_size_write(inode, 0);
		spin_unlock(&fc->lock);
		fuse_invalidate_attr(inode);
	}
}

int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	err = generic_file_open(inode, file);
	if (err)
		return err;

	err = fuse_do_open(fc, get_node_id(inode), file, isdir);
	if (err)
		return err;

	fuse_finish_open(inode, file);

	return 0;
}

static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode)
{
	struct fuse_conn *fc = ff->fc;
	struct fuse_req *req = ff->reserved_req;
	struct fuse_release_in *inarg = &req->misc.release.in;

	spin_lock(&fc->lock);
	list_del(&ff->write_entry);
	if (!RB_EMPTY_NODE(&ff->polled_node))
		rb_erase(&ff->polled_node, &fc->polled_files);
	spin_unlock(&fc->lock);

	wake_up_interruptible_all(&ff->poll_wait);

	inarg->fh = ff->fh;
	inarg->flags = flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_release_in);
	req->in.args[0].value = inarg;
}

void fuse_release_common(struct file *file, int opcode)
{
	struct fuse_file *ff;
	struct fuse_req *req;

	ff = file->private_data;
	if (unlikely(!ff))
		return;

	req = ff->reserved_req;
	fuse_prepare_release(ff, file->f_flags, opcode);

	if (ff->flock) {
		struct fuse_release_in *inarg = &req->misc.release.in;
		inarg->release_flags |= FUSE_RELEASE_FLOCK_UNLOCK;
		inarg->lock_owner = fuse_lock_owner_id(ff->fc,
						       (fl_owner_t) file);
	}
	/* Hold vfsmount and dentry until release is finished */
	path_get(&file->f_path);
	req->misc.release.path = file->f_path;

	/*
	 * Normally this will send the RELEASE request, however if
	 * some asynchronous READ or WRITE requests are outstanding,
	 * the sending will be delayed.
	 *
	 * Make the release synchronous if this is a fuseblk mount,
	 * synchronous RELEASE is allowed (and desirable) in this case
	 * because the server can be trusted not to screw up.
	 */
	fuse_file_put(ff, ff->fc->destroy_req != NULL);
}

static int fuse_open(struct inode *inode, struct file *file)
{
	return fuse_open_common(inode, file, false);
}

static int fuse_release(struct inode *inode, struct file *file)
{
	fuse_release_common(file, FUSE_RELEASE);

	/* return value is ignored by VFS */
	return 0;
}

void fuse_sync_release(struct fuse_file *ff, int flags)
{
	WARN_ON(atomic_read(&ff->count) > 1);
	fuse_prepare_release(ff, flags, FUSE_RELEASE);
	ff->reserved_req->force = 1;
	fuse_request_send(ff->fc, ff->reserved_req);
	fuse_put_request(ff->fc, ff->reserved_req);
	kfree(ff);
}
EXPORT_SYMBOL_GPL(fuse_sync_release);

/*
 * Scramble the ID space with XTEA, so that the value of the files_struct
 * pointer is not exposed to userspace.
 */
u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
{
	u32 *k = fc->scramble_key;
	u64 v = (unsigned long) id;
	u32 v0 = v;
	u32 v1 = v >> 32;
	u32 sum = 0;
	int i;

	for (i = 0; i < 32; i++) {
		v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
		sum += 0x9E3779B9;
		v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
	}

	return (u64) v0 + ((u64) v1 << 32);
}

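/*
 * Illustrative note (added, not from the original source): the loop above
 * is the standard 32-round XTEA encryption of the 64-bit value v, keyed by
 * fc->scramble_key, with the usual per-round delta 0x9E3779B9 accumulated
 * in `sum`.  The mapping is deterministic for the lifetime of a connection,
 * so the server can still compare lock owners for equality without ever
 * learning the raw files_struct pointer value.
 */
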
/*
 * Check if page is under writeback
 *
 * This is currently done by walking the list of writepage requests
 * for the inode, which can be pretty inefficient.
 */
static bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;
	bool found = false;

	spin_lock(&fc->lock);
	list_for_each_entry(req, &fi->writepages, writepages_entry) {
		pgoff_t curr_index;

		BUG_ON(req->inode != inode);
		curr_index = req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
		if (curr_index == index) {
			found = true;
			break;
		}
	}
	spin_unlock(&fc->lock);

	return found;
}

/*
 * Wait for page writeback to be completed.
 *
 * Since fuse doesn't rely on the VM writeback tracking, this has to
 * use some other means.
 */
static int fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
	return 0;
}

static int fuse_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_flush_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	if (fc->no_flush)
		return 0;

	req = fuse_get_req_nofail_nopages(fc, file);
	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.lock_owner = fuse_lock_owner_id(fc, id);
	req->in.h.opcode = FUSE_FLUSH;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->force = 1;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		fc->no_flush = 1;
		err = 0;
	}
	return err;
}

/*
 * Wait for all pending writepages on the inode to finish.
 *
 * This is currently done by blocking further writes with FUSE_NOWRITE
 * and waiting for all sent writes to complete.
 *
 * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage
 * could conflict with truncation.
 */
static void fuse_sync_writes(struct inode *inode)
{
	fuse_set_nowrite(inode);
	fuse_release_nowrite(inode);
}

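/*
 * Illustrative note (added, not from the original source): pairing the two
 * calls acts as a write barrier.  fuse_set_nowrite() blocks the queueing of
 * further writepage requests and waits until every request already handed
 * to the server has completed; fuse_release_nowrite() then lifts the block
 * so queued writes can flow again.  That is why the pair amounts to "wait
 * for all pending writepages" with no extra bookkeeping here.
 */
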
int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
		      int datasync, int isdir)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_fsync_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	err = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (err)
		return err;

	if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
		return 0;

	mutex_lock(&inode->i_mutex);

	/*
	 * Start writeback against all dirty pages of the inode, then
	 * wait for all outstanding writes, before sending the FSYNC
	 * request.
	 */
	err = write_inode_now(inode, 0);
	if (err)
		goto out;

	fuse_sync_writes(inode);

	req = fuse_get_req_nopages(fc);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.fsync_flags = datasync ? 1 : 0;
	req->in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		if (isdir)
			fc->no_fsyncdir = 1;
		else
			fc->no_fsync = 1;
		err = 0;
	}
out:
	mutex_unlock(&inode->i_mutex);
	return err;
}

static int fuse_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	return fuse_fsync_common(file, start, end, datasync, 0);
}

void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos,
		    size_t count, int opcode)
{
	struct fuse_read_in *inarg = &req->misc.read.in;
	struct fuse_file *ff = file->private_data;

	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	inarg->flags = file->f_flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_read_in);
	req->in.args[0].value = inarg;
	req->out.argvar = 1;
	req->out.numargs = 1;
	req->out.args[0].size = count;
}

static size_t fuse_send_read(struct fuse_req *req, struct file *file,
			     loff_t pos, size_t count, fl_owner_t owner)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;

	fuse_read_fill(req, file, pos, count, FUSE_READ);
	if (owner != NULL) {
		struct fuse_read_in *inarg = &req->misc.read.in;

		inarg->read_flags |= FUSE_READ_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}
	fuse_request_send(fc, req);
	return req->out.args[0].size;
}

static void fuse_read_update_size(struct inode *inode, loff_t size,
				  u64 attr_ver)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fc->lock);
	if (attr_ver == fi->attr_version && size < inode->i_size) {
		fi->attr_version = ++fc->attr_version;
		i_size_write(inode, size);
	}
	spin_unlock(&fc->lock);
}

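/*
 * Worked example (added, illustrative): if a READ at pos 0 for 4096 bytes
 * comes back with only 3000 bytes, the file must end at byte 3000 on the
 * server, so i_size is trimmed to pos + num_read = 3000 -- but only when
 * attr_version still matches, i.e. no newer attributes arrived while the
 * read was in flight.
 */
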
static int fuse_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	size_t num_read;
	loff_t pos = page_offset(page);
	size_t count = PAGE_CACHE_SIZE;
	u64 attr_ver;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	/*
	 * Page writeback can extend beyond the lifetime of the
	 * page-cache page, so make sure we read a properly synced
	 * page.
	 */
	fuse_wait_on_page_writeback(inode, page->index);

	req = fuse_get_req(fc, 1);
	err = PTR_ERR(req);
	if (IS_ERR(req))
		goto out;

	attr_ver = fuse_get_attr_version(fc);

	req->out.page_zeroing = 1;
	req->out.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = page;
	req->page_descs[0].length = count;
	num_read = fuse_send_read(req, file, pos, count, NULL);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	if (!err) {
		/*
		 * Short read means EOF.  If file size is larger, truncate it
		 */
		if (num_read < count)
			fuse_read_update_size(inode, pos + num_read, attr_ver);

		SetPageUptodate(page);
	}

	fuse_invalidate_attr(inode); /* atime changed */
out:
	unlock_page(page);
	return err;
}

static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
{
	int i;
	size_t count = req->misc.read.in.size;
	size_t num_read = req->out.args[0].size;
	struct address_space *mapping = NULL;

	for (i = 0; mapping == NULL && i < req->num_pages; i++)
		mapping = req->pages[i]->mapping;

	if (mapping) {
		struct inode *inode = mapping->host;

		/*
		 * Short read means EOF. If file size is larger, truncate it
		 */
		if (!req->out.h.error && num_read < count) {
			loff_t pos;

			pos = page_offset(req->pages[0]) + num_read;
			fuse_read_update_size(inode, pos,
					      req->misc.read.attr_ver);
		}

		fuse_invalidate_attr(inode); /* atime changed */
	}

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (!req->out.h.error)
			SetPageUptodate(page);
		else
			SetPageError(page);
		unlock_page(page);
		page_cache_release(page);
	}
	if (req->ff)
		fuse_file_put(req->ff, false);
}

static void fuse_send_readpages(struct fuse_req *req, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	loff_t pos = page_offset(req->pages[0]);
	size_t count = req->num_pages << PAGE_CACHE_SHIFT;

	req->out.argpages = 1;
	req->out.page_zeroing = 1;
	req->out.page_replace = 1;
	fuse_read_fill(req, file, pos, count, FUSE_READ);
	req->misc.read.attr_ver = fuse_get_attr_version(fc);
	if (fc->async_read) {
		req->ff = fuse_file_get(ff);
		req->end = fuse_readpages_end;
		fuse_request_send_background(fc, req);
	} else {
		fuse_request_send(fc, req);
		fuse_readpages_end(fc, req);
		fuse_put_request(fc, req);
	}
}

struct fuse_fill_data {
	struct fuse_req *req;
	struct file *file;
	struct inode *inode;
	unsigned nr_pages;
};

static int fuse_readpages_fill(void *_data, struct page *page)
{
	struct fuse_fill_data *data = _data;
	struct fuse_req *req = data->req;
	struct inode *inode = data->inode;
	struct fuse_conn *fc = get_fuse_conn(inode);

	fuse_wait_on_page_writeback(inode, page->index);

	if (req->num_pages &&
	    (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
	     (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read ||
	     req->pages[req->num_pages - 1]->index + 1 != page->index)) {
		int nr_alloc = min_t(unsigned, data->nr_pages,
				     FUSE_MAX_PAGES_PER_REQ);
		fuse_send_readpages(req, data->file);
		data->req = req = fuse_get_req(fc, nr_alloc);
		if (IS_ERR(req)) {
			unlock_page(page);
			return PTR_ERR(req);
		}
	}

	if (WARN_ON(req->num_pages >= req->max_pages)) {
		fuse_put_request(fc, req);
		return -EIO;
	}

	page_cache_get(page);
	req->pages[req->num_pages] = page;
	req->page_descs[req->num_pages].length = PAGE_SIZE;
	req->num_pages++;
	data->nr_pages--;
	return 0;
}

static int fuse_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_fill_data data;
	int err;
	int nr_alloc = min_t(unsigned, nr_pages, FUSE_MAX_PAGES_PER_REQ);

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	data.file = file;
	data.inode = inode;
	data.req = fuse_get_req(fc, nr_alloc);
	data.nr_pages = nr_pages;
	err = PTR_ERR(data.req);
	if (IS_ERR(data.req))
		goto out;

	err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
	if (!err) {
		if (data.req->num_pages)
			fuse_send_readpages(data.req, file);
		else
			fuse_put_request(fc, data.req);
	}
out:
	return err;
}

static ssize_t fuse_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
				  unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);

	/*
	 * In auto invalidate mode, always update attributes on read.
	 * Otherwise, only update if we attempt to read past EOF (to ensure
	 * i_size is up to date).
	 */
	if (fc->auto_inval_data ||
	    (pos + iov_length(iov, nr_segs) > i_size_read(inode))) {
		int err;
		err = fuse_update_attributes(inode, NULL, iocb->ki_filp, NULL);
		if (err)
			return err;
	}

	return generic_file_aio_read(iocb, iov, nr_segs, pos);
}

static void fuse_write_fill(struct fuse_req *req, struct fuse_file *ff,
			    loff_t pos, size_t count)
{
	struct fuse_write_in *inarg = &req->misc.write.in;
	struct fuse_write_out *outarg = &req->misc.write.out;

	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	req->in.h.opcode = FUSE_WRITE;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 2;
	if (ff->fc->minor < 9)
		req->in.args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
	else
		req->in.args[0].size = sizeof(struct fuse_write_in);
	req->in.args[0].value = inarg;
	req->in.args[1].size = count;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(struct fuse_write_out);
	req->out.args[0].value = outarg;
}

static size_t fuse_send_write(struct fuse_req *req, struct file *file,
			      loff_t pos, size_t count, fl_owner_t owner)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_write_in *inarg = &req->misc.write.in;

	fuse_write_fill(req, ff, pos, count);
	inarg->flags = file->f_flags;
	if (owner != NULL) {
		inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}
	fuse_request_send(fc, req);
	return req->misc.write.out.size;
}

void fuse_write_update_size(struct inode *inode, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fc->lock);
	fi->attr_version = ++fc->attr_version;
	if (pos > inode->i_size)
		i_size_write(inode, pos);
	spin_unlock(&fc->lock);
}

static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
				    struct inode *inode, loff_t pos,
				    size_t count)
{
	size_t res;
	unsigned offset;
	unsigned i;

	for (i = 0; i < req->num_pages; i++)
		fuse_wait_on_page_writeback(inode, req->pages[i]->index);

	res = fuse_send_write(req, file, pos, count, NULL);

	offset = req->page_descs[0].offset;
	count = res;
	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];

		if (!req->out.h.error && !offset && count >= PAGE_CACHE_SIZE)
			SetPageUptodate(page);

		if (count > PAGE_CACHE_SIZE - offset)
			count -= PAGE_CACHE_SIZE - offset;
		else
			count = 0;
		offset = 0;

		unlock_page(page);
		page_cache_release(page);
	}

	return res;
}

static ssize_t fuse_fill_write_pages(struct fuse_req *req,
				     struct address_space *mapping,
				     struct iov_iter *ii, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(mapping->host);
	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
	size_t count = 0;
	int err;

	req->in.argpages = 1;
	req->page_descs[0].offset = offset;

	do {
		size_t tmp;
		struct page *page;
		pgoff_t index = pos >> PAGE_CACHE_SHIFT;
		size_t bytes = min_t(size_t, PAGE_CACHE_SIZE - offset,
				     iov_iter_count(ii));

		bytes = min_t(size_t, bytes, fc->max_write - count);

 again:
		err = -EFAULT;
		if (iov_iter_fault_in_readable(ii, bytes))
			break;

		err = -ENOMEM;
		page = grab_cache_page_write_begin(mapping, index, 0);
		if (!page)
			break;

		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		pagefault_disable();
		tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
		pagefault_enable();
		flush_dcache_page(page);

		mark_page_accessed(page);

		if (!tmp) {
			unlock_page(page);
			page_cache_release(page);
			bytes = min(bytes, iov_iter_single_seg_count(ii));
			goto again;
		}

		err = 0;
		req->pages[req->num_pages] = page;
		req->page_descs[req->num_pages].length = tmp;
		req->num_pages++;

		iov_iter_advance(ii, tmp);
		count += tmp;
		pos += tmp;
		offset += tmp;
		if (offset == PAGE_CACHE_SIZE)
			offset = 0;

		if (!fc->big_writes)
			break;
	} while (iov_iter_count(ii) && count < fc->max_write &&
		 req->num_pages < req->max_pages && offset == 0);

	return count > 0 ? count : err;
}

static inline unsigned fuse_wr_pages(loff_t pos, size_t len)
{
	return min_t(unsigned,
		     ((pos + len - 1) >> PAGE_CACHE_SHIFT) -
		     (pos >> PAGE_CACHE_SHIFT) + 1,
		     FUSE_MAX_PAGES_PER_REQ);
}

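/*
 * Worked example (added, illustrative): with 4K pages, pos = 4094 and
 * len = 4 touch bytes 4094..4097, i.e. page indexes 0 and 1, and indeed
 * ((4097 >> 12) - (4094 >> 12)) + 1 == 2.  The result is capped at
 * FUSE_MAX_PAGES_PER_REQ.
 */
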
static ssize_t fuse_perform_write(struct file *file,
				  struct address_space *mapping,
				  struct iov_iter *ii, loff_t pos)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err = 0;
	ssize_t res = 0;

	if (is_bad_inode(inode))
		return -EIO;

	do {
		struct fuse_req *req;
		ssize_t count;
		unsigned nr_pages = fuse_wr_pages(pos, iov_iter_count(ii));

		req = fuse_get_req(fc, nr_pages);
		if (IS_ERR(req)) {
			err = PTR_ERR(req);
			break;
		}

		count = fuse_fill_write_pages(req, mapping, ii, pos);
		if (count <= 0) {
			err = count;
		} else {
			size_t num_written;

			num_written = fuse_send_write_pages(req, file, inode,
							    pos, count);
			err = req->out.h.error;
			if (!err) {
				res += num_written;
				pos += num_written;

				/* break out of the loop on short write */
				if (num_written != count)
					err = -EIO;
			}
		}
		fuse_put_request(fc, req);
	} while (!err && iov_iter_count(ii));

	if (res > 0)
		fuse_write_update_size(inode, pos);

	fuse_invalidate_attr(inode);

	return res > 0 ? res : err;
}

static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				   unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t count = 0;
	size_t ocount = 0;
	ssize_t written = 0;
	ssize_t written_buffered = 0;
	loff_t endbyte = 0;
	struct inode *inode = mapping->host;
	ssize_t err;
	struct iov_iter i;

	WARN_ON(iocb->ki_pos != pos);

	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
	if (err)
		return err;

	count = ocount;
	sb_start_write(inode->i_sb);
	mutex_lock(&inode->i_mutex);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;

	if (count == 0)
		goto out;

	err = file_remove_suid(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	if (file->f_flags & O_DIRECT) {
		written = generic_file_direct_write(iocb, iov, &nr_segs,
						    pos, &iocb->ki_pos,
						    count, ocount);
		if (written < 0 || written == count)
			goto out;

		pos += written;
		count -= written;

		iov_iter_init(&i, iov, nr_segs, count, written);
		written_buffered = fuse_perform_write(file, mapping, &i, pos);
		if (written_buffered < 0) {
			err = written_buffered;
			goto out;
		}
		endbyte = pos + written_buffered - 1;

		err = filemap_write_and_wait_range(file->f_mapping, pos,
						   endbyte);
		if (err)
			goto out;

		invalidate_mapping_pages(file->f_mapping,
					 pos >> PAGE_CACHE_SHIFT,
					 endbyte >> PAGE_CACHE_SHIFT);

		written += written_buffered;
		iocb->ki_pos = pos + written_buffered;
	} else {
		iov_iter_init(&i, iov, nr_segs, count, 0);
		written = fuse_perform_write(file, mapping, &i, pos);
		if (written >= 0)
			iocb->ki_pos = pos + written;
	}
out:
	current->backing_dev_info = NULL;
	mutex_unlock(&inode->i_mutex);
	sb_end_write(inode->i_sb);

	return written ? written : err;
}

static void fuse_release_user_pages(struct fuse_req *req, int write)
{
	unsigned i;

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (write)
			set_page_dirty_lock(page);
		put_page(page);
	}
}

static inline void fuse_page_descs_length_init(struct fuse_req *req,
					       unsigned index,
					       unsigned nr_pages)
{
	int i;

	for (i = index; i < index + nr_pages; i++)
		req->page_descs[i].length = PAGE_SIZE -
			req->page_descs[i].offset;
}

static inline unsigned long fuse_get_user_addr(const struct iov_iter *ii)
{
	return (unsigned long)ii->iov->iov_base + ii->iov_offset;
}

static inline size_t fuse_get_frag_size(const struct iov_iter *ii,
					size_t max_size)
{
	return min(iov_iter_single_seg_count(ii), max_size);
}

static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii,
			       size_t *nbytesp, int write)
{
	size_t nbytes = 0;  /* # bytes already packed in req */

	/* Special case for kernel I/O: can copy directly into the buffer */
	if (segment_eq(get_fs(), KERNEL_DS)) {
		unsigned long user_addr = fuse_get_user_addr(ii);
		size_t frag_size = fuse_get_frag_size(ii, *nbytesp);

		if (write)
			req->in.args[1].value = (void *) user_addr;
		else
			req->out.args[0].value = (void *) user_addr;

		iov_iter_advance(ii, frag_size);
		*nbytesp = frag_size;

		return 0;
	}

	while (nbytes < *nbytesp && req->num_pages < req->max_pages) {
		unsigned npages;
		unsigned long user_addr = fuse_get_user_addr(ii);
		unsigned offset = user_addr & ~PAGE_MASK;
		size_t frag_size = fuse_get_frag_size(ii, *nbytesp - nbytes);
		int ret;

		unsigned n = req->max_pages - req->num_pages;
		frag_size = min_t(size_t, frag_size, n << PAGE_SHIFT);

		npages = (frag_size + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
		npages = clamp(npages, 1U, n);

		ret = get_user_pages_fast(user_addr, npages, !write,
					  &req->pages[req->num_pages]);
		if (ret < 0)
			return ret;

		npages = ret;
		frag_size = min_t(size_t, frag_size,
				  (npages << PAGE_SHIFT) - offset);
		iov_iter_advance(ii, frag_size);

		req->page_descs[req->num_pages].offset = offset;
		fuse_page_descs_length_init(req, req->num_pages, npages);

		req->num_pages += npages;
		req->page_descs[req->num_pages - 1].length -=
			(npages << PAGE_SHIFT) - offset - frag_size;

		nbytes += frag_size;
	}

	if (write)
		req->in.argpages = 1;
	else
		req->out.argpages = 1;

	*nbytesp = nbytes;

	return 0;
}

static inline int fuse_iter_npages(const struct iov_iter *ii_p)
{
	struct iov_iter ii = *ii_p;
	int npages = 0;

	while (iov_iter_count(&ii) && npages < FUSE_MAX_PAGES_PER_REQ) {
		unsigned long user_addr = fuse_get_user_addr(&ii);
		unsigned offset = user_addr & ~PAGE_MASK;
		size_t frag_size = iov_iter_single_seg_count(&ii);

		npages += (frag_size + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
		iov_iter_advance(&ii, frag_size);
	}

	return min(npages, FUSE_MAX_PAGES_PER_REQ);
}

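/*
 * Worked example (added, illustrative): one 6000-byte iovec segment whose
 * user address lies 4080 bytes into a 4K page covers bytes 4080..10079 of
 * that page range, so it needs (6000 + 4080 + 4095) >> PAGE_SHIFT = 3
 * pages even though 6000 bytes alone would fit in two.
 */
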
ssize_t fuse_direct_io(struct file *file, const struct iovec *iov,
		       unsigned long nr_segs, size_t count, loff_t *ppos,
		       int write)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	size_t nmax = write ? fc->max_write : fc->max_read;
	loff_t pos = *ppos;
	ssize_t res = 0;
	struct fuse_req *req;
	struct iov_iter ii;

	iov_iter_init(&ii, iov, nr_segs, count, 0);

	req = fuse_get_req(fc, fuse_iter_npages(&ii));
	if (IS_ERR(req))
		return PTR_ERR(req);

	while (count) {
		size_t nres;
		fl_owner_t owner = current->files;
		size_t nbytes = min(count, nmax);
		int err = fuse_get_user_pages(req, &ii, &nbytes, write);
		if (err) {
			res = err;
			break;
		}

		if (write)
			nres = fuse_send_write(req, file, pos, nbytes, owner);
		else
			nres = fuse_send_read(req, file, pos, nbytes, owner);

		fuse_release_user_pages(req, !write);
		if (req->out.h.error) {
			if (!res)
				res = req->out.h.error;
			break;
		} else if (nres > nbytes) {
			res = -EIO;
			break;
		}
		count -= nres;
		res += nres;
		pos += nres;
		if (nres != nbytes)
			break;
		if (count) {
			fuse_put_request(fc, req);
			req = fuse_get_req(fc, fuse_iter_npages(&ii));
			if (IS_ERR(req))
				break;
		}
	}
	if (!IS_ERR(req))
		fuse_put_request(fc, req);
	if (res > 0)
		*ppos = pos;

	return res;
}
EXPORT_SYMBOL_GPL(fuse_direct_io);

static ssize_t __fuse_direct_read(struct file *file, const struct iovec *iov,
				  unsigned long nr_segs, loff_t *ppos)
{
	ssize_t res;
	struct inode *inode = file->f_path.dentry->d_inode;

	if (is_bad_inode(inode))
		return -EIO;

	res = fuse_direct_io(file, iov, nr_segs, iov_length(iov, nr_segs),
			     ppos, 0);

	fuse_invalidate_attr(inode);

	return res;
}

static ssize_t fuse_direct_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	struct iovec iov = { .iov_base = buf, .iov_len = count };
	return __fuse_direct_read(file, &iov, 1, ppos);
}

static ssize_t __fuse_direct_write(struct file *file, const struct iovec *iov,
				   unsigned long nr_segs, loff_t *ppos)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	size_t count = iov_length(iov, nr_segs);
	ssize_t res;

	res = generic_write_checks(file, ppos, &count, 0);
	if (!res) {
		res = fuse_direct_io(file, iov, nr_segs, count, ppos, 1);
		if (res > 0)
			fuse_write_update_size(inode, *ppos);
	}

	fuse_invalidate_attr(inode);

	return res;
}

static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = count };
	struct inode *inode = file->f_path.dentry->d_inode;
	ssize_t res;

	if (is_bad_inode(inode))
		return -EIO;

	/* Don't allow parallel writes to the same file */
	mutex_lock(&inode->i_mutex);
	res = __fuse_direct_write(file, &iov, 1, ppos);
	mutex_unlock(&inode->i_mutex);

	return res;
}

static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
{
	__free_page(req->pages[0]);
	fuse_file_put(req->ff, false);
}

static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
{
	struct inode *inode = req->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;

	list_del(&req->writepages_entry);
	dec_bdi_stat(bdi, BDI_WRITEBACK);
	dec_zone_page_state(req->pages[0], NR_WRITEBACK_TEMP);
	bdi_writeout_inc(bdi);
	wake_up(&fi->page_waitq);
}

/* Called under fc->lock, may release and reacquire it */
static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req)
__releases(fc->lock)
__acquires(fc->lock)
{
	struct fuse_inode *fi = get_fuse_inode(req->inode);
	loff_t size = i_size_read(req->inode);
	struct fuse_write_in *inarg = &req->misc.write.in;

	if (!fc->connected)
		goto out_free;

	if (inarg->offset + PAGE_CACHE_SIZE <= size) {
		inarg->size = PAGE_CACHE_SIZE;
	} else if (inarg->offset < size) {
		inarg->size = size & (PAGE_CACHE_SIZE - 1);
	} else {
		/* Got truncated off completely */
		goto out_free;
	}

	req->in.args[1].size = inarg->size;
	fi->writectr++;
	fuse_request_send_background_locked(fc, req);
	return;

 out_free:
	fuse_writepage_finish(fc, req);
	spin_unlock(&fc->lock);
	fuse_writepage_free(fc, req);
	fuse_put_request(fc, req);
	spin_lock(&fc->lock);
}

/*
 * If fi->writectr is positive (no truncate or fsync going on) send
 * all queued writepage requests.
 *
 * Called with fc->lock
 */
void fuse_flush_writepages(struct inode *inode)
__releases(fc->lock)
__acquires(fc->lock)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;

	while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
		req = list_entry(fi->queued_writes.next, struct fuse_req, list);
		list_del_init(&req->list);
		fuse_send_writepage(fc, req);
	}
}

static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
{
	struct inode *inode = req->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);

	mapping_set_error(inode->i_mapping, req->out.h.error);
	spin_lock(&fc->lock);
	fi->writectr--;
	fuse_writepage_finish(fc, req);
	spin_unlock(&fc->lock);
	fuse_writepage_free(fc, req);
}

static int fuse_writepage_locked(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;
	struct fuse_file *ff;
	struct page *tmp_page;

	set_page_writeback(page);

	req = fuse_request_alloc_nofs(1);
	if (!req)
		goto err;

	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!tmp_page)
		goto err_free;

	spin_lock(&fc->lock);
	BUG_ON(list_empty(&fi->write_files));
	ff = list_entry(fi->write_files.next, struct fuse_file, write_entry);
	req->ff = fuse_file_get(ff);
	spin_unlock(&fc->lock);

	fuse_write_fill(req, ff, page_offset(page), 0);

	copy_highpage(tmp_page, page);
	req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
	req->in.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = tmp_page;
	req->page_descs[0].offset = 0;
	req->page_descs[0].length = PAGE_SIZE;
	req->end = fuse_writepage_end;
	req->inode = inode;

	inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK);
	inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
	end_page_writeback(page);

	spin_lock(&fc->lock);
	list_add(&req->writepages_entry, &fi->writepages);
	list_add_tail(&req->list, &fi->queued_writes);
	fuse_flush_writepages(inode);
	spin_unlock(&fc->lock);

	return 0;

err_free:
	fuse_request_free(req);
err:
	end_page_writeback(page);
	return -ENOMEM;
}

static int fuse_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;

	err = fuse_writepage_locked(page);
	unlock_page(page);

	return err;
}

static int fuse_launder_page(struct page *page)
{
	int err = 0;
	if (clear_page_dirty_for_io(page)) {
		struct inode *inode = page->mapping->host;
		err = fuse_writepage_locked(page);
		if (!err)
			fuse_wait_on_page_writeback(inode, page->index);
	}
	return err;
}

/*
 * Write back dirty pages now, because there may not be any suitable
 * open file later
 */
static void fuse_vma_close(struct vm_area_struct *vma)
{
	filemap_write_and_wait(vma->vm_file->f_mapping);
}

/*
 * Wait for writeback against this page to complete before allowing it
 * to be marked dirty again, and hence written back again, possibly
 * before the previous writepage completed.
 *
 * Block here, instead of in ->writepage(), so that the userspace fs
 * can only block processes actually operating on the filesystem.
 *
 * Otherwise unprivileged userspace fs would be able to block
 * unrelated:
 *
 * - page migration
 * - sync(2)
 * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
 */
static int fuse_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	/*
	 * Don't use page->mapping as it may become NULL from a
	 * concurrent truncate.
	 */
	struct inode *inode = vma->vm_file->f_mapping->host;

	fuse_wait_on_page_writeback(inode, page->index);
	return 0;
}

static const struct vm_operations_struct fuse_file_vm_ops = {
	.close		= fuse_vma_close,
	.fault		= filemap_fault,
	.page_mkwrite	= fuse_page_mkwrite,
	.remap_pages	= generic_file_remap_pages,
};

static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) {
		struct inode *inode = file->f_dentry->d_inode;
		struct fuse_conn *fc = get_fuse_conn(inode);
		struct fuse_inode *fi = get_fuse_inode(inode);
		struct fuse_file *ff = file->private_data;
		/*
		 * file may be written through mmap, so chain it onto the
		 * inodes's write_file list
		 */
		spin_lock(&fc->lock);
		if (list_empty(&ff->write_entry))
			list_add(&ff->write_entry, &fi->write_files);
		spin_unlock(&fc->lock);
	}
	file_accessed(file);
	vma->vm_ops = &fuse_file_vm_ops;
	return 0;
}

static int fuse_direct_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Can't provide the coherency needed for MAP_SHARED */
	if (vma->vm_flags & VM_MAYSHARE)
		return -ENODEV;

	invalidate_inode_pages2(file->f_mapping);

	return generic_file_mmap(file, vma);
}

static int convert_fuse_file_lock(const struct fuse_file_lock *ffl,
				  struct file_lock *fl)
{
	switch (ffl->type) {
	case F_UNLCK:
		break;

	case F_RDLCK:
	case F_WRLCK:
		if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
		    ffl->end < ffl->start)
			return -EIO;

		fl->fl_start = ffl->start;
		fl->fl_end = ffl->end;
		fl->fl_pid = ffl->pid;
		break;

	default:
		return -EIO;
	}
	fl->fl_type = ffl->type;
	return 0;
}

static void fuse_lk_fill(struct fuse_req *req, struct file *file,
			 const struct file_lock *fl, int opcode, pid_t pid,
			 int flock)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_lk_in *arg = &req->misc.lk_in;

	arg->fh = ff->fh;
	arg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
	arg->lk.start = fl->fl_start;
	arg->lk.end = fl->fl_end;
	arg->lk.type = fl->fl_type;
	arg->lk.pid = pid;
	if (flock)
		arg->lk_flags |= FUSE_LK_FLOCK;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(*arg);
	req->in.args[0].value = arg;
}

static int fuse_getlk(struct file *file, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_lk_out outarg;
	int err;

	req = fuse_get_req_nopages(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	fuse_lk_fill(req, file, fl, FUSE_GETLK, 0, 0);
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err)
		err = convert_fuse_file_lock(&outarg.lk, fl);

	return err;
}

static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
	pid_t pid = fl->fl_type != F_UNLCK ? current->tgid : 0;
	int err;

	if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
		/* NLM needs asynchronous locks, which we don't support yet */
		return -ENOLCK;
	}

	/* Unlock on close is handled by the flush method */
	if (fl->fl_flags & FL_CLOSE)
		return 0;

	req = fuse_get_req_nopages(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	fuse_lk_fill(req, file, fl, opcode, pid, flock);
	fuse_request_send(fc, req);
	err = req->out.h.error;
	/* locking is restartable */
	if (err == -EINTR)
		err = -ERESTARTSYS;

	fuse_put_request(fc, req);
	return err;
}

static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (cmd == F_CANCELLK) {
		/* Nothing to do */
		err = 0;
	} else if (cmd == F_GETLK) {
		if (fc->no_lock) {
			posix_test_lock(file, fl);
			err = 0;
		} else
			err = fuse_getlk(file, fl);
	} else {
		if (fc->no_lock)
			err = posix_lock_file(file, fl, NULL);
		else
			err = fuse_setlk(file, fl, 0);
	}
	return err;
}

static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (fc->no_flock) {
		err = flock_lock_file_wait(file, fl);
	} else {
		struct fuse_file *ff = file->private_data;

		/* emulate flock with POSIX locks */
		fl->fl_owner = (fl_owner_t) file;
		ff->flock = true;
		err = fuse_setlk(file, fl, 1);
	}

	return err;
}

static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_bmap_in inarg;
	struct fuse_bmap_out outarg;
	int err;

	if (!inode->i_sb->s_bdev || fc->no_bmap)
		return 0;

	req = fuse_get_req_nopages(fc);
	if (IS_ERR(req))
		return 0;

	memset(&inarg, 0, sizeof(inarg));
	inarg.block = block;
	inarg.blocksize = inode->i_sb->s_blocksize;
	req->in.h.opcode = FUSE_BMAP;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS)
		fc->no_bmap = 1;

	return err ? 0 : outarg.block;
}

static loff_t fuse_file_llseek(struct file *file, loff_t offset, int whence)
{
	loff_t retval;
	struct inode *inode = file->f_path.dentry->d_inode;

	/* No i_mutex protection necessary for SEEK_CUR and SEEK_SET */
	if (whence == SEEK_CUR || whence == SEEK_SET)
		return generic_file_llseek(file, offset, whence);

	mutex_lock(&inode->i_mutex);
	retval = fuse_update_attributes(inode, NULL, file, NULL);
	if (!retval)
		retval = generic_file_llseek(file, offset, whence);
	mutex_unlock(&inode->i_mutex);

	return retval;
}

static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov,
				unsigned int nr_segs, size_t bytes, bool to_user)
{
	struct iov_iter ii;
	int page_idx = 0;

	if (!bytes)
		return 0;

	iov_iter_init(&ii, iov, nr_segs, bytes, 0);

	while (iov_iter_count(&ii)) {
		struct page *page = pages[page_idx++];
		size_t todo = min_t(size_t, PAGE_SIZE, iov_iter_count(&ii));
		void *kaddr;

		kaddr = kmap(page);

		while (todo) {
			char __user *uaddr = ii.iov->iov_base + ii.iov_offset;
			size_t iov_len = ii.iov->iov_len - ii.iov_offset;
			size_t copy = min(todo, iov_len);
			size_t left;

			if (!to_user)
				left = copy_from_user(kaddr, uaddr, copy);
			else
				left = copy_to_user(uaddr, kaddr, copy);

			if (unlikely(left))
				return -EFAULT;

			iov_iter_advance(&ii, copy);
			todo -= copy;
			kaddr += copy;
		}

		kunmap(page);
	}

	return 0;
}

/*
 * CUSE servers compiled on 32bit broke on 64bit kernels because the
 * ABI was defined to be 'struct iovec' which is different on 32bit
 * and 64bit.  Fortunately we can determine which structure the server
 * used from the size of the reply.
 */
static int fuse_copy_ioctl_iovec_old(struct iovec *dst, void *src,
				     size_t transferred, unsigned count,
				     bool is_compat)
{
#ifdef CONFIG_COMPAT
	if (count * sizeof(struct compat_iovec) == transferred) {
		struct compat_iovec *ciov = src;
		unsigned i;

		/*
		 * With this interface a 32bit server cannot support
		 * non-compat (i.e. ones coming from 64bit apps) ioctl
		 * requests
		 */
		if (!is_compat)
			return -EINVAL;

		for (i = 0; i < count; i++) {
			dst[i].iov_base = compat_ptr(ciov[i].iov_base);
			dst[i].iov_len = ciov[i].iov_len;
		}
		return 0;
	}
#endif

	if (count * sizeof(struct iovec) != transferred)
		return -EIO;

	memcpy(dst, src, transferred);
	return 0;
}

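/*
 * Illustrative arithmetic (added, not from the original source): on a
 * 64-bit kernel sizeof(struct iovec) is 16 while sizeof(struct
 * compat_iovec) is 8, so a reply carrying `count` iovecs is 16*count
 * bytes from a 64-bit server and 8*count bytes from a 32-bit one.  Since
 * `count` is fixed, the reply size alone identifies which structure the
 * server used; anything matching neither size is rejected above.
 */
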
/* Make sure iov_length() won't overflow */
static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count)
{
	size_t n;
	u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT;

	for (n = 0; n < count; n++, iov++) {
		if (iov->iov_len > (size_t) max)
			return -ENOMEM;
		max -= iov->iov_len;
	}
	return 0;
}

static int fuse_copy_ioctl_iovec(struct fuse_conn *fc, struct iovec *dst,
				 void *src, size_t transferred, unsigned count,
				 bool is_compat)
{
	unsigned i;
	struct fuse_ioctl_iovec *fiov = src;

	if (fc->minor < 16) {
		return fuse_copy_ioctl_iovec_old(dst, src, transferred,
						 count, is_compat);
	}

	if (count * sizeof(struct fuse_ioctl_iovec) != transferred)
		return -EIO;

	for (i = 0; i < count; i++) {
		/* Did the server supply an inappropriate value? */
		if (fiov[i].base != (unsigned long) fiov[i].base ||
		    fiov[i].len != (unsigned long) fiov[i].len)
			return -EIO;

		dst[i].iov_base = (void __user *) (unsigned long) fiov[i].base;
		dst[i].iov_len = (size_t) fiov[i].len;

#ifdef CONFIG_COMPAT
		if (is_compat &&
		    (ptr_to_compat(dst[i].iov_base) != fiov[i].base ||
		     (compat_size_t) dst[i].iov_len != fiov[i].len))
			return -EIO;
#endif
	}

	return 0;
}

/*
 * For ioctls, there is no generic way to determine how much memory
 * needs to be read and/or written.  Furthermore, ioctls are allowed
 * to dereference the passed pointer, so the parameter requires deep
 * copying but FUSE has no idea whatsoever about what to copy in or
 * out.
 *
 * This is solved by allowing FUSE server to retry ioctl with
 * necessary in/out iovecs.  Let's assume the ioctl implementation
 * needs to read in the following structure.
 *
 * struct a {
 *	char	*buf;
 *	size_t	buflen;
 * }
 *
 * On the first callout to FUSE server, inarg->in_size and
 * inarg->out_size will be NULL; then, the server completes the ioctl
 * with FUSE_IOCTL_RETRY set in out->flags, out->in_iovs set to 1 and
 * the actual iov array to
 *
 * { { .iov_base = inarg.arg,	.iov_len = sizeof(struct a) } }
 *
 * which tells FUSE to copy in the requested area and retry the ioctl.
 * On the second round, the server has access to the structure and
 * from that it can tell what to look for next, so on the invocation,
 * it sets FUSE_IOCTL_RETRY, out->in_iovs to 2 and iov array to
 *
 * { { .iov_base = inarg.arg,	.iov_len = sizeof(struct a)	},
 *   { .iov_base = a.buf,	.iov_len = a.buflen		} }
 *
 * FUSE will copy both struct a and the pointed buffer from the
 * process doing the ioctl and retry ioctl with both struct a and the
 * buffer.
 *
 * This time, FUSE server has everything it needs and completes ioctl
 * without FUSE_IOCTL_RETRY which finishes the ioctl call.
 *
 * Copying data out works the same way.
 *
 * Note that if FUSE_IOCTL_UNRESTRICTED is clear, the kernel
 * automatically initializes in and out iovs by decoding @cmd with
 * _IOC_* macros and the server is not allowed to request RETRY.  This
 * limits ioctl data transfers to well-formed ioctls and is the forced
 * behavior for all FUSE servers.
 */
long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
		   unsigned int flags)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_ioctl_in inarg = {
		.fh = ff->fh,
		.cmd = cmd,
		.arg = arg,
		.flags = flags
	};
	struct fuse_ioctl_out outarg;
	struct fuse_req *req = NULL;
	struct page **pages = NULL;
	struct iovec *iov_page = NULL;
	struct iovec *in_iov = NULL, *out_iov = NULL;
	unsigned int in_iovs = 0, out_iovs = 0, num_pages = 0, max_pages;
	size_t in_size, out_size, transferred;
	int err;

#if BITS_PER_LONG == 32
	inarg.flags |= FUSE_IOCTL_32BIT;
#else
	if (flags & FUSE_IOCTL_COMPAT)
		inarg.flags |= FUSE_IOCTL_32BIT;
#endif

	/* assume all the iovs returned by client always fits in a page */
	BUILD_BUG_ON(sizeof(struct fuse_ioctl_iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE);

	err = -ENOMEM;
	pages = kcalloc(FUSE_MAX_PAGES_PER_REQ, sizeof(pages[0]), GFP_KERNEL);
	iov_page = (struct iovec *) __get_free_page(GFP_KERNEL);
	if (!pages || !iov_page)
		goto out;

	/*
	 * If restricted, initialize IO parameters as encoded in @cmd.
	 * RETRY from server is not allowed.
	 */
	if (!(flags & FUSE_IOCTL_UNRESTRICTED)) {
		struct iovec *iov = iov_page;

		iov->iov_base = (void __user *)arg;
		iov->iov_len = _IOC_SIZE(cmd);

		if (_IOC_DIR(cmd) & _IOC_WRITE) {
			in_iov = iov;
			in_iovs = 1;
		}

		if (_IOC_DIR(cmd) & _IOC_READ) {
			out_iov = iov;
			out_iovs = 1;
		}
	}

 retry:
	inarg.in_size = in_size = iov_length(in_iov, in_iovs);
	inarg.out_size = out_size = iov_length(out_iov, out_iovs);

	/*
	 * Out data can be used either for actual out data or iovs,
	 * make sure there always is at least one page.
	 */
	out_size = max_t(size_t, out_size, PAGE_SIZE);
	max_pages = DIV_ROUND_UP(max(in_size, out_size), PAGE_SIZE);

	/* make sure there are enough buffer pages and init request with them */
	err = -ENOMEM;
	if (max_pages > FUSE_MAX_PAGES_PER_REQ)
		goto out;
	while (num_pages < max_pages) {
		pages[num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
		if (!pages[num_pages])
			goto out;
		num_pages++;
	}

	req = fuse_get_req(fc, num_pages);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		req = NULL;
		goto out;
	}
	memcpy(req->pages, pages, sizeof(req->pages[0]) * num_pages);
	req->num_pages = num_pages;
	fuse_page_descs_length_init(req, 0, req->num_pages);

	/* okay, let's send it to the client */
	req->in.h.opcode = FUSE_IOCTL;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	if (in_size) {
		req->in.numargs++;
		req->in.args[1].size = in_size;
		req->in.argpages = 1;

		err = fuse_ioctl_copy_user(pages, in_iov, in_iovs, in_size,
					   false);
		if (err)
			goto out;
	}

	req->out.numargs = 2;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	req->out.args[1].size = out_size;
	req->out.argpages = 1;
	req->out.argvar = 1;

	fuse_request_send(fc, req);
	err = req->out.h.error;
	transferred = req->out.args[1].size;
	fuse_put_request(fc, req);
	req = NULL;
	if (err)
		goto out;

	/* did it ask for retry? */
	if (outarg.flags & FUSE_IOCTL_RETRY) {
		void *vaddr;

		/* no retry if in restricted mode */
		err = -EIO;
		if (!(flags & FUSE_IOCTL_UNRESTRICTED))
			goto out;

		in_iovs = outarg.in_iovs;
		out_iovs = outarg.out_iovs;

		/*
		 * Make sure things are in boundary, separate checks
		 * are to protect against overflow.
		 */
		err = -ENOMEM;
		if (in_iovs > FUSE_IOCTL_MAX_IOV ||
		    out_iovs > FUSE_IOCTL_MAX_IOV ||
		    in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV)
			goto out;

		vaddr = kmap_atomic(pages[0]);
		err = fuse_copy_ioctl_iovec(fc, iov_page, vaddr,
					    transferred, in_iovs + out_iovs,
					    (flags & FUSE_IOCTL_COMPAT) != 0);
		kunmap_atomic(vaddr);
		if (err)
			goto out;

		in_iov = iov_page;
		out_iov = in_iov + in_iovs;

		err = fuse_verify_ioctl_iov(in_iov, in_iovs);
		if (err)
			goto out;

		err = fuse_verify_ioctl_iov(out_iov, out_iovs);
		if (err)
			goto out;

		goto retry;
	}

	err = -EIO;
	if (transferred > inarg.out_size)
		goto out;

	err = fuse_ioctl_copy_user(pages, out_iov, out_iovs, transferred, true);
 out:
	if (req)
		fuse_put_request(fc, req);
	free_page((unsigned long) iov_page);
	while (num_pages)
		__free_page(pages[--num_pages]);
	kfree(pages);

	return err ? err : outarg.result;
}
EXPORT_SYMBOL_GPL(fuse_do_ioctl);

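/*
 * Illustrative server-side sketch (added; an assumption, not part of this
 * file): a server using the retry protocol described above would answer
 * the first FUSE_IOCTL with FUSE_IOCTL_RETRY set and an iovec describing
 * what the kernel should deep-copy in, e.g.
 *
 *	struct iovec in_iov = {
 *		.iov_base = (void *) (uintptr_t) inarg.arg,
 *		.iov_len  = sizeof(struct a),	// hypothetical struct
 *	};
 *
 * and would only perform the operation on the second call, once the
 * kernel has copied the requested buffers into the request pages.
 */
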
long fuse_ioctl_common(struct file *file, unsigned int cmd,
		       unsigned long arg, unsigned int flags)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (!fuse_allow_current_process(fc))
		return -EACCES;

	if (is_bad_inode(inode))
		return -EIO;

	return fuse_do_ioctl(file, cmd, arg, flags);
}

static long fuse_file_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	return fuse_ioctl_common(file, cmd, arg, 0);
}

static long fuse_file_compat_ioctl(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	return fuse_ioctl_common(file, cmd, arg, FUSE_IOCTL_COMPAT);
}

/*
 * All files which have been polled are linked to RB tree
 * fuse_conn->polled_files which is indexed by kh.  Walk the tree and
 * find the matching one.
 */
static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh,
					      struct rb_node **parent_out)
{
	struct rb_node **link = &fc->polled_files.rb_node;
	struct rb_node *last = NULL;

	while (*link) {
		struct fuse_file *ff;

		last = *link;
		ff = rb_entry(last, struct fuse_file, polled_node);

		if (kh < ff->kh)
			link = &last->rb_left;
		else if (kh > ff->kh)
			link = &last->rb_right;
		else
			return link;
	}

	if (parent_out)
		*parent_out = last;
	return link;
}

/*
 * The file is about to be polled.  Make sure it's on the polled_files
 * RB tree.  Note that files once added to the polled_files tree are
 * not removed before the file is released.  This is because a file
 * polled once is likely to be polled again.
 */
static void fuse_register_polled_file(struct fuse_conn *fc,
				      struct fuse_file *ff)
{
	spin_lock(&fc->lock);
	if (RB_EMPTY_NODE(&ff->polled_node)) {
		struct rb_node **link, *parent;

		link = fuse_find_polled_node(fc, ff->kh, &parent);
		BUG_ON(*link);
		rb_link_node(&ff->polled_node, parent, link);
		rb_insert_color(&ff->polled_node, &fc->polled_files);
	}
	spin_unlock(&fc->lock);
}

unsigned fuse_file_poll(struct file *file, poll_table *wait)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh };
	struct fuse_poll_out outarg;
	struct fuse_req *req;
	int err;

	if (fc->no_poll)
		return DEFAULT_POLLMASK;

	poll_wait(file, &ff->poll_wait, wait);
	inarg.events = (__u32)poll_requested_events(wait);

	/*
	 * Ask for notification iff there's someone waiting for it.
	 * The client may ignore the flag and always notify.
	 */
	if (waitqueue_active(&ff->poll_wait)) {
		inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY;
		fuse_register_polled_file(fc, ff);
	}

	req = fuse_get_req_nopages(fc);
	if (IS_ERR(req))
		return POLLERR;

	req->in.h.opcode = FUSE_POLL;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	if (!err)
		return outarg.revents;
	if (err == -ENOSYS) {
		fc->no_poll = 1;
		return DEFAULT_POLLMASK;
	}
	return POLLERR;
}
EXPORT_SYMBOL_GPL(fuse_file_poll);

/*
 * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and
 * wakes up the poll waiters.
 */
int fuse_notify_poll_wakeup(struct fuse_conn *fc,
			    struct fuse_notify_poll_wakeup_out *outarg)
{
	u64 kh = outarg->kh;
	struct rb_node **link;

	spin_lock(&fc->lock);

	link = fuse_find_polled_node(fc, kh, NULL);
	if (*link) {
		struct fuse_file *ff;

		ff = rb_entry(*link, struct fuse_file, polled_node);
		wake_up_interruptible_sync(&ff->poll_wait);
	}

	spin_unlock(&fc->lock);
	return 0;
}

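/*
 * Illustrative protocol note (added, not from the original source): the
 * poll round trip is (1) the kernel sends FUSE_POLL, possibly with
 * FUSE_POLL_SCHEDULE_NOTIFY set and the file registered in polled_files;
 * (2) the server replies with the current event mask; (3) when readiness
 * changes later, the server sends a FUSE_NOTIFY_POLL notification carrying
 * the same kh, which lands here and wakes the waiters on ff->poll_wait.
 */
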
static ssize_t
fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
	       loff_t offset, unsigned long nr_segs)
{
	ssize_t ret = 0;
	struct file *file = NULL;
	loff_t pos = 0;

	file = iocb->ki_filp;
	pos = offset;

	if (rw == WRITE)
		ret = __fuse_direct_write(file, iov, nr_segs, &pos);
	else
		ret = __fuse_direct_read(file, iov, nr_segs, &pos);

	return ret;
}

static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
				loff_t length)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_req *req;
	struct fuse_fallocate_in inarg = {
		.fh = ff->fh,
		.offset = offset,
		.length = length,
		.mode = mode
	};
	int err;

	if (fc->no_fallocate)
		return -EOPNOTSUPP;

	req = fuse_get_req_nopages(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->in.h.opcode = FUSE_FALLOCATE;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	if (err == -ENOSYS) {
		fc->no_fallocate = 1;
		err = -EOPNOTSUPP;
	}
	fuse_put_request(fc, req);

	return err;
}

static const struct file_operations fuse_file_operations = {
	.llseek		= fuse_file_llseek,
	.read		= do_sync_read,
	.aio_read	= fuse_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= fuse_file_aio_write,
	.mmap		= fuse_file_mmap,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.flock		= fuse_file_flock,
	.splice_read	= generic_file_splice_read,
	.unlocked_ioctl	= fuse_file_ioctl,
	.compat_ioctl	= fuse_file_compat_ioctl,
	.poll		= fuse_file_poll,
	.fallocate	= fuse_file_fallocate,
};

static const struct file_operations fuse_direct_io_file_operations = {
	.llseek		= fuse_file_llseek,
	.read		= fuse_direct_read,
	.write		= fuse_direct_write,
	.mmap		= fuse_direct_mmap,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.flock		= fuse_file_flock,
	.unlocked_ioctl	= fuse_file_ioctl,
	.compat_ioctl	= fuse_file_compat_ioctl,
	.poll		= fuse_file_poll,
	.fallocate	= fuse_file_fallocate,
	/* no splice_read */
};

static const struct address_space_operations fuse_file_aops  = {
	.readpage	= fuse_readpage,
	.writepage	= fuse_writepage,
	.launder_page	= fuse_launder_page,
	.readpages	= fuse_readpages,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.bmap		= fuse_bmap,
	.direct_IO	= fuse_direct_IO,
};

void fuse_init_file_inode(struct inode *inode)
{
	inode->i_fop = &fuse_file_operations;
	inode->i_data.a_ops = &fuse_file_aops;
}