mm: page_mkwrite change prototype to match fault
fs/fuse/file.c
/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>

static const struct file_operations fuse_direct_io_file_operations;

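/*
 * Send FUSE_OPEN or FUSE_OPENDIR to userspace and wait for the reply.
 * Flags the filesystem never needs to act on (O_CREAT, O_EXCL,
 * O_NOCTTY) are masked out here; O_TRUNC is also masked out unless
 * the connection negotiated atomic O_TRUNC handling.
 */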
static int fuse_send_open(struct inode *inode, struct file *file, int isdir,
			  struct fuse_open_out *outargp)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_open_in inarg;
	struct fuse_req *req;
	int err;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
	if (!fc->atomic_o_trunc)
		inarg.flags &= ~O_TRUNC;
	req->in.h.opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(*outargp);
	req->out.args[0].value = outargp;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	return err;
}

struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
{
	struct fuse_file *ff;
	ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
	if (ff) {
		ff->reserved_req = fuse_request_alloc();
		if (!ff->reserved_req) {
			kfree(ff);
			return NULL;
		} else {
			INIT_LIST_HEAD(&ff->write_entry);
			atomic_set(&ff->count, 0);
			spin_lock(&fc->lock);
			ff->kh = ++fc->khctr;
			spin_unlock(&fc->lock);
		}
		RB_CLEAR_NODE(&ff->polled_node);
		init_waitqueue_head(&ff->poll_wait);
	}
	return ff;
}

void fuse_file_free(struct fuse_file *ff)
{
	fuse_request_free(ff->reserved_req);
	kfree(ff);
}

static struct fuse_file *fuse_file_get(struct fuse_file *ff)
{
	atomic_inc(&ff->count);
	return ff;
}

static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
{
	dput(req->misc.release.dentry);
	mntput(req->misc.release.vfsmount);
}

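/*
 * Drop a reference to the file.  When the last reference goes away
 * the RELEASE request (prepared in the reserved request by
 * fuse_release_fill()) is sent in the background, so a slow or dead
 * filesystem cannot block the task closing the file.
 */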
static void fuse_file_put(struct fuse_file *ff)
{
	if (atomic_dec_and_test(&ff->count)) {
		struct fuse_req *req = ff->reserved_req;
		struct inode *inode = req->misc.release.dentry->d_inode;
		struct fuse_conn *fc = get_fuse_conn(inode);
		req->end = fuse_release_end;
		fuse_request_send_background(fc, req);
		kfree(ff);
	}
}

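/*
 * Apply the open flags returned by the filesystem: switch to direct
 * I/O operations if requested, drop cached pages unless the
 * filesystem asked for them to be kept, and mark the file
 * nonseekable if necessary.
 */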
void fuse_finish_open(struct inode *inode, struct file *file,
		      struct fuse_file *ff, struct fuse_open_out *outarg)
{
	if (outarg->open_flags & FOPEN_DIRECT_IO)
		file->f_op = &fuse_direct_io_file_operations;
	if (!(outarg->open_flags & FOPEN_KEEP_CACHE))
		invalidate_inode_pages2(inode->i_mapping);
	if (outarg->open_flags & FOPEN_NONSEEKABLE)
		nonseekable_open(inode, file);
	ff->fh = outarg->fh;
	file->private_data = fuse_file_get(ff);
}

int fuse_open_common(struct inode *inode, struct file *file, int isdir)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_open_out outarg;
	struct fuse_file *ff;
	int err;

	/* VFS checks this, but only _after_ ->open() */
	if (file->f_flags & O_DIRECT)
		return -EINVAL;

	err = generic_file_open(inode, file);
	if (err)
		return err;

	ff = fuse_file_alloc(fc);
	if (!ff)
		return -ENOMEM;

	err = fuse_send_open(inode, file, isdir, &outarg);
	if (err)
		fuse_file_free(ff);
	else {
		if (isdir)
			outarg.open_flags &= ~FOPEN_DIRECT_IO;
		fuse_finish_open(inode, file, ff, &outarg);
	}

	return err;
}

void fuse_release_fill(struct fuse_file *ff, u64 nodeid, int flags, int opcode)
{
	struct fuse_req *req = ff->reserved_req;
	struct fuse_release_in *inarg = &req->misc.release.in;

	inarg->fh = ff->fh;
	inarg->flags = flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_release_in);
	req->in.args[0].value = inarg;
}

int fuse_release_common(struct inode *inode, struct file *file, int isdir)
{
	struct fuse_file *ff = file->private_data;
	if (ff) {
		struct fuse_conn *fc = get_fuse_conn(inode);
		struct fuse_req *req = ff->reserved_req;

		fuse_release_fill(ff, get_node_id(inode), file->f_flags,
				  isdir ? FUSE_RELEASEDIR : FUSE_RELEASE);

		/* Hold vfsmount and dentry until release is finished */
		req->misc.release.vfsmount = mntget(file->f_path.mnt);
		req->misc.release.dentry = dget(file->f_path.dentry);

		spin_lock(&fc->lock);
		list_del(&ff->write_entry);
		if (!RB_EMPTY_NODE(&ff->polled_node))
			rb_erase(&ff->polled_node, &fc->polled_files);
		spin_unlock(&fc->lock);

		wake_up_interruptible_sync(&ff->poll_wait);
		/*
		 * Normally this will send the RELEASE request,
		 * however if some asynchronous READ or WRITE requests
		 * are outstanding, the sending will be delayed
		 */
		fuse_file_put(ff);
	}

	/* Return value is ignored by VFS */
	return 0;
}

static int fuse_open(struct inode *inode, struct file *file)
{
	return fuse_open_common(inode, file, 0);
}

static int fuse_release(struct inode *inode, struct file *file)
{
	return fuse_release_common(inode, file, 0);
}

/*
 * Scramble the ID space with XTEA, so that the value of the files_struct
 * pointer is not exposed to userspace.
 */
u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
{
	u32 *k = fc->scramble_key;
	u64 v = (unsigned long) id;
	u32 v0 = v;
	u32 v1 = v >> 32;
	u32 sum = 0;
	int i;

	for (i = 0; i < 32; i++) {
		v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
		sum += 0x9E3779B9;
		v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
	}

	return (u64) v0 + ((u64) v1 << 32);
}

/*
 * Check if page is under writeback
 *
 * This is currently done by walking the list of writepage requests
 * for the inode, which can be pretty inefficient.
 */
static bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;
	bool found = false;

	spin_lock(&fc->lock);
	list_for_each_entry(req, &fi->writepages, writepages_entry) {
		pgoff_t curr_index;

		BUG_ON(req->inode != inode);
		curr_index = req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
		if (curr_index == index) {
			found = true;
			break;
		}
	}
	spin_unlock(&fc->lock);

	return found;
}

/*
 * Wait for page writeback to be completed.
 *
 * Since fuse doesn't rely on the VM writeback tracking, this has to
 * use some other means.
 */
static int fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
	return 0;
}

static int fuse_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_flush_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	if (fc->no_flush)
		return 0;

	req = fuse_get_req_nofail(fc, file);
	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.lock_owner = fuse_lock_owner_id(fc, id);
	req->in.h.opcode = FUSE_FLUSH;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->force = 1;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		fc->no_flush = 1;
		err = 0;
	}
	return err;
}

/*
 * Wait for all pending writepages on the inode to finish.
 *
 * This is currently done by blocking further writes with FUSE_NOWRITE
 * and waiting for all sent writes to complete.
 *
 * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage
 * could conflict with truncation.
 */
static void fuse_sync_writes(struct inode *inode)
{
	fuse_set_nowrite(inode);
	fuse_release_nowrite(inode);
}

int fuse_fsync_common(struct file *file, struct dentry *de, int datasync,
		      int isdir)
{
	struct inode *inode = de->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_fsync_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
		return 0;

	/*
	 * Start writeback against all dirty pages of the inode, then
	 * wait for all outstanding writes, before sending the FSYNC
	 * request.
	 */
	err = write_inode_now(inode, 0);
	if (err)
		return err;

	fuse_sync_writes(inode);

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.fsync_flags = datasync ? 1 : 0;
	req->in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		if (isdir)
			fc->no_fsyncdir = 1;
		else
			fc->no_fsync = 1;
		err = 0;
	}
	return err;
}

static int fuse_fsync(struct file *file, struct dentry *de, int datasync)
{
	return fuse_fsync_common(file, de, datasync, 0);
}

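/*
 * Fill in a READ request.  The reply data is returned in the
 * request's pages; out.argvar allows the filesystem to return fewer
 * bytes than requested (a short read).
 */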
void fuse_read_fill(struct fuse_req *req, struct file *file,
		    struct inode *inode, loff_t pos, size_t count, int opcode)
{
	struct fuse_read_in *inarg = &req->misc.read.in;
	struct fuse_file *ff = file->private_data;

	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	inarg->flags = file->f_flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_read_in);
	req->in.args[0].value = inarg;
	req->out.argpages = 1;
	req->out.argvar = 1;
	req->out.numargs = 1;
	req->out.args[0].size = count;
}

static size_t fuse_send_read(struct fuse_req *req, struct file *file,
			     struct inode *inode, loff_t pos, size_t count,
			     fl_owner_t owner)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	fuse_read_fill(req, file, inode, pos, count, FUSE_READ);
	if (owner != NULL) {
		struct fuse_read_in *inarg = &req->misc.read.in;

		inarg->read_flags |= FUSE_READ_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}
	fuse_request_send(fc, req);
	return req->out.args[0].size;
}

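/*
 * A short read means the file shrank on the filesystem side.  Update
 * the cached i_size, but only if the attributes haven't changed since
 * the read was issued (attr_ver check), to avoid clobbering a newer
 * size.
 */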
static void fuse_read_update_size(struct inode *inode, loff_t size,
				  u64 attr_ver)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fc->lock);
	if (attr_ver == fi->attr_version && size < inode->i_size) {
		fi->attr_version = ++fc->attr_version;
		i_size_write(inode, size);
	}
	spin_unlock(&fc->lock);
}

static int fuse_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	size_t num_read;
	loff_t pos = page_offset(page);
	size_t count = PAGE_CACHE_SIZE;
	u64 attr_ver;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	/*
	 * Page writeback can extend beyond the lifetime of the
	 * page-cache page, so make sure we read a properly synced
	 * page.
	 */
	fuse_wait_on_page_writeback(inode, page->index);

	req = fuse_get_req(fc);
	err = PTR_ERR(req);
	if (IS_ERR(req))
		goto out;

	attr_ver = fuse_get_attr_version(fc);

	req->out.page_zeroing = 1;
	req->num_pages = 1;
	req->pages[0] = page;
	num_read = fuse_send_read(req, file, inode, pos, count, NULL);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	if (!err) {
		/*
		 * Short read means EOF.  If file size is larger, truncate it
		 */
		if (num_read < count)
			fuse_read_update_size(inode, pos + num_read, attr_ver);

		SetPageUptodate(page);
	}

	fuse_invalidate_attr(inode); /* atime changed */
 out:
	unlock_page(page);
	return err;
}

static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
{
	int i;
	size_t count = req->misc.read.in.size;
	size_t num_read = req->out.args[0].size;
	struct inode *inode = req->pages[0]->mapping->host;

	/*
	 * Short read means EOF.  If file size is larger, truncate it
	 */
	if (!req->out.h.error && num_read < count) {
		loff_t pos = page_offset(req->pages[0]) + num_read;
		fuse_read_update_size(inode, pos, req->misc.read.attr_ver);
	}

	fuse_invalidate_attr(inode); /* atime changed */

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (!req->out.h.error)
			SetPageUptodate(page);
		else
			SetPageError(page);
		unlock_page(page);
	}
	if (req->ff)
		fuse_file_put(req->ff);
}

static void fuse_send_readpages(struct fuse_req *req, struct file *file,
				struct inode *inode)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	loff_t pos = page_offset(req->pages[0]);
	size_t count = req->num_pages << PAGE_CACHE_SHIFT;
	req->out.page_zeroing = 1;
	fuse_read_fill(req, file, inode, pos, count, FUSE_READ);
	req->misc.read.attr_ver = fuse_get_attr_version(fc);
	if (fc->async_read) {
		struct fuse_file *ff = file->private_data;
		req->ff = fuse_file_get(ff);
		req->end = fuse_readpages_end;
		fuse_request_send_background(fc, req);
	} else {
		fuse_request_send(fc, req);
		fuse_readpages_end(fc, req);
		fuse_put_request(fc, req);
	}
}

struct fuse_fill_data {
	struct fuse_req *req;
	struct file *file;
	struct inode *inode;
};

static int fuse_readpages_fill(void *_data, struct page *page)
{
	struct fuse_fill_data *data = _data;
	struct fuse_req *req = data->req;
	struct inode *inode = data->inode;
	struct fuse_conn *fc = get_fuse_conn(inode);

	fuse_wait_on_page_writeback(inode, page->index);

	if (req->num_pages &&
	    (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
	     (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read ||
	     req->pages[req->num_pages - 1]->index + 1 != page->index)) {
		fuse_send_readpages(req, data->file, inode);
		data->req = req = fuse_get_req(fc);
		if (IS_ERR(req)) {
			unlock_page(page);
			return PTR_ERR(req);
		}
	}
	req->pages[req->num_pages] = page;
	req->num_pages++;
	return 0;
}

static int fuse_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_fill_data data;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	data.file = file;
	data.inode = inode;
	data.req = fuse_get_req(fc);
	err = PTR_ERR(data.req);
	if (IS_ERR(data.req))
		goto out;

	err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
	if (!err) {
		if (data.req->num_pages)
			fuse_send_readpages(data.req, file, inode);
		else
			fuse_put_request(fc, data.req);
	}
out:
	return err;
}

static ssize_t fuse_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
				  unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;

	if (pos + iov_length(iov, nr_segs) > i_size_read(inode)) {
		int err;
		/*
		 * If trying to read past EOF, make sure the i_size
		 * attribute is up-to-date.
		 */
		err = fuse_update_attributes(inode, NULL, iocb->ki_filp, NULL);
		if (err)
			return err;
	}

	return generic_file_aio_read(iocb, iov, nr_segs, pos);
}

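/*
 * Fill in a WRITE request.  For connections with minor version < 9
 * only the initial, smaller part of struct fuse_write_in is sent, for
 * compatibility with older userspace.
 */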
static void fuse_write_fill(struct fuse_req *req, struct file *file,
			    struct fuse_file *ff, struct inode *inode,
			    loff_t pos, size_t count, int writepage)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_write_in *inarg = &req->misc.write.in;
	struct fuse_write_out *outarg = &req->misc.write.out;

	memset(inarg, 0, sizeof(struct fuse_write_in));
	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	inarg->write_flags = writepage ? FUSE_WRITE_CACHE : 0;
	inarg->flags = file ? file->f_flags : 0;
	req->in.h.opcode = FUSE_WRITE;
	req->in.h.nodeid = get_node_id(inode);
	req->in.argpages = 1;
	req->in.numargs = 2;
	if (fc->minor < 9)
		req->in.args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
	else
		req->in.args[0].size = sizeof(struct fuse_write_in);
	req->in.args[0].value = inarg;
	req->in.args[1].size = count;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(struct fuse_write_out);
	req->out.args[0].value = outarg;
}

static size_t fuse_send_write(struct fuse_req *req, struct file *file,
			      struct inode *inode, loff_t pos, size_t count,
			      fl_owner_t owner)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	fuse_write_fill(req, file, file->private_data, inode, pos, count, 0);
	if (owner != NULL) {
		struct fuse_write_in *inarg = &req->misc.write.in;
		inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}
	fuse_request_send(fc, req);
	return req->misc.write.out.size;
}

static int fuse_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;

	*pagep = grab_cache_page_write_begin(mapping, index, flags);
	if (!*pagep)
		return -ENOMEM;
	return 0;
}

static void fuse_write_update_size(struct inode *inode, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fc->lock);
	fi->attr_version = ++fc->attr_version;
	if (pos > inode->i_size)
		i_size_write(inode, pos);
	spin_unlock(&fc->lock);
}

static int fuse_buffered_write(struct file *file, struct inode *inode,
			       loff_t pos, unsigned count, struct page *page)
{
	int err;
	size_t nres;
	struct fuse_conn *fc = get_fuse_conn(inode);
	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
	struct fuse_req *req;

	if (is_bad_inode(inode))
		return -EIO;

	/*
	 * Make sure writepages on the same page are not mixed up with
	 * plain writes.
	 */
	fuse_wait_on_page_writeback(inode, page->index);

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->num_pages = 1;
	req->pages[0] = page;
	req->page_offset = offset;
	nres = fuse_send_write(req, file, inode, pos, count, NULL);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err && !nres)
		err = -EIO;
	if (!err) {
		pos += nres;
		fuse_write_update_size(inode, pos);
		if (count == PAGE_CACHE_SIZE)
			SetPageUptodate(page);
	}
	fuse_invalidate_attr(inode);
	return err ? err : nres;
}

static int fuse_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	int res = 0;

	if (copied)
		res = fuse_buffered_write(file, inode, pos, copied, page);

	unlock_page(page);
	page_cache_release(page);
	return res;
}

static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
				    struct inode *inode, loff_t pos,
				    size_t count)
{
	size_t res;
	unsigned offset;
	unsigned i;

	for (i = 0; i < req->num_pages; i++)
		fuse_wait_on_page_writeback(inode, req->pages[i]->index);

	res = fuse_send_write(req, file, inode, pos, count, NULL);

	offset = req->page_offset;
	count = res;
	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];

		if (!req->out.h.error && !offset && count >= PAGE_CACHE_SIZE)
			SetPageUptodate(page);

		if (count > PAGE_CACHE_SIZE - offset)
			count -= PAGE_CACHE_SIZE - offset;
		else
			count = 0;
		offset = 0;

		unlock_page(page);
		page_cache_release(page);
	}

	return res;
}

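/*
 * Copy as much data as possible from the user buffers into locked
 * page-cache pages and attach them to the request.  Stops at the
 * connection's max_write limit, and after a single page if the
 * filesystem didn't negotiate big writes.
 */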
static ssize_t fuse_fill_write_pages(struct fuse_req *req,
				     struct address_space *mapping,
				     struct iov_iter *ii, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(mapping->host);
	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
	size_t count = 0;
	int err;

	req->page_offset = offset;

	do {
		size_t tmp;
		struct page *page;
		pgoff_t index = pos >> PAGE_CACHE_SHIFT;
		size_t bytes = min_t(size_t, PAGE_CACHE_SIZE - offset,
				     iov_iter_count(ii));

		bytes = min_t(size_t, bytes, fc->max_write - count);

 again:
		err = -EFAULT;
		if (iov_iter_fault_in_readable(ii, bytes))
			break;

		err = -ENOMEM;
		page = grab_cache_page_write_begin(mapping, index, 0);
		if (!page)
			break;

		pagefault_disable();
		tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
		pagefault_enable();
		flush_dcache_page(page);

		if (!tmp) {
			unlock_page(page);
			page_cache_release(page);
			bytes = min(bytes, iov_iter_single_seg_count(ii));
			goto again;
		}

		err = 0;
		req->pages[req->num_pages] = page;
		req->num_pages++;

		iov_iter_advance(ii, tmp);
		count += tmp;
		pos += tmp;
		offset += tmp;
		if (offset == PAGE_CACHE_SIZE)
			offset = 0;

		if (!fc->big_writes)
			break;
	} while (iov_iter_count(ii) && count < fc->max_write &&
		 req->num_pages < FUSE_MAX_PAGES_PER_REQ && offset == 0);

	return count > 0 ? count : err;
}

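/*
 * Perform a buffered write by batching pages into WRITE requests and
 * sending them synchronously.  A short write from the filesystem
 * terminates the loop with -EIO.
 */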
static ssize_t fuse_perform_write(struct file *file,
				  struct address_space *mapping,
				  struct iov_iter *ii, loff_t pos)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err = 0;
	ssize_t res = 0;

	if (is_bad_inode(inode))
		return -EIO;

	do {
		struct fuse_req *req;
		ssize_t count;

		req = fuse_get_req(fc);
		if (IS_ERR(req)) {
			err = PTR_ERR(req);
			break;
		}

		count = fuse_fill_write_pages(req, mapping, ii, pos);
		if (count <= 0) {
			err = count;
		} else {
			size_t num_written;

			num_written = fuse_send_write_pages(req, file, inode,
							    pos, count);
			err = req->out.h.error;
			if (!err) {
				res += num_written;
				pos += num_written;

				/* break out of the loop on short write */
				if (num_written != count)
					err = -EIO;
			}
		}
		fuse_put_request(fc, req);
	} while (!err && iov_iter_count(ii));

	if (res > 0)
		fuse_write_update_size(inode, pos);

	fuse_invalidate_attr(inode);

	return res > 0 ? res : err;
}

static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				   unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t count = 0;
	ssize_t written = 0;
	struct inode *inode = mapping->host;
	ssize_t err;
	struct iov_iter i;

	WARN_ON(iocb->ki_pos != pos);

	err = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
	if (err)
		return err;

	mutex_lock(&inode->i_mutex);
	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;

	if (count == 0)
		goto out;

	err = file_remove_suid(file);
	if (err)
		goto out;

	file_update_time(file);

	iov_iter_init(&i, iov, nr_segs, count, 0);
	written = fuse_perform_write(file, mapping, &i, pos);
	if (written >= 0)
		iocb->ki_pos = pos + written;

out:
	current->backing_dev_info = NULL;
	mutex_unlock(&inode->i_mutex);

	return written ? written : err;
}

static void fuse_release_user_pages(struct fuse_req *req, int write)
{
	unsigned i;

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (write)
			set_page_dirty_lock(page);
		put_page(page);
	}
}

static int fuse_get_user_pages(struct fuse_req *req, const char __user *buf,
			       unsigned nbytes, int write)
{
	unsigned long user_addr = (unsigned long) buf;
	unsigned offset = user_addr & ~PAGE_MASK;
	int npages;

	/* This doesn't work with nfsd */
	if (!current->mm)
		return -EPERM;

	nbytes = min(nbytes, (unsigned) FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
	npages = (nbytes + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	npages = clamp(npages, 1, FUSE_MAX_PAGES_PER_REQ);
	down_read(&current->mm->mmap_sem);
	npages = get_user_pages(current, current->mm, user_addr, npages, write,
				0, req->pages, NULL);
	up_read(&current->mm->mmap_sem);
	if (npages < 0)
		return npages;

	req->num_pages = npages;
	req->page_offset = offset;
	return 0;
}

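/*
 * Direct I/O: pin the user buffer and transfer directly between it
 * and the filesystem, bypassing the page cache.  Note that the pages
 * are mapped for the opposite direction of the transfer: a file read
 * writes into the user buffer, hence the "!write" below.
 */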
static ssize_t fuse_direct_io(struct file *file, const char __user *buf,
			      size_t count, loff_t *ppos, int write)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	size_t nmax = write ? fc->max_write : fc->max_read;
	loff_t pos = *ppos;
	ssize_t res = 0;
	struct fuse_req *req;

	if (is_bad_inode(inode))
		return -EIO;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	while (count) {
		size_t nres;
		size_t nbytes_limit = min(count, nmax);
		size_t nbytes;
		int err = fuse_get_user_pages(req, buf, nbytes_limit, !write);
		if (err) {
			res = err;
			break;
		}
		nbytes = (req->num_pages << PAGE_SHIFT) - req->page_offset;
		nbytes = min(nbytes_limit, nbytes);
		if (write)
			nres = fuse_send_write(req, file, inode, pos, nbytes,
					       current->files);
		else
			nres = fuse_send_read(req, file, inode, pos, nbytes,
					      current->files);
		fuse_release_user_pages(req, !write);
		if (req->out.h.error) {
			if (!res)
				res = req->out.h.error;
			break;
		} else if (nres > nbytes) {
			res = -EIO;
			break;
		}
		count -= nres;
		res += nres;
		pos += nres;
		buf += nres;
		if (nres != nbytes)
			break;
		if (count) {
			fuse_put_request(fc, req);
			req = fuse_get_req(fc);
			if (IS_ERR(req))
				break;
		}
	}
	fuse_put_request(fc, req);
	if (res > 0) {
		if (write)
			fuse_write_update_size(inode, pos);
		*ppos = pos;
	}
	fuse_invalidate_attr(inode);

	return res;
}

static ssize_t fuse_direct_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	return fuse_direct_io(file, buf, count, ppos, 0);
}

static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	ssize_t res;
	/* Don't allow parallel writes to the same file */
	mutex_lock(&inode->i_mutex);
	res = generic_write_checks(file, ppos, &count, 0);
	if (!res)
		res = fuse_direct_io(file, buf, count, ppos, 1);
	mutex_unlock(&inode->i_mutex);
	return res;
}

static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
{
	__free_page(req->pages[0]);
	fuse_file_put(req->ff);
}

static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
{
	struct inode *inode = req->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;

	list_del(&req->writepages_entry);
	dec_bdi_stat(bdi, BDI_WRITEBACK);
	dec_zone_page_state(req->pages[0], NR_WRITEBACK_TEMP);
	bdi_writeout_inc(bdi);
	wake_up(&fi->page_waitq);
}

/* Called under fc->lock, may release and reacquire it */
static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	struct fuse_inode *fi = get_fuse_inode(req->inode);
	loff_t size = i_size_read(req->inode);
	struct fuse_write_in *inarg = &req->misc.write.in;

	if (!fc->connected)
		goto out_free;

	if (inarg->offset + PAGE_CACHE_SIZE <= size) {
		inarg->size = PAGE_CACHE_SIZE;
	} else if (inarg->offset < size) {
		inarg->size = size & (PAGE_CACHE_SIZE - 1);
	} else {
		/* Got truncated off completely */
		goto out_free;
	}

	req->in.args[1].size = inarg->size;
	fi->writectr++;
	fuse_request_send_background_locked(fc, req);
	return;

 out_free:
	fuse_writepage_finish(fc, req);
	spin_unlock(&fc->lock);
	fuse_writepage_free(fc, req);
	fuse_put_request(fc, req);
	spin_lock(&fc->lock);
}

/*
 * If fi->writectr is positive (no truncate or fsync going on) send
 * all queued writepage requests.
 *
 * Called with fc->lock held
 */
void fuse_flush_writepages(struct inode *inode)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;

	while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
		req = list_entry(fi->queued_writes.next, struct fuse_req, list);
		list_del_init(&req->list);
		fuse_send_writepage(fc, req);
	}
}

static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
{
	struct inode *inode = req->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);

	mapping_set_error(inode->i_mapping, req->out.h.error);
	spin_lock(&fc->lock);
	fi->writectr--;
	fuse_writepage_finish(fc, req);
	spin_unlock(&fc->lock);
	fuse_writepage_free(fc, req);
}

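/*
 * Write back a dirty page: the data is copied to a temporary page, so
 * the original page can be marked clean and its writeback ended
 * immediately, and a misbehaving userspace filesystem cannot hold up
 * page reclaim or sync(2) by sitting on the request.
 */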
static int fuse_writepage_locked(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;
	struct fuse_file *ff;
	struct page *tmp_page;

	set_page_writeback(page);

	req = fuse_request_alloc_nofs();
	if (!req)
		goto err;

	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!tmp_page)
		goto err_free;

	spin_lock(&fc->lock);
	BUG_ON(list_empty(&fi->write_files));
	ff = list_entry(fi->write_files.next, struct fuse_file, write_entry);
	req->ff = fuse_file_get(ff);
	spin_unlock(&fc->lock);

	fuse_write_fill(req, NULL, ff, inode, page_offset(page), 0, 1);

	copy_highpage(tmp_page, page);
	req->num_pages = 1;
	req->pages[0] = tmp_page;
	req->page_offset = 0;
	req->end = fuse_writepage_end;
	req->inode = inode;

	inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK);
	inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
	end_page_writeback(page);

	spin_lock(&fc->lock);
	list_add(&req->writepages_entry, &fi->writepages);
	list_add_tail(&req->list, &fi->queued_writes);
	fuse_flush_writepages(inode);
	spin_unlock(&fc->lock);

	return 0;

err_free:
	fuse_request_free(req);
err:
	end_page_writeback(page);
	return -ENOMEM;
}

static int fuse_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;

	err = fuse_writepage_locked(page);
	unlock_page(page);

	return err;
}

static int fuse_launder_page(struct page *page)
{
	int err = 0;
	if (clear_page_dirty_for_io(page)) {
		struct inode *inode = page->mapping->host;
		err = fuse_writepage_locked(page);
		if (!err)
			fuse_wait_on_page_writeback(inode, page->index);
	}
	return err;
}

/*
 * Write back dirty pages now, because there may not be any suitable
 * open files later
 */
static void fuse_vma_close(struct vm_area_struct *vma)
{
	filemap_write_and_wait(vma->vm_file->f_mapping);
}

/*
 * Wait for writeback against this page to complete before allowing it
 * to be marked dirty again, and hence written back again, possibly
 * before the previous writepage completed.
 *
 * Block here, instead of in ->writepage(), so that the userspace fs
 * can only block processes actually operating on the filesystem.
 *
 * Otherwise unprivileged userspace fs would be able to block
 * unrelated:
 *
 * - page migration
 * - sync(2)
 * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
 */
static int fuse_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	/*
	 * Don't use page->mapping as it may become NULL from a
	 * concurrent truncate.
	 */
	struct inode *inode = vma->vm_file->f_mapping->host;

	fuse_wait_on_page_writeback(inode, page->index);
	return 0;
}

static struct vm_operations_struct fuse_file_vm_ops = {
	.close		= fuse_vma_close,
	.fault		= filemap_fault,
	.page_mkwrite	= fuse_page_mkwrite,
};

static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) {
		struct inode *inode = file->f_dentry->d_inode;
		struct fuse_conn *fc = get_fuse_conn(inode);
		struct fuse_inode *fi = get_fuse_inode(inode);
		struct fuse_file *ff = file->private_data;
		/*
		 * file may be written through mmap, so chain it onto the
		 * inode's write_files list
		 */
		spin_lock(&fc->lock);
		if (list_empty(&ff->write_entry))
			list_add(&ff->write_entry, &fi->write_files);
		spin_unlock(&fc->lock);
	}
	file_accessed(file);
	vma->vm_ops = &fuse_file_vm_ops;
	return 0;
}

static int convert_fuse_file_lock(const struct fuse_file_lock *ffl,
				  struct file_lock *fl)
{
	switch (ffl->type) {
	case F_UNLCK:
		break;

	case F_RDLCK:
	case F_WRLCK:
		if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
		    ffl->end < ffl->start)
			return -EIO;

		fl->fl_start = ffl->start;
		fl->fl_end = ffl->end;
		fl->fl_pid = ffl->pid;
		break;

	default:
		return -EIO;
	}
	fl->fl_type = ffl->type;
	return 0;
}

static void fuse_lk_fill(struct fuse_req *req, struct file *file,
			 const struct file_lock *fl, int opcode, pid_t pid,
			 int flock)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_lk_in *arg = &req->misc.lk_in;

	arg->fh = ff->fh;
	arg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
	arg->lk.start = fl->fl_start;
	arg->lk.end = fl->fl_end;
	arg->lk.type = fl->fl_type;
	arg->lk.pid = pid;
	if (flock)
		arg->lk_flags |= FUSE_LK_FLOCK;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(*arg);
	req->in.args[0].value = arg;
}

static int fuse_getlk(struct file *file, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_lk_out outarg;
	int err;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	fuse_lk_fill(req, file, fl, FUSE_GETLK, 0, 0);
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err)
		err = convert_fuse_file_lock(&outarg.lk, fl);

	return err;
}

static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
	pid_t pid = fl->fl_type != F_UNLCK ? current->tgid : 0;
	int err;

	if (fl->fl_lmops && fl->fl_lmops->fl_grant) {
		/* NLM needs asynchronous locks, which we don't support yet */
		return -ENOLCK;
	}

	/* Unlock on close is handled by the flush method */
	if (fl->fl_flags & FL_CLOSE)
		return 0;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	fuse_lk_fill(req, file, fl, opcode, pid, flock);
	fuse_request_send(fc, req);
	err = req->out.h.error;
	/* locking is restartable */
	if (err == -EINTR)
		err = -ERESTARTSYS;
	fuse_put_request(fc, req);
	return err;
}

static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (cmd == F_CANCELLK) {
		err = 0;
	} else if (cmd == F_GETLK) {
		if (fc->no_lock) {
			posix_test_lock(file, fl);
			err = 0;
		} else
			err = fuse_getlk(file, fl);
	} else {
		if (fc->no_lock)
			err = posix_lock_file(file, fl, NULL);
		else
			err = fuse_setlk(file, fl, 0);
	}
	return err;
}

static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (fc->no_lock) {
		err = flock_lock_file_wait(file, fl);
	} else {
		/* emulate flock with POSIX locks */
		fl->fl_owner = (fl_owner_t) file;
		err = fuse_setlk(file, fl, 1);
	}

	return err;
}

static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_bmap_in inarg;
	struct fuse_bmap_out outarg;
	int err;

	if (!inode->i_sb->s_bdev || fc->no_bmap)
		return 0;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return 0;

	memset(&inarg, 0, sizeof(inarg));
	inarg.block = block;
	inarg.blocksize = inode->i_sb->s_blocksize;
	req->in.h.opcode = FUSE_BMAP;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS)
		fc->no_bmap = 1;

	return err ? 0 : outarg.block;
}

static loff_t fuse_file_llseek(struct file *file, loff_t offset, int origin)
{
	loff_t retval;
	struct inode *inode = file->f_path.dentry->d_inode;

	mutex_lock(&inode->i_mutex);
	switch (origin) {
	case SEEK_END:
		retval = fuse_update_attributes(inode, NULL, file, NULL);
		if (retval)
			goto exit;
		offset += i_size_read(inode);
		break;
	case SEEK_CUR:
		offset += file->f_pos;
	}
	retval = -EINVAL;
	if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
		}
		retval = offset;
	}
exit:
	mutex_unlock(&inode->i_mutex);
	return retval;
}

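/*
 * Copy ioctl data between the request's page buffers and the iovecs
 * describing userspace memory, in either direction.  The pages are
 * treated as one contiguous buffer.
 */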
static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov,
				unsigned int nr_segs, size_t bytes, bool to_user)
{
	struct iov_iter ii;
	int page_idx = 0;

	if (!bytes)
		return 0;

	iov_iter_init(&ii, iov, nr_segs, bytes, 0);

	while (iov_iter_count(&ii)) {
		struct page *page = pages[page_idx++];
		size_t todo = min_t(size_t, PAGE_SIZE, iov_iter_count(&ii));
		void *kaddr, *map;

		kaddr = map = kmap(page);

		while (todo) {
			char __user *uaddr = ii.iov->iov_base + ii.iov_offset;
			size_t iov_len = ii.iov->iov_len - ii.iov_offset;
			size_t copy = min(todo, iov_len);
			size_t left;

			if (!to_user)
				left = copy_from_user(kaddr, uaddr, copy);
			else
				left = copy_to_user(uaddr, kaddr, copy);

			if (unlikely(left))
				return -EFAULT;

			iov_iter_advance(&ii, copy);
			todo -= copy;
			kaddr += copy;
		}

		kunmap(map);
	}

	return 0;
}

/*
 * For ioctls, there is no generic way to determine how much memory
 * needs to be read and/or written.  Furthermore, ioctls are allowed
 * to dereference the passed pointer, so the parameter requires deep
 * copying but FUSE has no idea whatsoever about what to copy in or
 * out.
 *
 * This is solved by allowing FUSE server to retry ioctl with
 * necessary in/out iovecs.  Let's assume the ioctl implementation
 * needs to read in the following structure.
 *
 * struct a {
 *	char	*buf;
 *	size_t	buflen;
 * }
 *
 * On the first callout to FUSE server, inarg->in_size and
 * inarg->out_size will be zero; then, the server completes the ioctl
 * with FUSE_IOCTL_RETRY set in out->flags, out->in_iovs set to 1 and
 * the actual iov array to
 *
 * { { .iov_base = inarg.arg,	.iov_len = sizeof(struct a) } }
 *
 * which tells FUSE to copy in the requested area and retry the ioctl.
 * On the second round, the server has access to the structure and
 * from that it can tell what to look for next, so on the invocation,
 * it sets FUSE_IOCTL_RETRY, out->in_iovs to 2 and iov array to
 *
 * { { .iov_base = inarg.arg,	.iov_len = sizeof(struct a)	},
 *   { .iov_base = a.buf,	.iov_len = a.buflen		} }
 *
 * FUSE will copy both struct a and the pointed buffer from the
 * process doing the ioctl and retry ioctl with both struct a and the
 * buffer.
 *
 * This time, FUSE server has everything it needs and completes ioctl
 * without FUSE_IOCTL_RETRY which finishes the ioctl call.
 *
 * Copying data out works the same way.
 *
 * Note that if FUSE_IOCTL_UNRESTRICTED is clear, the kernel
 * automatically initializes in and out iovs by decoding @cmd with
 * _IOC_* macros and the server is not allowed to request RETRY.  This
 * limits ioctl data transfers to well-formed ioctls and is the forced
 * behavior for all FUSE servers.
 */
static long fuse_file_do_ioctl(struct file *file, unsigned int cmd,
			       unsigned long arg, unsigned int flags)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_ioctl_in inarg = {
		.fh = ff->fh,
		.cmd = cmd,
		.arg = arg,
		.flags = flags
	};
	struct fuse_ioctl_out outarg;
	struct fuse_req *req = NULL;
	struct page **pages = NULL;
	struct page *iov_page = NULL;
	struct iovec *in_iov = NULL, *out_iov = NULL;
	unsigned int in_iovs = 0, out_iovs = 0, num_pages = 0, max_pages;
	size_t in_size, out_size, transferred;
	int err;

	/* assume all the iovs returned by client always fit in a page */
	BUILD_BUG_ON(sizeof(struct iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE);

	if (!fuse_allow_task(fc, current))
		return -EACCES;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	err = -ENOMEM;
	pages = kzalloc(sizeof(pages[0]) * FUSE_MAX_PAGES_PER_REQ, GFP_KERNEL);
	iov_page = alloc_page(GFP_KERNEL);
	if (!pages || !iov_page)
		goto out;

	/*
	 * If restricted, initialize IO parameters as encoded in @cmd.
	 * RETRY from server is not allowed.
	 */
	if (!(flags & FUSE_IOCTL_UNRESTRICTED)) {
		struct iovec *iov = page_address(iov_page);

		iov->iov_base = (void __user *)arg;
		iov->iov_len = _IOC_SIZE(cmd);

		if (_IOC_DIR(cmd) & _IOC_WRITE) {
			in_iov = iov;
			in_iovs = 1;
		}

		if (_IOC_DIR(cmd) & _IOC_READ) {
			out_iov = iov;
			out_iovs = 1;
		}
	}

 retry:
	inarg.in_size = in_size = iov_length(in_iov, in_iovs);
	inarg.out_size = out_size = iov_length(out_iov, out_iovs);

	/*
	 * Out data can be used either for actual out data or iovs,
	 * make sure there always is at least one page.
	 */
	out_size = max_t(size_t, out_size, PAGE_SIZE);
	max_pages = DIV_ROUND_UP(max(in_size, out_size), PAGE_SIZE);

	/* make sure there are enough buffer pages and init request with them */
	err = -ENOMEM;
	if (max_pages > FUSE_MAX_PAGES_PER_REQ)
		goto out;
	while (num_pages < max_pages) {
		pages[num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
		if (!pages[num_pages])
			goto out;
		num_pages++;
	}

	req = fuse_get_req(fc);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		req = NULL;
		goto out;
	}
	memcpy(req->pages, pages, sizeof(req->pages[0]) * num_pages);
	req->num_pages = num_pages;

	/* okay, let's send it to the client */
	req->in.h.opcode = FUSE_IOCTL;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	if (in_size) {
		req->in.numargs++;
		req->in.args[1].size = in_size;
		req->in.argpages = 1;

		err = fuse_ioctl_copy_user(pages, in_iov, in_iovs, in_size,
					   false);
		if (err)
			goto out;
	}

	req->out.numargs = 2;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	req->out.args[1].size = out_size;
	req->out.argpages = 1;
	req->out.argvar = 1;

	fuse_request_send(fc, req);
	err = req->out.h.error;
	transferred = req->out.args[1].size;
	fuse_put_request(fc, req);
	req = NULL;
	if (err)
		goto out;

	/* did it ask for retry? */
	if (outarg.flags & FUSE_IOCTL_RETRY) {
		char *vaddr;

		/* no retry if in restricted mode */
		err = -EIO;
		if (!(flags & FUSE_IOCTL_UNRESTRICTED))
			goto out;

		in_iovs = outarg.in_iovs;
		out_iovs = outarg.out_iovs;

		/*
		 * Make sure things are in boundary, separate checks
		 * are to protect against overflow.
		 */
		err = -ENOMEM;
		if (in_iovs > FUSE_IOCTL_MAX_IOV ||
		    out_iovs > FUSE_IOCTL_MAX_IOV ||
		    in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV)
			goto out;

		err = -EIO;
		if ((in_iovs + out_iovs) * sizeof(struct iovec) != transferred)
			goto out;

		/* okay, copy in iovs and retry */
		vaddr = kmap_atomic(pages[0], KM_USER0);
		memcpy(page_address(iov_page), vaddr, transferred);
		kunmap_atomic(vaddr, KM_USER0);

		in_iov = page_address(iov_page);
		out_iov = in_iov + in_iovs;

		goto retry;
	}

	err = -EIO;
	if (transferred > inarg.out_size)
		goto out;

	err = fuse_ioctl_copy_user(pages, out_iov, out_iovs, transferred, true);
 out:
	if (req)
		fuse_put_request(fc, req);
	if (iov_page)
		__free_page(iov_page);
	while (num_pages)
		__free_page(pages[--num_pages]);
	kfree(pages);

	return err ? err : outarg.result;
}

static long fuse_file_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	return fuse_file_do_ioctl(file, cmd, arg, 0);
}

static long fuse_file_compat_ioctl(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	return fuse_file_do_ioctl(file, cmd, arg, FUSE_IOCTL_COMPAT);
}

/*
 * All files which have been polled are linked to RB tree
 * fuse_conn->polled_files which is indexed by kh.  Walk the tree and
 * find the matching one.
 */
static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh,
					      struct rb_node **parent_out)
{
	struct rb_node **link = &fc->polled_files.rb_node;
	struct rb_node *last = NULL;

	while (*link) {
		struct fuse_file *ff;

		last = *link;
		ff = rb_entry(last, struct fuse_file, polled_node);

		if (kh < ff->kh)
			link = &last->rb_left;
		else if (kh > ff->kh)
			link = &last->rb_right;
		else
			return link;
	}

	if (parent_out)
		*parent_out = last;
	return link;
}

/*
 * The file is about to be polled.  Make sure it's on the polled_files
 * RB tree.  Note that files once added to the polled_files tree are
 * not removed before the file is released.  This is because a file
 * polled once is likely to be polled again.
 */
static void fuse_register_polled_file(struct fuse_conn *fc,
				      struct fuse_file *ff)
{
	spin_lock(&fc->lock);
	if (RB_EMPTY_NODE(&ff->polled_node)) {
		struct rb_node **link, *parent;

		link = fuse_find_polled_node(fc, ff->kh, &parent);
		BUG_ON(*link);
		rb_link_node(&ff->polled_node, parent, link);
		rb_insert_color(&ff->polled_node, &fc->polled_files);
	}
	spin_unlock(&fc->lock);
}

static unsigned fuse_file_poll(struct file *file, poll_table *wait)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh };
	struct fuse_poll_out outarg;
	struct fuse_req *req;
	int err;

	if (fc->no_poll)
		return DEFAULT_POLLMASK;

	poll_wait(file, &ff->poll_wait, wait);

	/*
	 * Ask for notification iff there's someone waiting for it.
	 * The client may ignore the flag and always notify.
	 */
	if (waitqueue_active(&ff->poll_wait)) {
		inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY;
		fuse_register_polled_file(fc, ff);
	}

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->in.h.opcode = FUSE_POLL;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	if (!err)
		return outarg.revents;
	if (err == -ENOSYS) {
		fc->no_poll = 1;
		return DEFAULT_POLLMASK;
	}
	return POLLERR;
}

/*
 * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and
 * wakes up the poll waiters.
 */
int fuse_notify_poll_wakeup(struct fuse_conn *fc,
			    struct fuse_notify_poll_wakeup_out *outarg)
{
	u64 kh = outarg->kh;
	struct rb_node **link;

	spin_lock(&fc->lock);

	link = fuse_find_polled_node(fc, kh, NULL);
	if (*link) {
		struct fuse_file *ff;

		ff = rb_entry(*link, struct fuse_file, polled_node);
		wake_up_interruptible_sync(&ff->poll_wait);
	}

	spin_unlock(&fc->lock);
	return 0;
}

static const struct file_operations fuse_file_operations = {
	.llseek		= fuse_file_llseek,
	.read		= do_sync_read,
	.aio_read	= fuse_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= fuse_file_aio_write,
	.mmap		= fuse_file_mmap,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.flock		= fuse_file_flock,
	.splice_read	= generic_file_splice_read,
	.unlocked_ioctl	= fuse_file_ioctl,
	.compat_ioctl	= fuse_file_compat_ioctl,
	.poll		= fuse_file_poll,
};

static const struct file_operations fuse_direct_io_file_operations = {
	.llseek		= fuse_file_llseek,
	.read		= fuse_direct_read,
	.write		= fuse_direct_write,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.flock		= fuse_file_flock,
	.unlocked_ioctl	= fuse_file_ioctl,
	.compat_ioctl	= fuse_file_compat_ioctl,
	.poll		= fuse_file_poll,
	/* no mmap and splice_read */
};

static const struct address_space_operations fuse_file_aops = {
	.readpage	= fuse_readpage,
	.writepage	= fuse_writepage,
	.launder_page	= fuse_launder_page,
	.write_begin	= fuse_write_begin,
	.write_end	= fuse_write_end,
	.readpages	= fuse_readpages,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.bmap		= fuse_bmap,
};

void fuse_init_file_inode(struct inode *inode)
{
	inode->i_fop = &fuse_file_operations;
	inode->i_data.a_ops = &fuse_file_aops;
}