[PATCH] fuse: consolidate device errors
/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2006  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);

static kmem_cache_t *fuse_req_cachep;

static struct fuse_conn *fuse_get_conn(struct file *file)
{
        /*
         * Lockless access is OK, because file->private_data is set
         * once during mount and is valid until the file is released.
         */
        return file->private_data;
}

static void fuse_request_init(struct fuse_req *req)
{
        memset(req, 0, sizeof(*req));
        INIT_LIST_HEAD(&req->list);
        init_waitqueue_head(&req->waitq);
        atomic_set(&req->count, 1);
}

struct fuse_req *fuse_request_alloc(void)
{
        struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, SLAB_KERNEL);
        if (req)
                fuse_request_init(req);
        return req;
}

void fuse_request_free(struct fuse_req *req)
{
        kmem_cache_free(fuse_req_cachep, req);
}

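/*
 * Editorial note: SLAB_KERNEL is this kernel generation's slab-layer
 * alias for GFP_KERNEL, so fuse_request_alloc() may sleep and may
 * return NULL on memory pressure.
 */
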
static void block_sigs(sigset_t *oldset)
{
        sigset_t mask;

        siginitsetinv(&mask, sigmask(SIGKILL));
        sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void restore_sigs(sigset_t *oldset)
{
        sigprocmask(SIG_SETMASK, oldset, NULL);
}

/*
 * Reset the request, so that it can be reused
 *
 * The caller must be _very_ careful to make sure that it is holding
 * the only reference to req
 */
void fuse_reset_request(struct fuse_req *req)
{
        int preallocated = req->preallocated;
        BUG_ON(atomic_read(&req->count) != 1);
        fuse_request_init(req);
        req->preallocated = preallocated;
}

static void __fuse_get_request(struct fuse_req *req)
{
        atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
        BUG_ON(atomic_read(&req->count) < 2);
        atomic_dec(&req->count);
}

static struct fuse_req *do_get_request(struct fuse_conn *fc)
{
        struct fuse_req *req;

        spin_lock(&fc->lock);
        BUG_ON(list_empty(&fc->unused_list));
        req = list_entry(fc->unused_list.next, struct fuse_req, list);
        list_del_init(&req->list);
        spin_unlock(&fc->lock);
        fuse_request_init(req);
        req->preallocated = 1;
        req->in.h.uid = current->fsuid;
        req->in.h.gid = current->fsgid;
        req->in.h.pid = current->pid;
        return req;
}

/* This can return NULL, but only in case it's interrupted by a SIGKILL */
struct fuse_req *fuse_get_request(struct fuse_conn *fc)
{
        int intr;
        sigset_t oldset;

        atomic_inc(&fc->num_waiting);
        block_sigs(&oldset);
        intr = down_interruptible(&fc->outstanding_sem);
        restore_sigs(&oldset);
        if (intr) {
                atomic_dec(&fc->num_waiting);
                return NULL;
        }
        return do_get_request(fc);
}

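/*
 * Illustrative sketch (editorial, not from the original file): callers
 * treat a NULL return as an interrupted wait.  A typical synchronous
 * operation looks roughly like this, with the opcode and arguments
 * depending on the operation:
 *
 *        struct fuse_req *req = fuse_get_request(fc);
 *        if (!req)
 *                return -EINTR;
 *        ... fill in req->in.h.opcode and req->in.args ...
 *        request_send(fc, req);
 *        err = req->out.h.error;
 *        fuse_put_request(fc, req);
 */
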
/* Must be called with fc->lock held */
static void fuse_putback_request(struct fuse_conn *fc, struct fuse_req *req)
{
        if (req->preallocated) {
                atomic_dec(&fc->num_waiting);
                list_add(&req->list, &fc->unused_list);
        } else
                fuse_request_free(req);

        /* If we are in debt, decrease that first */
        if (fc->outstanding_debt)
                fc->outstanding_debt--;
        else
                up(&fc->outstanding_sem);
}

void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
        if (atomic_dec_and_test(&req->count)) {
                spin_lock(&fc->lock);
                fuse_putback_request(fc, req);
                spin_unlock(&fc->lock);
        }
}

static void fuse_put_request_locked(struct fuse_conn *fc, struct fuse_req *req)
{
        if (atomic_dec_and_test(&req->count))
                fuse_putback_request(fc, req);
}

void fuse_release_background(struct fuse_conn *fc, struct fuse_req *req)
{
        iput(req->inode);
        iput(req->inode2);
        if (req->file)
                fput(req->file);
        spin_lock(&fc->lock);
        list_del(&req->bg_entry);
        spin_unlock(&fc->lock);
}

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was interrupted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  In case of a background request the references to the
 * stored objects are released.  The requester thread is woken up (if
 * still waiting), the 'end' callback is called if given, else the
 * reference to the request is released.
 *
 * Releasing the extra reference for foreground requests must be done
 * within the same locked region as setting the state to finished.
 * This is because fuse_reset_request() may be called after the request
 * is finished and it must be the sole possessor.  If the request is
 * interrupted and put in the background, it will return with an error
 * and hence never be reset and reused.
 *
 * Called with fc->lock, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
{
        list_del(&req->list);
        req->state = FUSE_REQ_FINISHED;
        if (!req->background) {
                wake_up(&req->waitq);
                fuse_put_request_locked(fc, req);
                spin_unlock(&fc->lock);
        } else {
                void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
                req->end = NULL;
                spin_unlock(&fc->lock);
                down_read(&fc->sbput_sem);
                if (fc->mounted)
                        fuse_release_background(fc, req);
                up_read(&fc->sbput_sem);
                if (end)
                        end(fc, req);
                else
                        fuse_put_request(fc, req);
        }
}

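/*
 * Editorial note: when req->end is set the request is fully
 * asynchronous; the callback fires after the reply (or an abort) and
 * is expected to drop the request reference itself, which is why the
 * branch above only calls fuse_put_request() when no callback exists.
 */
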
/*
 * Unfortunately request interruption does not just solve the deadlock
 * problem, it causes problems too.  These stem from the fact that an
 * interrupted request continues to be processed in userspace, while
 * all the locks and object references (inode and file) held during
 * the operation are released.
 *
 * To release the locks is exactly why there's a need to interrupt the
 * request, so there's not a lot that can be done about this, except
 * introduce additional locking in userspace.
 *
 * More important is to keep inode and file references until userspace
 * has replied, otherwise FORGET and RELEASE could be sent while the
 * inode/file is still used by the filesystem.
 *
 * For this reason the concept of a "background" request is introduced.
 * An interrupted request is backgrounded if it has already been sent
 * to userspace.  Backgrounding involves getting an extra reference to
 * the inode(s) or file used in the request, and adding the request to
 * the fc->background list.  When a reply is received for a background
 * request, the object references are released, and the request is
 * removed from the list.  If the filesystem is unmounted while there
 * are still background requests, the list is walked and references
 * are released as if a reply was received.
 *
 * There's one more use for a background request.  The RELEASE message
 * is always sent as background, since it doesn't return an error or
 * data.
 */
static void background_request(struct fuse_conn *fc, struct fuse_req *req)
{
        req->background = 1;
        list_add(&req->bg_entry, &fc->background);
        if (req->inode)
                req->inode = igrab(req->inode);
        if (req->inode2)
                req->inode2 = igrab(req->inode2);
        if (req->file)
                get_file(req->file);
}

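/*
 * Concrete scenario (editorial): a process blocked in
 * request_wait_answer() is hit by SIGKILL after the daemon has already
 * read its request.  The request is backgrounded; the igrab()/get_file()
 * calls above keep the inode(s) and file alive, so FORGET or RELEASE
 * cannot be sent while the daemon is still working on the request.
 * When the reply finally arrives, request_end() drops those references
 * via fuse_release_background().
 */
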
/* Called with fc->lock held.  Releases, and then reacquires it. */
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
        sigset_t oldset;

        spin_unlock(&fc->lock);
        block_sigs(&oldset);
        wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
        restore_sigs(&oldset);
        spin_lock(&fc->lock);
        if (req->state == FUSE_REQ_FINISHED && !req->interrupted)
                return;

        if (!req->interrupted) {
                req->out.h.error = -EINTR;
                req->interrupted = 1;
        }
        if (req->locked) {
                /* This is uninterruptible sleep, because data is
                   being copied to/from the buffers of req.  During
                   locked state, there mustn't be any filesystem
                   operation (e.g. page fault), since that could lead
                   to deadlock */
                spin_unlock(&fc->lock);
                wait_event(req->waitq, !req->locked);
                spin_lock(&fc->lock);
        }
        if (req->state == FUSE_REQ_PENDING) {
                list_del(&req->list);
                __fuse_put_request(req);
        } else if (req->state == FUSE_REQ_SENT)
                background_request(fc, req);
}

static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
        unsigned nbytes = 0;
        unsigned i;

        for (i = 0; i < numargs; i++)
                nbytes += args[i].size;

        return nbytes;
}

static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
        fc->reqctr++;
        /* zero is special */
        if (fc->reqctr == 0)
                fc->reqctr = 1;
        req->in.h.unique = fc->reqctr;
        req->in.h.len = sizeof(struct fuse_in_header) +
                len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
        if (!req->preallocated) {
                /* If the request is not preallocated (either FORGET or
                   RELEASE), then still decrease outstanding_sem, so the
                   user can't open an infinite number of files while not
                   processing the RELEASE requests.  However for
                   efficiency do it without blocking, so if down()
                   would block, just increase the debt instead */
                if (down_trylock(&fc->outstanding_sem))
                        fc->outstanding_debt++;
        }
        list_add_tail(&req->list, &fc->pending);
        req->state = FUSE_REQ_PENDING;
        wake_up(&fc->waitq);
        kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

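/*
 * Worked example of the debt accounting above (editorial): if a FORGET
 * is queued while every preallocated slot is taken, down_trylock()
 * fails and outstanding_debt becomes 1.  The next fuse_putback_request()
 * then decrements the debt instead of up()-ing outstanding_sem, keeping
 * the semaphore count consistent with the number of free slots.
 */
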
/*
 * This can only be interrupted by a SIGKILL
 */
void request_send(struct fuse_conn *fc, struct fuse_req *req)
{
        req->isreply = 1;
        spin_lock(&fc->lock);
        if (!fc->connected)
                req->out.h.error = -ENOTCONN;
        else if (fc->conn_error)
                req->out.h.error = -ECONNREFUSED;
        else {
                queue_request(fc, req);
                /* acquire extra reference, since request is still needed
                   after request_end() */
                __fuse_get_request(req);

                request_wait_answer(fc, req);
        }
        spin_unlock(&fc->lock);
}

static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
        spin_lock(&fc->lock);
        if (fc->connected) {
                queue_request(fc, req);
                spin_unlock(&fc->lock);
        } else {
                req->out.h.error = -ENOTCONN;
                request_end(fc, req);
        }
}

void request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
{
        req->isreply = 0;
        request_send_nowait(fc, req);
}

void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
        req->isreply = 1;
        spin_lock(&fc->lock);
        background_request(fc, req);
        spin_unlock(&fc->lock);
        request_send_nowait(fc, req);
}

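/*
 * Editorial note: the three senders above divide the work as follows.
 * request_send() is the synchronous path used by most operations,
 * request_send_noreply() is for fire-and-forget messages such as
 * FORGET, and request_send_background() is for requests like RELEASE
 * that must outlive the caller (see the background_request() comment).
 */
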
/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * interrupted, bail out.
 */
static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
{
        int err = 0;
        if (req) {
                spin_lock(&fc->lock);
                if (req->interrupted)
                        err = -ENOENT;
                else
                        req->locked = 1;
                spin_unlock(&fc->lock);
        }
        return err;
}

/*
 * Unlock the request.  If it was interrupted while locked, the
 * requester thread is currently waiting for it to be unlocked, so
 * wake it up.
 */
static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
{
        if (req) {
                spin_lock(&fc->lock);
                req->locked = 0;
                if (req->interrupted)
                        wake_up(&req->waitq);
                spin_unlock(&fc->lock);
        }
}

struct fuse_copy_state {
        struct fuse_conn *fc;
        int write;
        struct fuse_req *req;
        const struct iovec *iov;
        unsigned long nr_segs;
        unsigned long seglen;
        unsigned long addr;
        struct page *pg;
        void *mapaddr;
        void *buf;
        unsigned len;
};

static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
                           int write, struct fuse_req *req,
                           const struct iovec *iov, unsigned long nr_segs)
{
        memset(cs, 0, sizeof(*cs));
        cs->fc = fc;
        cs->write = write;
        cs->req = req;
        cs->iov = iov;
        cs->nr_segs = nr_segs;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
        if (cs->mapaddr) {
                kunmap_atomic(cs->mapaddr, KM_USER0);
                if (cs->write) {
                        flush_dcache_page(cs->pg);
                        set_page_dirty_lock(cs->pg);
                }
                put_page(cs->pg);
                cs->mapaddr = NULL;
        }
}

/*
 * Get another pagefull of userspace buffer, map it to kernel address
 * space, and lock the request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
        unsigned long offset;
        int err;

        unlock_request(cs->fc, cs->req);
        fuse_copy_finish(cs);
        if (!cs->seglen) {
                BUG_ON(!cs->nr_segs);
                cs->seglen = cs->iov[0].iov_len;
                cs->addr = (unsigned long) cs->iov[0].iov_base;
                cs->iov++;
                cs->nr_segs--;
        }
        down_read(&current->mm->mmap_sem);
        err = get_user_pages(current, current->mm, cs->addr, 1, cs->write, 0,
                             &cs->pg, NULL);
        up_read(&current->mm->mmap_sem);
        if (err < 0)
                return err;
        BUG_ON(err != 1);
        offset = cs->addr % PAGE_SIZE;
        cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
        cs->buf = cs->mapaddr + offset;
        cs->len = min(PAGE_SIZE - offset, cs->seglen);
        cs->seglen -= cs->len;
        cs->addr += cs->len;

        return lock_request(cs->fc, cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
        unsigned ncpy = min(*size, cs->len);
        if (val) {
                if (cs->write)
                        memcpy(cs->buf, *val, ncpy);
                else
                        memcpy(*val, cs->buf, ncpy);
                *val += ncpy;
        }
        *size -= ncpy;
        cs->len -= ncpy;
        cs->buf += ncpy;
        return ncpy;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
                          unsigned offset, unsigned count, int zeroing)
{
        if (page && zeroing && count < PAGE_SIZE) {
                void *mapaddr = kmap_atomic(page, KM_USER1);
                memset(mapaddr, 0, PAGE_SIZE);
                kunmap_atomic(mapaddr, KM_USER1);
        }
        while (count) {
                int err;
                if (!cs->len && (err = fuse_copy_fill(cs)))
                        return err;
                if (page) {
                        void *mapaddr = kmap_atomic(page, KM_USER1);
                        void *buf = mapaddr + offset;
                        offset += fuse_copy_do(cs, &buf, &count);
                        kunmap_atomic(mapaddr, KM_USER1);
                } else
                        offset += fuse_copy_do(cs, NULL, &count);
        }
        if (page && !cs->write)
                flush_dcache_page(page);
        return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
                           int zeroing)
{
        unsigned i;
        struct fuse_req *req = cs->req;
        unsigned offset = req->page_offset;
        unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);

        for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
                struct page *page = req->pages[i];
                int err = fuse_copy_page(cs, page, offset, count, zeroing);
                if (err)
                        return err;

                nbytes -= count;
                count = min(nbytes, (unsigned) PAGE_SIZE);
                offset = 0;
        }
        return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
        while (size) {
                int err;
                if (!cs->len && (err = fuse_copy_fill(cs)))
                        return err;
                fuse_copy_do(cs, &val, &size);
        }
        return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
                          unsigned argpages, struct fuse_arg *args,
                          int zeroing)
{
        int err = 0;
        unsigned i;

        for (i = 0; !err && i < numargs; i++) {
                struct fuse_arg *arg = &args[i];
                if (i == numargs - 1 && argpages)
                        err = fuse_copy_pages(cs, arg->size, zeroing);
                else
                        err = fuse_copy_one(cs, arg->value, arg->size);
        }
        return err;
}

/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
{
        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue_exclusive(&fc->waitq, &wait);
        while (fc->connected && list_empty(&fc->pending)) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (signal_pending(current))
                        break;

                spin_unlock(&fc->lock);
                schedule();
                spin_lock(&fc->lock);
        }
        set_current_state(TASK_RUNNING);
        remove_wait_queue(&fc->waitq, &wait);
}

/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies the request data to the userspace
 * buffer.  If no reply is needed (FORGET) or the request has been
 * interrupted or there was an error during the copying, then it's
 * finished by calling request_end().  Otherwise add it to the
 * processing list, and set the 'sent' flag.
 */
static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
                              unsigned long nr_segs, loff_t *off)
{
        int err;
        struct fuse_req *req;
        struct fuse_in *in;
        struct fuse_copy_state cs;
        unsigned reqsize;
        struct fuse_conn *fc = fuse_get_conn(file);
        if (!fc)
                return -EPERM;

 restart:
        spin_lock(&fc->lock);
        err = -EAGAIN;
        if ((file->f_flags & O_NONBLOCK) && fc->connected &&
            list_empty(&fc->pending))
                goto err_unlock;

        request_wait(fc);
        err = -ENODEV;
        if (!fc->connected)
                goto err_unlock;
        err = -ERESTARTSYS;
        if (list_empty(&fc->pending))
                goto err_unlock;

        req = list_entry(fc->pending.next, struct fuse_req, list);
        req->state = FUSE_REQ_READING;
        list_move(&req->list, &fc->io);

        in = &req->in;
        reqsize = in->h.len;
        /* If request is too large, reply with an error and restart the read */
        if (iov_length(iov, nr_segs) < reqsize) {
                req->out.h.error = -EIO;
                /* SETXATTR is special, since it may contain too large data */
                if (in->h.opcode == FUSE_SETXATTR)
                        req->out.h.error = -E2BIG;
                request_end(fc, req);
                goto restart;
        }
        spin_unlock(&fc->lock);
        fuse_copy_init(&cs, fc, 1, req, iov, nr_segs);
        err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
        if (!err)
                err = fuse_copy_args(&cs, in->numargs, in->argpages,
                                     (struct fuse_arg *) in->args, 0);
        fuse_copy_finish(&cs);
        spin_lock(&fc->lock);
        req->locked = 0;
        if (!err && req->interrupted)
                err = -ENOENT;
        if (err) {
                if (!req->interrupted)
                        req->out.h.error = -EIO;
                request_end(fc, req);
                return err;
        }
        if (!req->isreply)
                request_end(fc, req);
        else {
                req->state = FUSE_REQ_SENT;
                list_move_tail(&req->list, &fc->processing);
                spin_unlock(&fc->lock);
        }
        return reqsize;

 err_unlock:
        spin_unlock(&fc->lock);
        return err;
}

static ssize_t fuse_dev_read(struct file *file, char __user *buf,
                             size_t nbytes, loff_t *off)
{
        struct iovec iov;
        iov.iov_len = nbytes;
        iov.iov_base = buf;
        return fuse_dev_readv(file, &iov, 1, off);
}

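/*
 * Orientation sketch (editorial, not from the original file): the
 * userspace daemon side of fuse_dev_readv()/fuse_dev_writev() is a
 * plain read/write loop on the /dev/fuse descriptor.  Struct layouts
 * come from <linux/fuse.h>; buffer sizing and error handling elided:
 *
 *        char buf[65536];   (must hold a whole request)
 *        for (;;) {
 *                ssize_t n = read(fd, buf, sizeof(buf));
 *                struct fuse_in_header *in = (struct fuse_in_header *) buf;
 *                ... handle in->opcode, then write() back a reply that
 *                    starts with a fuse_out_header whose 'unique' field
 *                    echoes in->unique and whose 'len' covers the whole
 *                    reply (see the oh.len != nbytes check below) ...
 *        }
 */
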
/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
{
        struct list_head *entry;

        list_for_each(entry, &fc->processing) {
                struct fuse_req *req;
                req = list_entry(entry, struct fuse_req, list);
                if (req->in.h.unique == unique)
                        return req;
        }
        return NULL;
}

static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
                         unsigned nbytes)
{
        unsigned reqsize = sizeof(struct fuse_out_header);

        if (out->h.error)
                return nbytes != reqsize ? -EINVAL : 0;

        reqsize += len_args(out->numargs, out->args);

        if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
                return -EINVAL;
        else if (reqsize > nbytes) {
                struct fuse_arg *lastarg = &out->args[out->numargs-1];
                unsigned diffsize = reqsize - nbytes;
                if (diffsize > lastarg->size)
                        return -EINVAL;
                lastarg->size -= diffsize;
        }
        return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
                              out->page_zeroing);
}

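/*
 * Example of the argvar trimming above (editorial): for a READ reply
 * the last argument is the data and out->argvar is set, so a daemon
 * that returns fewer bytes than requested produces nbytes < reqsize,
 * and the last argument's size is simply trimmed down to what was
 * actually written.
 */
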
/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling request_end().
 */
static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
                               unsigned long nr_segs, loff_t *off)
{
        int err;
        unsigned nbytes = iov_length(iov, nr_segs);
        struct fuse_req *req;
        struct fuse_out_header oh;
        struct fuse_copy_state cs;
        struct fuse_conn *fc = fuse_get_conn(file);
        if (!fc)
                return -EPERM;

        fuse_copy_init(&cs, fc, 0, NULL, iov, nr_segs);
        if (nbytes < sizeof(struct fuse_out_header))
                return -EINVAL;

        err = fuse_copy_one(&cs, &oh, sizeof(oh));
        if (err)
                goto err_finish;
        err = -EINVAL;
        if (!oh.unique || oh.error <= -1000 || oh.error > 0 ||
            oh.len != nbytes)
                goto err_finish;

        spin_lock(&fc->lock);
        err = -ENOENT;
        if (!fc->connected)
                goto err_unlock;

        req = request_find(fc, oh.unique);
        err = -EINVAL;
        if (!req)
                goto err_unlock;

        if (req->interrupted) {
                spin_unlock(&fc->lock);
                fuse_copy_finish(&cs);
                spin_lock(&fc->lock);
                request_end(fc, req);
                return -ENOENT;
        }
        list_move(&req->list, &fc->io);
        req->out.h = oh;
        req->locked = 1;
        cs.req = req;
        spin_unlock(&fc->lock);

        err = copy_out_args(&cs, &req->out, nbytes);
        fuse_copy_finish(&cs);

        spin_lock(&fc->lock);
        req->locked = 0;
        if (!err) {
                if (req->interrupted)
                        err = -ENOENT;
        } else if (!req->interrupted)
                req->out.h.error = -EIO;
        request_end(fc, req);

        return err ? err : nbytes;

 err_unlock:
        spin_unlock(&fc->lock);
 err_finish:
        fuse_copy_finish(&cs);
        return err;
}

static ssize_t fuse_dev_write(struct file *file, const char __user *buf,
                              size_t nbytes, loff_t *off)
{
        struct iovec iov;
        iov.iov_len = nbytes;
        iov.iov_base = (char __user *) buf;
        return fuse_dev_writev(file, &iov, 1, off);
}

static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
        unsigned mask = POLLOUT | POLLWRNORM;
        struct fuse_conn *fc = fuse_get_conn(file);
        if (!fc)
                return POLLERR;

        poll_wait(file, &fc->waitq, wait);

        spin_lock(&fc->lock);
        if (!fc->connected)
                mask = POLLERR;
        else if (!list_empty(&fc->pending))
                mask |= POLLIN | POLLRDNORM;
        spin_unlock(&fc->lock);

        return mask;
}

/*
 * Abort all requests on the given list (pending or processing)
 *
 * This function releases and reacquires fc->lock
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
{
        while (!list_empty(head)) {
                struct fuse_req *req;
                req = list_entry(head->next, struct fuse_req, list);
                req->out.h.error = -ECONNABORTED;
                request_end(fc, req);
                spin_lock(&fc->lock);
        }
}

/*
 * Abort requests under I/O
 *
 * The requests are set to interrupted and finished, and the request
 * waiter is woken up.  This will make request_wait_answer() wait
 * until the request is unlocked and then return.
 *
 * If the request is asynchronous, then the end function needs to be
 * called after waiting for the request to be unlocked (if it was
 * locked).
 */
static void end_io_requests(struct fuse_conn *fc)
{
        while (!list_empty(&fc->io)) {
                struct fuse_req *req =
                        list_entry(fc->io.next, struct fuse_req, list);
                void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;

                req->interrupted = 1;
                req->out.h.error = -ECONNABORTED;
                req->state = FUSE_REQ_FINISHED;
                list_del_init(&req->list);
                wake_up(&req->waitq);
                if (end) {
                        req->end = NULL;
                        /* The end function will consume this reference */
                        __fuse_get_request(req);
                        spin_unlock(&fc->lock);
                        wait_event(req->waitq, !req->locked);
                        end(fc, req);
                        spin_lock(&fc->lock);
                }
        }
}

/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or
 * just a hung filesystem.
 *
 * The same effect is usually achievable through killing the
 * filesystem daemon and all users of the filesystem.  The exception
 * is the combination of an asynchronous request and the tricky
 * deadlock (see Documentation/filesystems/fuse.txt).
 *
 * During the aborting, progression of requests from the pending and
 * processing lists onto the io list, and progression of new requests
 * onto the pending list, is prevented by fc->connected being false.
 *
 * Progression of requests under I/O to the processing list is
 * prevented by the req->interrupted flag being true for these
 * requests.  For this reason requests on the io list must be aborted
 * first.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
        spin_lock(&fc->lock);
        if (fc->connected) {
                fc->connected = 0;
                end_io_requests(fc);
                end_requests(fc, &fc->pending);
                end_requests(fc, &fc->processing);
                wake_up_all(&fc->waitq);
                kill_fasync(&fc->fasync, SIGIO, POLL_IN);
        }
        spin_unlock(&fc->lock);
}

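/*
 * Editorial note (hedged): in this kernel generation fuse_abort_conn()
 * is typically reached through the connection's sysfs "abort"
 * attribute (the kobject referenced by fc->kobj below), letting an
 * administrator break a deadlocked filesystem without having to find
 * and kill every user of it.
 */
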
static int fuse_dev_release(struct inode *inode, struct file *file)
{
        struct fuse_conn *fc = fuse_get_conn(file);
        if (fc) {
                spin_lock(&fc->lock);
                fc->connected = 0;
                end_requests(fc, &fc->pending);
                end_requests(fc, &fc->processing);
                spin_unlock(&fc->lock);
                fasync_helper(-1, file, 0, &fc->fasync);
                kobject_put(&fc->kobj);
        }

        return 0;
}

static int fuse_dev_fasync(int fd, struct file *file, int on)
{
        struct fuse_conn *fc = fuse_get_conn(file);
        if (!fc)
                return -EPERM;

        /* No locking - fasync_helper does its own locking */
        return fasync_helper(fd, file, on, &fc->fasync);
}

const struct file_operations fuse_dev_operations = {
        .owner          = THIS_MODULE,
        .llseek         = no_llseek,
        .read           = fuse_dev_read,
        .readv          = fuse_dev_readv,
        .write          = fuse_dev_write,
        .writev         = fuse_dev_writev,
        .poll           = fuse_dev_poll,
        .release        = fuse_dev_release,
        .fasync         = fuse_dev_fasync,
};

static struct miscdevice fuse_miscdevice = {
        .minor = FUSE_MINOR,
        .name  = "fuse",
        .fops = &fuse_dev_operations,
};

int __init fuse_dev_init(void)
{
        int err = -ENOMEM;
        fuse_req_cachep = kmem_cache_create("fuse_request",
                                            sizeof(struct fuse_req),
                                            0, 0, NULL, NULL);
        if (!fuse_req_cachep)
                goto out;

        err = misc_register(&fuse_miscdevice);
        if (err)
                goto out_cache_clean;

        return 0;

 out_cache_clean:
        kmem_cache_destroy(fuse_req_cachep);
 out:
        return err;
}

void fuse_dev_cleanup(void)
{
        misc_deregister(&fuse_miscdevice);
        kmem_cache_destroy(fuse_req_cachep);
}