[PATCH] fuse: account background requests
fs/fuse/dev.c
/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2006  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);

static kmem_cache_t *fuse_req_cachep;

static struct fuse_conn *fuse_get_conn(struct file *file)
{
        /*
         * Lockless access is OK, because file->private_data is set
         * once during mount and is valid until the file is released.
         */
        return file->private_data;
}

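/* Reset a request to a clean state with a single reference */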
static void fuse_request_init(struct fuse_req *req)
{
        memset(req, 0, sizeof(*req));
        INIT_LIST_HEAD(&req->list);
        init_waitqueue_head(&req->waitq);
        atomic_set(&req->count, 1);
}

struct fuse_req *fuse_request_alloc(void)
{
        struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, SLAB_KERNEL);
        if (req)
                fuse_request_init(req);
        return req;
}

void fuse_request_free(struct fuse_req *req)
{
        kmem_cache_free(fuse_req_cachep, req);
}

static void block_sigs(sigset_t *oldset)
{
        sigset_t mask;

        siginitsetinv(&mask, sigmask(SIGKILL));
        sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void restore_sigs(sigset_t *oldset)
{
        sigprocmask(SIG_SETMASK, oldset, NULL);
}

/*
 * Reset a request so that it can be reused
 *
 * The caller must be _very_ careful to make sure that it is holding
 * the only reference to req
 */
void fuse_reset_request(struct fuse_req *req)
{
        BUG_ON(atomic_read(&req->count) != 1);
        fuse_request_init(req);
}

static void __fuse_get_request(struct fuse_req *req)
{
        atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
        BUG_ON(atomic_read(&req->count) < 2);
        atomic_dec(&req->count);
}

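/*
 * Reserve a request for the calling task.  Waits until the connection
 * is no longer blocked by too many background requests, allocates a
 * new request, accounts it in fc->num_waiting and fills in the
 * caller's credentials.  Returns an ERR_PTR on interruption or
 * allocation failure.
 */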
struct fuse_req *fuse_get_req(struct fuse_conn *fc)
{
        struct fuse_req *req;
        sigset_t oldset;
        int err;

        block_sigs(&oldset);
        err = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
        restore_sigs(&oldset);
        if (err)
                return ERR_PTR(-EINTR);

        req = fuse_request_alloc();
        if (!req)
                return ERR_PTR(-ENOMEM);

        atomic_inc(&fc->num_waiting);
        fuse_request_init(req);
        req->in.h.uid = current->fsuid;
        req->in.h.gid = current->fsgid;
        req->in.h.pid = current->pid;
        return req;
}

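/*
 * Drop a reference to the request.  When the last reference is put,
 * the request is removed from the fc->num_waiting count and freed.
 */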
void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
        if (atomic_dec_and_test(&req->count)) {
                atomic_dec(&fc->num_waiting);
                fuse_request_free(req);
        }
}

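/*
 * Release the inode and file references held by a background request
 * and remove it from fc->background.  If the connection was blocked
 * because the background limit was reached, unblock it and wake up
 * the waiters.
 */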
void fuse_release_background(struct fuse_conn *fc, struct fuse_req *req)
{
        iput(req->inode);
        iput(req->inode2);
        if (req->file)
                fput(req->file);
        spin_lock(&fc->lock);
        list_del(&req->bg_entry);
        if (fc->num_background == FUSE_MAX_BACKGROUND) {
                fc->blocked = 0;
                wake_up_all(&fc->blocked_waitq);
        }
        fc->num_background--;
        spin_unlock(&fc->lock);
}

/*
 * This function is called when a request is finished.  Either a
 * reply has arrived, or it was interrupted (and not yet sent), or
 * some error occurred during communication with userspace, or the
 * device file was closed.  In case of a background request the
 * references to the stored objects are released.  The requester
 * thread is woken up (if still waiting), the 'end' callback is
 * called if given, otherwise the reference to the request is
 * released.
 *
 * Releasing the extra reference for foreground requests must be done
 * within the same locked region as setting the state to finished.
 * This is because fuse_reset_request() may be called after the
 * request is finished and it must be the sole possessor.  If the
 * request is interrupted and put in the background, it will return
 * with an error and hence never be reset and reused.
 *
 * Called with fc->lock held, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
{
        list_del(&req->list);
        req->state = FUSE_REQ_FINISHED;
        if (!req->background) {
                spin_unlock(&fc->lock);
                wake_up(&req->waitq);
                fuse_put_request(fc, req);
        } else {
                void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
                req->end = NULL;
                spin_unlock(&fc->lock);
                down_read(&fc->sbput_sem);
                if (fc->mounted)
                        fuse_release_background(fc, req);
                up_read(&fc->sbput_sem);
                if (end)
                        end(fc, req);
                else
                        fuse_put_request(fc, req);
        }
}

/*
 * Unfortunately request interruption does not just solve the
 * deadlock problem, it causes problems too.  These stem from the
 * fact that an interrupted request continues to be processed in
 * userspace, while all the locks and object references (inode and
 * file) held during the operation are released.
 *
 * Releasing the locks is exactly why the request needs to be
 * interrupted, so there's not a lot that can be done about this,
 * except introduce additional locking in userspace.
 *
 * More important is to keep the inode and file references until
 * userspace has replied, otherwise FORGET and RELEASE could be sent
 * while the inode/file is still used by the filesystem.
 *
 * For this reason the concept of a "background" request is
 * introduced.  An interrupted request is backgrounded if it has
 * already been sent to userspace.  Backgrounding involves getting an
 * extra reference to the inode(s) or file used in the request, and
 * adding the request to the fc->background list.  When a reply is
 * received for a background request, the object references are
 * released, and the request is removed from the list.  If the
 * filesystem is unmounted while there are still background requests,
 * the list is walked and references are released as if a reply was
 * received.
 *
 * There's one more use for a background request: the RELEASE message
 * is always sent as background, since it doesn't return an error or
 * data.
 */
static void background_request(struct fuse_conn *fc, struct fuse_req *req)
{
        req->background = 1;
        list_add(&req->bg_entry, &fc->background);
        fc->num_background++;
        if (fc->num_background == FUSE_MAX_BACKGROUND)
                fc->blocked = 1;
        if (req->inode)
                req->inode = igrab(req->inode);
        if (req->inode2)
                req->inode2 = igrab(req->inode2);
        if (req->file)
                get_file(req->file);
}

/* Called with fc->lock held.  Releases, and then reacquires it. */
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
        sigset_t oldset;

        spin_unlock(&fc->lock);
        block_sigs(&oldset);
        wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
        restore_sigs(&oldset);
        spin_lock(&fc->lock);
        if (req->state == FUSE_REQ_FINISHED && !req->interrupted)
                return;

        if (!req->interrupted) {
                req->out.h.error = -EINTR;
                req->interrupted = 1;
        }
        if (req->locked) {
                /* This is an uninterruptible sleep, because data is
                   being copied to/from the buffers of req.  While the
                   request is locked, there mustn't be any filesystem
                   operation (e.g. page fault), since that could lead
                   to deadlock */
                spin_unlock(&fc->lock);
                wait_event(req->waitq, !req->locked);
                spin_lock(&fc->lock);
        }
        if (req->state == FUSE_REQ_PENDING) {
                list_del(&req->list);
                __fuse_put_request(req);
        } else if (req->state == FUSE_REQ_SENT)
                background_request(fc, req);
}

static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
        unsigned nbytes = 0;
        unsigned i;

        for (i = 0; i < numargs; i++)
                nbytes += args[i].size;

        return nbytes;
}

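/*
 * Assign a unique ID and total length to the request, add it to the
 * pending list, wake up any reader waiting on the device and send
 * SIGIO to async waiters.  Called with fc->lock held.
 */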
static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
        fc->reqctr++;
        /* zero is special */
        if (fc->reqctr == 0)
                fc->reqctr = 1;
        req->in.h.unique = fc->reqctr;
        req->in.h.len = sizeof(struct fuse_in_header) +
                len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
        list_add_tail(&req->list, &fc->pending);
        req->state = FUSE_REQ_PENDING;
        wake_up(&fc->waitq);
        kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

/*
 * This can only be interrupted by a SIGKILL
 */
void request_send(struct fuse_conn *fc, struct fuse_req *req)
{
        req->isreply = 1;
        spin_lock(&fc->lock);
        if (!fc->connected)
                req->out.h.error = -ENOTCONN;
        else if (fc->conn_error)
                req->out.h.error = -ECONNREFUSED;
        else {
                queue_request(fc, req);
                /* acquire extra reference, since request is still needed
                   after request_end() */
                __fuse_get_request(req);

                request_wait_answer(fc, req);
        }
        spin_unlock(&fc->lock);
}

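/*
 * Queue a request without waiting for the answer.  The request is
 * backgrounded right away; if the connection is down it is finished
 * immediately with -ENOTCONN.
 */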
static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
        spin_lock(&fc->lock);
        background_request(fc, req);
        if (fc->connected) {
                queue_request(fc, req);
                spin_unlock(&fc->lock);
        } else {
                req->out.h.error = -ENOTCONN;
                request_end(fc, req);
        }
}

void request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
{
        req->isreply = 0;
        request_send_nowait(fc, req);
}

void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
        req->isreply = 1;
        request_send_nowait(fc, req);
}

/*
 * Lock the request.  Up to the next unlock_request() there mustn't
 * be anything that could cause a page fault.  If the request was
 * already interrupted, bail out.
 */
static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
{
        int err = 0;
        if (req) {
                spin_lock(&fc->lock);
                if (req->interrupted)
                        err = -ENOENT;
                else
                        req->locked = 1;
                spin_unlock(&fc->lock);
        }
        return err;
}

/*
 * Unlock the request.  If it was interrupted while it was locked,
 * the requester thread is currently waiting for it to be unlocked,
 * so wake it up.
 */
static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
{
        if (req) {
                spin_lock(&fc->lock);
                req->locked = 0;
                if (req->interrupted)
                        wake_up(&req->waitq);
                spin_unlock(&fc->lock);
        }
}

struct fuse_copy_state {
        struct fuse_conn *fc;
        int write;
        struct fuse_req *req;
        const struct iovec *iov;
        unsigned long nr_segs;
        unsigned long seglen;
        unsigned long addr;
        struct page *pg;
        void *mapaddr;
        void *buf;
        unsigned len;
};

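/*
 * Initialize the copy state for transferring request data to or from
 * the userspace buffer described by iov.
 */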
static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
                           int write, struct fuse_req *req,
                           const struct iovec *iov, unsigned long nr_segs)
{
        memset(cs, 0, sizeof(*cs));
        cs->fc = fc;
        cs->write = write;
        cs->req = req;
        cs->iov = iov;
        cs->nr_segs = nr_segs;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
        if (cs->mapaddr) {
                kunmap_atomic(cs->mapaddr, KM_USER0);
                if (cs->write) {
                        flush_dcache_page(cs->pg);
                        set_page_dirty_lock(cs->pg);
                }
                put_page(cs->pg);
                cs->mapaddr = NULL;
        }
}

/*
 * Get another pagefull of userspace buffer, map it into kernel
 * address space and lock the request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
        unsigned long offset;
        int err;

        unlock_request(cs->fc, cs->req);
        fuse_copy_finish(cs);
        if (!cs->seglen) {
                BUG_ON(!cs->nr_segs);
                cs->seglen = cs->iov[0].iov_len;
                cs->addr = (unsigned long) cs->iov[0].iov_base;
                cs->iov++;
                cs->nr_segs--;
        }
        down_read(&current->mm->mmap_sem);
        err = get_user_pages(current, current->mm, cs->addr, 1, cs->write, 0,
                             &cs->pg, NULL);
        up_read(&current->mm->mmap_sem);
        if (err < 0)
                return err;
        BUG_ON(err != 1);
        offset = cs->addr % PAGE_SIZE;
        cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
        cs->buf = cs->mapaddr + offset;
        cs->len = min(PAGE_SIZE - offset, cs->seglen);
        cs->seglen -= cs->len;
        cs->addr += cs->len;

        return lock_request(cs->fc, cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
        unsigned ncpy = min(*size, cs->len);
        if (val) {
                if (cs->write)
                        memcpy(cs->buf, *val, ncpy);
                else
                        memcpy(*val, cs->buf, ncpy);
                *val += ncpy;
        }
        *size -= ncpy;
        cs->len -= ncpy;
        cs->buf += ncpy;
        return ncpy;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
                          unsigned offset, unsigned count, int zeroing)
{
        if (page && zeroing && count < PAGE_SIZE) {
                void *mapaddr = kmap_atomic(page, KM_USER1);
                memset(mapaddr, 0, PAGE_SIZE);
                kunmap_atomic(mapaddr, KM_USER1);
        }
        while (count) {
                int err;
                if (!cs->len && (err = fuse_copy_fill(cs)))
                        return err;
                if (page) {
                        void *mapaddr = kmap_atomic(page, KM_USER1);
                        void *buf = mapaddr + offset;
                        offset += fuse_copy_do(cs, &buf, &count);
                        kunmap_atomic(mapaddr, KM_USER1);
                } else
                        offset += fuse_copy_do(cs, NULL, &count);
        }
        if (page && !cs->write)
                flush_dcache_page(page);
        return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
                           int zeroing)
{
        unsigned i;
        struct fuse_req *req = cs->req;
        unsigned offset = req->page_offset;
        unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);

        for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
                struct page *page = req->pages[i];
                int err = fuse_copy_page(cs, page, offset, count, zeroing);
                if (err)
                        return err;

                nbytes -= count;
                count = min(nbytes, (unsigned) PAGE_SIZE);
                offset = 0;
        }
        return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
        while (size) {
                int err;
                if (!cs->len && (err = fuse_copy_fill(cs)))
                        return err;
                fuse_copy_do(cs, &val, &size);
        }
        return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
                          unsigned argpages, struct fuse_arg *args,
                          int zeroing)
{
        int err = 0;
        unsigned i;

        for (i = 0; !err && i < numargs; i++) {
                struct fuse_arg *arg = &args[i];
                if (i == numargs - 1 && argpages)
                        err = fuse_copy_pages(cs, arg->size, zeroing);
                else
                        err = fuse_copy_one(cs, arg->value, arg->size);
        }
        return err;
}

/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
{
        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue_exclusive(&fc->waitq, &wait);
        while (fc->connected && list_empty(&fc->pending)) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (signal_pending(current))
                        break;

                spin_unlock(&fc->lock);
                schedule();
                spin_lock(&fc->lock);
        }
        set_current_state(TASK_RUNNING);
        remove_wait_queue(&fc->waitq, &wait);
}

/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies the request data to the userspace
 * buffer.  If no reply is needed (FORGET), or the request has been
 * interrupted, or there was an error during the copying, then it is
 * finished by calling request_end().  Otherwise it is added to the
 * processing list and the 'sent' flag is set.
 */
static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
                              unsigned long nr_segs, loff_t *off)
{
        int err;
        struct fuse_req *req;
        struct fuse_in *in;
        struct fuse_copy_state cs;
        unsigned reqsize;
        struct fuse_conn *fc = fuse_get_conn(file);
        if (!fc)
                return -EPERM;

 restart:
        spin_lock(&fc->lock);
        err = -EAGAIN;
        if ((file->f_flags & O_NONBLOCK) && fc->connected &&
            list_empty(&fc->pending))
                goto err_unlock;

        request_wait(fc);
        err = -ENODEV;
        if (!fc->connected)
                goto err_unlock;
        err = -ERESTARTSYS;
        if (list_empty(&fc->pending))
                goto err_unlock;

        req = list_entry(fc->pending.next, struct fuse_req, list);
        req->state = FUSE_REQ_READING;
        list_move(&req->list, &fc->io);

        in = &req->in;
        reqsize = in->h.len;
        /* If the request is too large, reply with an error and restart
           the read */
        if (iov_length(iov, nr_segs) < reqsize) {
                req->out.h.error = -EIO;
                /* SETXATTR is special, since its data may be too large */
                if (in->h.opcode == FUSE_SETXATTR)
                        req->out.h.error = -E2BIG;
                request_end(fc, req);
                goto restart;
        }
        spin_unlock(&fc->lock);
        fuse_copy_init(&cs, fc, 1, req, iov, nr_segs);
        err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
        if (!err)
                err = fuse_copy_args(&cs, in->numargs, in->argpages,
                                     (struct fuse_arg *) in->args, 0);
        fuse_copy_finish(&cs);
        spin_lock(&fc->lock);
        req->locked = 0;
        if (!err && req->interrupted)
                err = -ENOENT;
        if (err) {
                if (!req->interrupted)
                        req->out.h.error = -EIO;
                request_end(fc, req);
                return err;
        }
        if (!req->isreply)
                request_end(fc, req);
        else {
                req->state = FUSE_REQ_SENT;
                list_move_tail(&req->list, &fc->processing);
                spin_unlock(&fc->lock);
        }
        return reqsize;

 err_unlock:
        spin_unlock(&fc->lock);
        return err;
}

static ssize_t fuse_dev_read(struct file *file, char __user *buf,
                             size_t nbytes, loff_t *off)
{
        struct iovec iov;
        iov.iov_len = nbytes;
        iov.iov_base = buf;
        return fuse_dev_readv(file, &iov, 1, off);
}

/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
{
        struct list_head *entry;

        list_for_each(entry, &fc->processing) {
                struct fuse_req *req;
                req = list_entry(entry, struct fuse_req, list);
                if (req->in.h.unique == unique)
                        return req;
        }
        return NULL;
}

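/*
 * Copy the reply arguments from the userspace buffer into the
 * request.  If the reply carries an error, only the header is
 * allowed.  The last argument may be shorter than expected only if
 * out->argvar is set; any other size mismatch is rejected with
 * -EINVAL.
 */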
static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
                         unsigned nbytes)
{
        unsigned reqsize = sizeof(struct fuse_out_header);

        if (out->h.error)
                return nbytes != reqsize ? -EINVAL : 0;

        reqsize += len_args(out->numargs, out->args);

        if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
                return -EINVAL;
        else if (reqsize > nbytes) {
                struct fuse_arg *lastarg = &out->args[out->numargs-1];
                unsigned diffsize = reqsize - nbytes;
                if (diffsize > lastarg->size)
                        return -EINVAL;
                lastarg->size -= diffsize;
        }
        return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
                              out->page_zeroing);
}

/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched for on the
 * processing list by the unique ID found in the header.  If found, it
 * is removed from the list and the rest of the buffer is copied to
 * the request.  The request is finished by calling request_end().
 */
static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
                               unsigned long nr_segs, loff_t *off)
{
        int err;
        unsigned nbytes = iov_length(iov, nr_segs);
        struct fuse_req *req;
        struct fuse_out_header oh;
        struct fuse_copy_state cs;
        struct fuse_conn *fc = fuse_get_conn(file);
        if (!fc)
                return -EPERM;

        fuse_copy_init(&cs, fc, 0, NULL, iov, nr_segs);
        if (nbytes < sizeof(struct fuse_out_header))
                return -EINVAL;

        err = fuse_copy_one(&cs, &oh, sizeof(oh));
        if (err)
                goto err_finish;
        err = -EINVAL;
        if (!oh.unique || oh.error <= -1000 || oh.error > 0 ||
            oh.len != nbytes)
                goto err_finish;

        spin_lock(&fc->lock);
        err = -ENOENT;
        if (!fc->connected)
                goto err_unlock;

        req = request_find(fc, oh.unique);
        err = -EINVAL;
        if (!req)
                goto err_unlock;

        if (req->interrupted) {
                spin_unlock(&fc->lock);
                fuse_copy_finish(&cs);
                spin_lock(&fc->lock);
                request_end(fc, req);
                return -ENOENT;
        }
        list_move(&req->list, &fc->io);
        req->out.h = oh;
        req->locked = 1;
        cs.req = req;
        spin_unlock(&fc->lock);

        err = copy_out_args(&cs, &req->out, nbytes);
        fuse_copy_finish(&cs);

        spin_lock(&fc->lock);
        req->locked = 0;
        if (!err) {
                if (req->interrupted)
                        err = -ENOENT;
        } else if (!req->interrupted)
                req->out.h.error = -EIO;
        request_end(fc, req);

        return err ? err : nbytes;

 err_unlock:
        spin_unlock(&fc->lock);
 err_finish:
        fuse_copy_finish(&cs);
        return err;
}

static ssize_t fuse_dev_write(struct file *file, const char __user *buf,
                              size_t nbytes, loff_t *off)
{
        struct iovec iov;
        iov.iov_len = nbytes;
        iov.iov_base = (char __user *) buf;
        return fuse_dev_writev(file, &iov, 1, off);
}

static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
        unsigned mask = POLLOUT | POLLWRNORM;
        struct fuse_conn *fc = fuse_get_conn(file);
        if (!fc)
                return POLLERR;

        poll_wait(file, &fc->waitq, wait);

        spin_lock(&fc->lock);
        if (!fc->connected)
                mask = POLLERR;
        else if (!list_empty(&fc->pending))
                mask |= POLLIN | POLLRDNORM;
        spin_unlock(&fc->lock);

        return mask;
}

/*
 * Abort all requests on the given list (pending or processing)
 *
 * This function releases and reacquires fc->lock
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
{
        while (!list_empty(head)) {
                struct fuse_req *req;
                req = list_entry(head->next, struct fuse_req, list);
                req->out.h.error = -ECONNABORTED;
                request_end(fc, req);
                spin_lock(&fc->lock);
        }
}

/*
 * Abort requests under I/O
 *
 * The requests are set to interrupted and finished, and the request
 * waiter is woken up.  This will make request_wait_answer() wait
 * until the request is unlocked and then return.
 *
 * If the request is asynchronous, then the end function needs to be
 * called after waiting for the request to be unlocked (if it was
 * locked).
 */
static void end_io_requests(struct fuse_conn *fc)
{
        while (!list_empty(&fc->io)) {
                struct fuse_req *req =
                        list_entry(fc->io.next, struct fuse_req, list);
                void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;

                req->interrupted = 1;
                req->out.h.error = -ECONNABORTED;
                req->state = FUSE_REQ_FINISHED;
                list_del_init(&req->list);
                wake_up(&req->waitq);
                if (end) {
                        req->end = NULL;
                        /* The end function will consume this reference */
                        __fuse_get_request(req);
                        spin_unlock(&fc->lock);
                        wait_event(req->waitq, !req->locked);
                        end(fc, req);
                        spin_lock(&fc->lock);
                }
        }
}

/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or
 * just a hung filesystem.
 *
 * The same effect is usually achievable through killing the
 * filesystem daemon and all users of the filesystem.  The exception
 * is the combination of an asynchronous request and the tricky
 * deadlock (see Documentation/filesystems/fuse.txt).
 *
 * During the aborting, progression of requests from the pending and
 * processing lists onto the io list, and progression of new requests
 * onto the pending list is prevented by fc->connected being false.
 *
 * Progression of requests under I/O to the processing list is
 * prevented by the req->interrupted flag being true for these
 * requests.  For this reason requests on the io list must be aborted
 * first.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
        spin_lock(&fc->lock);
        if (fc->connected) {
                fc->connected = 0;
                end_io_requests(fc);
                end_requests(fc, &fc->pending);
                end_requests(fc, &fc->processing);
                wake_up_all(&fc->waitq);
                kill_fasync(&fc->fasync, SIGIO, POLL_IN);
        }
        spin_unlock(&fc->lock);
}

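/*
 * Called when the device file is closed: mark the connection as dead,
 * finish all pending and processing requests with -ECONNABORTED and
 * drop the reference on the connection.
 */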
static int fuse_dev_release(struct inode *inode, struct file *file)
{
        struct fuse_conn *fc = fuse_get_conn(file);
        if (fc) {
                spin_lock(&fc->lock);
                fc->connected = 0;
                end_requests(fc, &fc->pending);
                end_requests(fc, &fc->processing);
                spin_unlock(&fc->lock);
                fasync_helper(-1, file, 0, &fc->fasync);
                kobject_put(&fc->kobj);
        }

        return 0;
}

static int fuse_dev_fasync(int fd, struct file *file, int on)
{
        struct fuse_conn *fc = fuse_get_conn(file);
        if (!fc)
                return -EPERM;

        /* No locking - fasync_helper does its own locking */
        return fasync_helper(fd, file, on, &fc->fasync);
}

const struct file_operations fuse_dev_operations = {
        .owner = THIS_MODULE,
        .llseek = no_llseek,
        .read = fuse_dev_read,
        .readv = fuse_dev_readv,
        .write = fuse_dev_write,
        .writev = fuse_dev_writev,
        .poll = fuse_dev_poll,
        .release = fuse_dev_release,
        .fasync = fuse_dev_fasync,
};

static struct miscdevice fuse_miscdevice = {
        .minor = FUSE_MINOR,
        .name = "fuse",
        .fops = &fuse_dev_operations,
};

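/*
 * Create the slab cache for requests and register the fuse misc
 * character device.
 */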
int __init fuse_dev_init(void)
{
        int err = -ENOMEM;
        fuse_req_cachep = kmem_cache_create("fuse_request",
                                            sizeof(struct fuse_req),
                                            0, 0, NULL, NULL);
        if (!fuse_req_cachep)
                goto out;

        err = misc_register(&fuse_miscdevice);
        if (err)
                goto out_cache_clean;

        return 0;

 out_cache_clean:
        kmem_cache_destroy(fuse_req_cachep);
 out:
        return err;
}

void fuse_dev_cleanup(void)
{
        misc_deregister(&fuse_miscdevice);
        kmem_cache_destroy(fuse_req_cachep);
}