fuse: separate out processing queue
fs/fuse/dev.c
1/*
2 FUSE: Filesystem in Userspace
1729a16c 3 Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu>
4
5 This program can be distributed under the terms of the GNU GPL.
6 See the file COPYING.
7*/
8
9#include "fuse_i.h"
10
11#include <linux/init.h>
12#include <linux/module.h>
13#include <linux/poll.h>
14#include <linux/uio.h>
15#include <linux/miscdevice.h>
16#include <linux/pagemap.h>
17#include <linux/file.h>
18#include <linux/slab.h>
dd3bb14f 19#include <linux/pipe_fs_i.h>
20#include <linux/swap.h>
21#include <linux/splice.h>
22
23MODULE_ALIAS_MISCDEV(FUSE_MINOR);
578454ff 24MODULE_ALIAS("devname:fuse");
334f485d 25
e18b890b 26static struct kmem_cache *fuse_req_cachep;
334f485d 27
8bfc016d 28static struct fuse_conn *fuse_get_conn(struct file *file)
334f485d 29{
30 /*
31 * Lockless access is OK, because file->private_data is set
32 * once during mount and is valid until the file is released.
33 */
34 return file->private_data;
35}
36
4250c066 37static void fuse_request_init(struct fuse_req *req, struct page **pages,
b2430d75 38 struct fuse_page_desc *page_descs,
4250c066 39 unsigned npages)
40{
41 memset(req, 0, sizeof(*req));
4250c066 42 memset(pages, 0, sizeof(*pages) * npages);
b2430d75 43 memset(page_descs, 0, sizeof(*page_descs) * npages);
334f485d 44 INIT_LIST_HEAD(&req->list);
a4d27e75 45 INIT_LIST_HEAD(&req->intr_entry);
46 init_waitqueue_head(&req->waitq);
47 atomic_set(&req->count, 1);
4250c066 48 req->pages = pages;
b2430d75 49 req->page_descs = page_descs;
4250c066 50 req->max_pages = npages;
33e14b4d 51 __set_bit(FR_PENDING, &req->flags);
52}
53
4250c066 54static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
334f485d 55{
56 struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, flags);
57 if (req) {
58 struct page **pages;
b2430d75 59 struct fuse_page_desc *page_descs;
4250c066 60
b2430d75 61 if (npages <= FUSE_REQ_INLINE_PAGES) {
4250c066 62 pages = req->inline_pages;
63 page_descs = req->inline_page_descs;
64 } else {
4250c066 65 pages = kmalloc(sizeof(struct page *) * npages, flags);
66 page_descs = kmalloc(sizeof(struct fuse_page_desc) *
67 npages, flags);
68 }
4250c066 69
70 if (!pages || !page_descs) {
71 kfree(pages);
72 kfree(page_descs);
73 kmem_cache_free(fuse_req_cachep, req);
74 return NULL;
75 }
76
b2430d75 77 fuse_request_init(req, pages, page_descs, npages);
4250c066 78 }
79 return req;
80}
81
82struct fuse_req *fuse_request_alloc(unsigned npages)
83{
84 return __fuse_request_alloc(npages, GFP_KERNEL);
85}
08cbf542 86EXPORT_SYMBOL_GPL(fuse_request_alloc);
334f485d 87
4250c066 88struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
3be5a52b 89{
4250c066 90 return __fuse_request_alloc(npages, GFP_NOFS);
91}
92
93void fuse_request_free(struct fuse_req *req)
94{
b2430d75 95 if (req->pages != req->inline_pages) {
4250c066 96 kfree(req->pages);
97 kfree(req->page_descs);
98 }
99 kmem_cache_free(fuse_req_cachep, req);
100}
101
8bfc016d 102static void block_sigs(sigset_t *oldset)
103{
104 sigset_t mask;
105
106 siginitsetinv(&mask, sigmask(SIGKILL));
107 sigprocmask(SIG_BLOCK, &mask, oldset);
108}
109
8bfc016d 110static void restore_sigs(sigset_t *oldset)
111{
112 sigprocmask(SIG_SETMASK, oldset, NULL);
113}
114
36cf66ed 115void __fuse_get_request(struct fuse_req *req)
116{
117 atomic_inc(&req->count);
118}
119
120/* Must be called with > 1 refcount */
121static void __fuse_put_request(struct fuse_req *req)
122{
123 BUG_ON(atomic_read(&req->count) < 2);
124 atomic_dec(&req->count);
125}
126
127static void fuse_req_init_context(struct fuse_req *req)
128{
129 req->in.h.uid = from_kuid_munged(&init_user_ns, current_fsuid());
130 req->in.h.gid = from_kgid_munged(&init_user_ns, current_fsgid());
131 req->in.h.pid = current->pid;
132}
133
134void fuse_set_initialized(struct fuse_conn *fc)
135{
136 /* Make sure stores before this are seen on another CPU */
137 smp_wmb();
138 fc->initialized = 1;
139}
140
141static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
142{
143 return !fc->initialized || (for_background && fc->blocked);
144}
145
146static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
147 bool for_background)
334f485d 148{
08a53cdc 149 struct fuse_req *req;
08a53cdc 150 int err;
9bc5ddda 151 atomic_inc(&fc->num_waiting);
152
153 if (fuse_block_alloc(fc, for_background)) {
154 sigset_t oldset;
155 int intr;
156
157 block_sigs(&oldset);
722d2bea 158 intr = wait_event_interruptible_exclusive(fc->blocked_waitq,
159 !fuse_block_alloc(fc, for_background));
160 restore_sigs(&oldset);
161 err = -EINTR;
162 if (intr)
163 goto out;
164 }
165 /* Matches smp_wmb() in fuse_set_initialized() */
166 smp_rmb();
08a53cdc 167
168 err = -ENOTCONN;
169 if (!fc->connected)
170 goto out;
171
172 err = -ECONNREFUSED;
173 if (fc->conn_error)
174 goto out;
175
b111c8c0 176 req = fuse_request_alloc(npages);
9bc5ddda 177 err = -ENOMEM;
178 if (!req) {
179 if (for_background)
180 wake_up(&fc->blocked_waitq);
9bc5ddda 181 goto out;
722d2bea 182 }
334f485d 183
33649c91 184 fuse_req_init_context(req);
185 __set_bit(FR_WAITING, &req->flags);
186 if (for_background)
187 __set_bit(FR_BACKGROUND, &req->flags);
188
334f485d 189 return req;
190
191 out:
192 atomic_dec(&fc->num_waiting);
193 return ERR_PTR(err);
334f485d 194}
195
196struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
197{
198 return __fuse_get_req(fc, npages, false);
199}
08cbf542 200EXPORT_SYMBOL_GPL(fuse_get_req);
334f485d 201
202struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
203 unsigned npages)
204{
205 return __fuse_get_req(fc, npages, true);
206}
207EXPORT_SYMBOL_GPL(fuse_get_req_for_background);
208
209/*
210 * Return request in fuse_file->reserved_req. However that may
211 * currently be in use. If that is the case, wait for it to become
212 * available.
213 */
214static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
215 struct file *file)
216{
217 struct fuse_req *req = NULL;
218 struct fuse_file *ff = file->private_data;
219
220 do {
de5e3dec 221 wait_event(fc->reserved_req_waitq, ff->reserved_req);
222 spin_lock(&fc->lock);
223 if (ff->reserved_req) {
224 req = ff->reserved_req;
225 ff->reserved_req = NULL;
cb0942b8 226 req->stolen_file = get_file(file);
227 }
228 spin_unlock(&fc->lock);
229 } while (!req);
230
231 return req;
232}
233
234/*
235 * Put stolen request back into fuse_file->reserved_req
236 */
237static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
238{
239 struct file *file = req->stolen_file;
240 struct fuse_file *ff = file->private_data;
241
242 spin_lock(&fc->lock);
b2430d75 243 fuse_request_init(req, req->pages, req->page_descs, req->max_pages);
244 BUG_ON(ff->reserved_req);
245 ff->reserved_req = req;
de5e3dec 246 wake_up_all(&fc->reserved_req_waitq);
247 spin_unlock(&fc->lock);
248 fput(file);
249}
250
251/*
252 * Gets a request for a file operation, always succeeds
253 *
254 * This is used for sending the FLUSH request, which must get to
255 * userspace, due to POSIX locks which may need to be unlocked.
256 *
257 * If allocation fails due to OOM, use the reserved request in
258 * fuse_file.
259 *
260 * This is very unlikely to deadlock accidentally, since the
261 * filesystem should not have its own file open. If deadlock is
262 * intentional, it can still be broken by "aborting" the filesystem.
263 */
264struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
265 struct file *file)
266{
267 struct fuse_req *req;
268
269 atomic_inc(&fc->num_waiting);
0aada884 270 wait_event(fc->blocked_waitq, fc->initialized);
271 /* Matches smp_wmb() in fuse_set_initialized() */
272 smp_rmb();
b111c8c0 273 req = fuse_request_alloc(0);
274 if (!req)
275 req = get_reserved_req(fc, file);
276
277 fuse_req_init_context(req);
278 __set_bit(FR_WAITING, &req->flags);
279 __clear_bit(FR_BACKGROUND, &req->flags);
280 return req;
281}
282
334f485d 283void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
284{
285 if (atomic_dec_and_test(&req->count)) {
825d6d33 286 if (test_bit(FR_BACKGROUND, &req->flags)) {
287 /*
288 * We get here in the unlikely case that a background
289 * request was allocated but not sent
290 */
291 spin_lock(&fc->lock);
292 if (!fc->blocked)
293 wake_up(&fc->blocked_waitq);
294 spin_unlock(&fc->lock);
295 }
296
297 if (test_bit(FR_WAITING, &req->flags)) {
298 __clear_bit(FR_WAITING, &req->flags);
9bc5ddda 299 atomic_dec(&fc->num_waiting);
73e0e738 300 }
301
302 if (req->stolen_file)
303 put_reserved_req(fc, req);
304 else
305 fuse_request_free(req);
306 }
307}
08cbf542 308EXPORT_SYMBOL_GPL(fuse_put_request);
7128ec2a 309
310static unsigned len_args(unsigned numargs, struct fuse_arg *args)
311{
312 unsigned nbytes = 0;
313 unsigned i;
314
315 for (i = 0; i < numargs; i++)
316 nbytes += args[i].size;
317
318 return nbytes;
319}
320
f88996a9 321static u64 fuse_get_unique(struct fuse_iqueue *fiq)
d12def1b 322{
f88996a9 323 return ++fiq->reqctr;
324}
325
f88996a9 326static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
d12def1b 327{
328 req->in.h.len = sizeof(struct fuse_in_header) +
329 len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
f88996a9 330 list_add_tail(&req->list, &fiq->pending);
4ce60812 331 wake_up_locked(&fiq->waitq);
f88996a9 332 kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
333}
334
335void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
336 u64 nodeid, u64 nlookup)
337{
338 struct fuse_iqueue *fiq = &fc->iq;
339
340 forget->forget_one.nodeid = nodeid;
341 forget->forget_one.nlookup = nlookup;
07e77dca 342
4ce60812 343 spin_lock(&fiq->waitq.lock);
e16714d8 344 if (fiq->connected) {
345 fiq->forget_list_tail->next = forget;
346 fiq->forget_list_tail = forget;
4ce60812 347 wake_up_locked(&fiq->waitq);
f88996a9 348 kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
349 } else {
350 kfree(forget);
351 }
4ce60812 352 spin_unlock(&fiq->waitq.lock);
353}
354
355static void flush_bg_queue(struct fuse_conn *fc)
356{
7a6d3c8b 357 while (fc->active_background < fc->max_background &&
358 !list_empty(&fc->bg_queue)) {
359 struct fuse_req *req;
f88996a9 360 struct fuse_iqueue *fiq = &fc->iq;
361
362 req = list_entry(fc->bg_queue.next, struct fuse_req, list);
363 list_del(&req->list);
364 fc->active_background++;
4ce60812 365 spin_lock(&fiq->waitq.lock);
366 req->in.h.unique = fuse_get_unique(fiq);
367 queue_request(fiq, req);
4ce60812 368 spin_unlock(&fiq->waitq.lock);
369 }
370}
371
372/*
373 * This function is called when a request is finished. Either a reply
f9a2842e 374 * has arrived or it was aborted (and not yet sent) or some error
f43b155a 375 * occurred during communication with userspace, or the device file
376 * was closed. The requester thread is woken up (if still waiting),
377 * the 'end' callback is called if given, else the reference to the
378 * request is released
7128ec2a 379 *
d7133114 380 * Called with fc->lock, unlocks it
381 */
382static void request_end(struct fuse_conn *fc, struct fuse_req *req)
b9ca67b2 383__releases(fc->lock)
334f485d 384{
4ce60812 385 struct fuse_iqueue *fiq = &fc->iq;
386 void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
387 req->end = NULL;
0d8e84b0 388 list_del_init(&req->list);
4ce60812 389 spin_lock(&fiq->waitq.lock);
0d8e84b0 390 list_del_init(&req->intr_entry);
4ce60812 391 spin_unlock(&fiq->waitq.lock);
392 WARN_ON(test_bit(FR_PENDING, &req->flags));
393 WARN_ON(test_bit(FR_SENT, &req->flags));
c4775267 394 smp_wmb();
33e14b4d 395 set_bit(FR_FINISHED, &req->flags);
396 if (test_bit(FR_BACKGROUND, &req->flags)) {
397 clear_bit(FR_BACKGROUND, &req->flags);
722d2bea 398 if (fc->num_background == fc->max_background)
51eb01e7 399 fc->blocked = 0;
400
401 /* Wake up next waiter, if any */
3c18ef81 402 if (!fc->blocked && waitqueue_active(&fc->blocked_waitq))
403 wake_up(&fc->blocked_waitq);
404
7a6d3c8b 405 if (fc->num_background == fc->congestion_threshold &&
a325f9b9 406 fc->connected && fc->bdi_initialized) {
407 clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);
408 clear_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
f92b99b9 409 }
51eb01e7 410 fc->num_background--;
411 fc->active_background--;
412 flush_bg_queue(fc);
334f485d 413 }
51eb01e7 414 spin_unlock(&fc->lock);
415 wake_up(&req->waitq);
416 if (end)
417 end(fc, req);
e9bb09dd 418 fuse_put_request(fc, req);
419}
420
f88996a9 421static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
a4d27e75 422{
4ce60812 423 spin_lock(&fiq->waitq.lock);
424 if (list_empty(&req->intr_entry)) {
425 list_add_tail(&req->intr_entry, &fiq->interrupts);
426 wake_up_locked(&fiq->waitq);
427 }
4ce60812 428 spin_unlock(&fiq->waitq.lock);
f88996a9 429 kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
430}
431
7c352bdf 432static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
334f485d 433{
4ce60812 434 struct fuse_iqueue *fiq = &fc->iq;
435 int err;
436
437 if (!fc->no_interrupt) {
438 /* Any signal may interrupt this */
c4775267 439 err = wait_event_interruptible(req->waitq,
33e14b4d 440 test_bit(FR_FINISHED, &req->flags));
c4775267 441 if (!err)
442 return;
443
825d6d33 444 set_bit(FR_INTERRUPTED, &req->flags);
445 /* matches barrier in fuse_dev_do_read() */
446 smp_mb__after_atomic();
33e14b4d 447 if (test_bit(FR_SENT, &req->flags))
4ce60812 448 queue_interrupt(fiq, req);
449 }
450
825d6d33 451 if (!test_bit(FR_FORCE, &req->flags)) {
452 sigset_t oldset;
453
454 /* Only fatal signals may interrupt this */
51eb01e7 455 block_sigs(&oldset);
c4775267 456 err = wait_event_interruptible(req->waitq,
33e14b4d 457 test_bit(FR_FINISHED, &req->flags));
51eb01e7 458 restore_sigs(&oldset);
a131de0a 459
c4775267 460 if (!err)
461 return;
462
4ce60812 463 spin_lock(&fiq->waitq.lock);
a131de0a 464 /* Request is not yet in userspace, bail out */
33e14b4d 465 if (test_bit(FR_PENDING, &req->flags)) {
a131de0a 466 list_del(&req->list);
4ce60812 467 spin_unlock(&fiq->waitq.lock);
468 __fuse_put_request(req);
469 req->out.h.error = -EINTR;
470 return;
471 }
4ce60812 472 spin_unlock(&fiq->waitq.lock);
51eb01e7 473 }
334f485d 474
475 /*
476 * Either request is already in userspace, or it was forced.
477 * Wait it out.
478 */
33e14b4d 479 wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
480}
481
6a4e922c 482static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
334f485d 483{
484 struct fuse_iqueue *fiq = &fc->iq;
485
825d6d33 486 BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
4ce60812 487 spin_lock(&fiq->waitq.lock);
e16714d8 488 if (!fiq->connected) {
4ce60812 489 spin_unlock(&fiq->waitq.lock);
334f485d 490 req->out.h.error = -ENOTCONN;
c4775267 491 } else {
492 req->in.h.unique = fuse_get_unique(fiq);
493 queue_request(fiq, req);
494 /* acquire extra reference, since request is still needed
495 after request_end() */
496 __fuse_get_request(req);
4ce60812 497 spin_unlock(&fiq->waitq.lock);
334f485d 498
7c352bdf 499 request_wait_answer(fc, req);
500 /* Pairs with smp_wmb() in request_end() */
501 smp_rmb();
334f485d 502 }
334f485d 503}
504
505void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
506{
507 __set_bit(FR_ISREPLY, &req->flags);
508 if (!test_bit(FR_WAITING, &req->flags)) {
509 __set_bit(FR_WAITING, &req->flags);
510 atomic_inc(&fc->num_waiting);
511 }
512 __fuse_request_send(fc, req);
513}
08cbf542 514EXPORT_SYMBOL_GPL(fuse_request_send);
334f485d 515
516static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
517{
518 if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS)
519 args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE;
520
521 if (fc->minor < 9) {
522 switch (args->in.h.opcode) {
523 case FUSE_LOOKUP:
524 case FUSE_CREATE:
525 case FUSE_MKNOD:
526 case FUSE_MKDIR:
527 case FUSE_SYMLINK:
528 case FUSE_LINK:
529 args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
530 break;
531 case FUSE_GETATTR:
532 case FUSE_SETATTR:
533 args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
534 break;
535 }
536 }
537 if (fc->minor < 12) {
538 switch (args->in.h.opcode) {
539 case FUSE_CREATE:
540 args->in.args[0].size = sizeof(struct fuse_open_in);
541 break;
542 case FUSE_MKNOD:
543 args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
544 break;
545 }
546 }
547}
548
549ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
550{
551 struct fuse_req *req;
552 ssize_t ret;
553
554 req = fuse_get_req(fc, 0);
555 if (IS_ERR(req))
556 return PTR_ERR(req);
557
558 /* Needs to be done after fuse_get_req() so that fc->minor is valid */
559 fuse_adjust_compat(fc, args);
560
561 req->in.h.opcode = args->in.h.opcode;
562 req->in.h.nodeid = args->in.h.nodeid;
563 req->in.numargs = args->in.numargs;
564 memcpy(req->in.args, args->in.args,
565 args->in.numargs * sizeof(struct fuse_in_arg));
566 req->out.argvar = args->out.argvar;
567 req->out.numargs = args->out.numargs;
568 memcpy(req->out.args, args->out.args,
569 args->out.numargs * sizeof(struct fuse_arg));
570 fuse_request_send(fc, req);
571 ret = req->out.h.error;
572 if (!ret && args->out.argvar) {
573 BUG_ON(args->out.numargs != 1);
574 ret = req->out.args[0].size;
575 }
576 fuse_put_request(fc, req);
577
578 return ret;
579}
580
581/*
582 * Called under fc->lock
583 *
584 * fc->connected must have been checked previously
585 */
586void fuse_request_send_background_locked(struct fuse_conn *fc,
587 struct fuse_req *req)
d12def1b 588{
589 BUG_ON(!test_bit(FR_BACKGROUND, &req->flags));
590 if (!test_bit(FR_WAITING, &req->flags)) {
591 __set_bit(FR_WAITING, &req->flags);
592 atomic_inc(&fc->num_waiting);
593 }
825d6d33 594 __set_bit(FR_ISREPLY, &req->flags);
d12def1b 595 fc->num_background++;
7a6d3c8b 596 if (fc->num_background == fc->max_background)
d12def1b 597 fc->blocked = 1;
7a6d3c8b 598 if (fc->num_background == fc->congestion_threshold &&
a325f9b9 599 fc->bdi_initialized) {
600 set_bdi_congested(&fc->bdi, BLK_RW_SYNC);
601 set_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
602 }
603 list_add_tail(&req->list, &fc->bg_queue);
604 flush_bg_queue(fc);
605}
606
f0139aa8 607void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
334f485d 608{
42dc6211 609 BUG_ON(!req->end);
d7133114 610 spin_lock(&fc->lock);
1e9a4ed9 611 if (fc->connected) {
f0139aa8 612 fuse_request_send_background_locked(fc, req);
d7133114 613 spin_unlock(&fc->lock);
334f485d 614 } else {
42dc6211 615 spin_unlock(&fc->lock);
334f485d 616 req->out.h.error = -ENOTCONN;
617 req->end(fc, req);
618 fuse_put_request(fc, req);
619 }
620}
08cbf542 621EXPORT_SYMBOL_GPL(fuse_request_send_background);
334f485d 622
623static int fuse_request_send_notify_reply(struct fuse_conn *fc,
624 struct fuse_req *req, u64 unique)
625{
626 int err = -ENODEV;
f88996a9 627 struct fuse_iqueue *fiq = &fc->iq;
2d45ba38 628
825d6d33 629 __clear_bit(FR_ISREPLY, &req->flags);
2d45ba38 630 req->in.h.unique = unique;
4ce60812 631 spin_lock(&fiq->waitq.lock);
e16714d8 632 if (fiq->connected) {
f88996a9 633 queue_request(fiq, req);
634 err = 0;
635 }
4ce60812 636 spin_unlock(&fiq->waitq.lock);
637
638 return err;
639}
640
641void fuse_force_forget(struct file *file, u64 nodeid)
642{
6131ffaa 643 struct inode *inode = file_inode(file);
644 struct fuse_conn *fc = get_fuse_conn(inode);
645 struct fuse_req *req;
646 struct fuse_forget_in inarg;
647
648 memset(&inarg, 0, sizeof(inarg));
649 inarg.nlookup = 1;
b111c8c0 650 req = fuse_get_req_nofail_nopages(fc, file);
651 req->in.h.opcode = FUSE_FORGET;
652 req->in.h.nodeid = nodeid;
653 req->in.numargs = 1;
654 req->in.args[0].size = sizeof(inarg);
655 req->in.args[0].value = &inarg;
825d6d33 656 __clear_bit(FR_ISREPLY, &req->flags);
657 __fuse_request_send(fc, req);
658 /* ignore errors */
659 fuse_put_request(fc, req);
660}
661
662/*
663 * Lock the request. Up to the next unlock_request() there mustn't be
664 * anything that could cause a page-fault. If the request was already
f9a2842e 665 * aborted bail out.
334f485d 666 */
dc00809a 667static int lock_request(struct fuse_req *req)
668{
669 int err = 0;
670 if (req) {
dc00809a 671 spin_lock(&req->waitq.lock);
825d6d33 672 if (test_bit(FR_ABORTED, &req->flags))
673 err = -ENOENT;
674 else
825d6d33 675 set_bit(FR_LOCKED, &req->flags);
dc00809a 676 spin_unlock(&req->waitq.lock);
677 }
678 return err;
679}
680
681/*
682 * Unlock request. If it was aborted while locked, caller is responsible
683 * for unlocking and ending the request.
334f485d 684 */
dc00809a 685static int unlock_request(struct fuse_req *req)
334f485d 686{
0d8e84b0 687 int err = 0;
334f485d 688 if (req) {
dc00809a 689 spin_lock(&req->waitq.lock);
825d6d33 690 if (test_bit(FR_ABORTED, &req->flags))
691 err = -ENOENT;
692 else
825d6d33 693 clear_bit(FR_LOCKED, &req->flags);
dc00809a 694 spin_unlock(&req->waitq.lock);
334f485d 695 }
0d8e84b0 696 return err;
697}
698
699struct fuse_copy_state {
700 int write;
701 struct fuse_req *req;
6c09e94a 702 struct iov_iter *iter;
703 struct pipe_buffer *pipebufs;
704 struct pipe_buffer *currbuf;
705 struct pipe_inode_info *pipe;
334f485d 706 unsigned long nr_segs;
334f485d 707 struct page *pg;
334f485d 708 unsigned len;
c55a01d3 709 unsigned offset;
ce534fb0 710 unsigned move_pages:1;
711};
712
dc00809a 713static void fuse_copy_init(struct fuse_copy_state *cs, int write,
6c09e94a 714 struct iov_iter *iter)
715{
716 memset(cs, 0, sizeof(*cs));
717 cs->write = write;
6c09e94a 718 cs->iter = iter;
719}
720
721/* Unmap and put previous page of userspace buffer */
8bfc016d 722static void fuse_copy_finish(struct fuse_copy_state *cs)
334f485d 723{
724 if (cs->currbuf) {
725 struct pipe_buffer *buf = cs->currbuf;
726
c55a01d3 727 if (cs->write)
c3021629 728 buf->len = PAGE_SIZE - cs->len;
dd3bb14f 729 cs->currbuf = NULL;
c55a01d3 730 } else if (cs->pg) {
731 if (cs->write) {
732 flush_dcache_page(cs->pg);
733 set_page_dirty_lock(cs->pg);
734 }
735 put_page(cs->pg);
334f485d 736 }
c55a01d3 737 cs->pg = NULL;
738}
739
740/*
741 * Get another pagefull of userspace buffer, and map it to kernel
742 * address space, and lock request
743 */
744static int fuse_copy_fill(struct fuse_copy_state *cs)
745{
c55a01d3 746 struct page *page;
747 int err;
748
dc00809a 749 err = unlock_request(cs->req);
750 if (err)
751 return err;
752
334f485d 753 fuse_copy_finish(cs);
754 if (cs->pipebufs) {
755 struct pipe_buffer *buf = cs->pipebufs;
756
757 if (!cs->write) {
758 err = buf->ops->confirm(cs->pipe, buf);
759 if (err)
760 return err;
761
762 BUG_ON(!cs->nr_segs);
763 cs->currbuf = buf;
764 cs->pg = buf->page;
765 cs->offset = buf->offset;
c3021629 766 cs->len = buf->len;
767 cs->pipebufs++;
768 cs->nr_segs--;
769 } else {
770 if (cs->nr_segs == cs->pipe->buffers)
771 return -EIO;
772
773 page = alloc_page(GFP_HIGHUSER);
774 if (!page)
775 return -ENOMEM;
776
777 buf->page = page;
778 buf->offset = 0;
779 buf->len = 0;
780
781 cs->currbuf = buf;
782 cs->pg = page;
783 cs->offset = 0;
784 cs->len = PAGE_SIZE;
785 cs->pipebufs++;
786 cs->nr_segs++;
787 }
dd3bb14f 788 } else {
789 size_t off;
790 err = iov_iter_get_pages(cs->iter, &page, PAGE_SIZE, 1, &off);
791 if (err < 0)
792 return err;
793 BUG_ON(!err);
794 cs->len = err;
795 cs->offset = off;
c55a01d3 796 cs->pg = page;
797 cs->offset = off;
798 iov_iter_advance(cs->iter, err);
334f485d 799 }
334f485d 800
dc00809a 801 return lock_request(cs->req);
802}
803
804/* Do as much copy to/from userspace buffer as we can */
8bfc016d 805static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
806{
807 unsigned ncpy = min(*size, cs->len);
808 if (val) {
809 void *pgaddr = kmap_atomic(cs->pg);
810 void *buf = pgaddr + cs->offset;
811
334f485d 812 if (cs->write)
c55a01d3 813 memcpy(buf, *val, ncpy);
334f485d 814 else
815 memcpy(*val, buf, ncpy);
816
817 kunmap_atomic(pgaddr);
818 *val += ncpy;
819 }
820 *size -= ncpy;
821 cs->len -= ncpy;
c55a01d3 822 cs->offset += ncpy;
823 return ncpy;
824}
825
826static int fuse_check_page(struct page *page)
827{
828 if (page_mapcount(page) ||
829 page->mapping != NULL ||
830 page_count(page) != 1 ||
831 (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
832 ~(1 << PG_locked |
833 1 << PG_referenced |
834 1 << PG_uptodate |
835 1 << PG_lru |
836 1 << PG_active |
837 1 << PG_reclaim))) {
838 printk(KERN_WARNING "fuse: trying to steal weird page\n");
839 printk(KERN_WARNING " page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
840 return 1;
841 }
842 return 0;
843}
844
845static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
846{
847 int err;
848 struct page *oldpage = *pagep;
849 struct page *newpage;
850 struct pipe_buffer *buf = cs->pipebufs;
ce534fb0 851
dc00809a 852 err = unlock_request(cs->req);
853 if (err)
854 return err;
855
856 fuse_copy_finish(cs);
857
858 err = buf->ops->confirm(cs->pipe, buf);
859 if (err)
860 return err;
861
862 BUG_ON(!cs->nr_segs);
863 cs->currbuf = buf;
864 cs->len = buf->len;
865 cs->pipebufs++;
866 cs->nr_segs--;
867
868 if (cs->len != PAGE_SIZE)
869 goto out_fallback;
870
871 if (buf->ops->steal(cs->pipe, buf) != 0)
872 goto out_fallback;
873
874 newpage = buf->page;
875
876 if (!PageUptodate(newpage))
877 SetPageUptodate(newpage);
878
879 ClearPageMappedToDisk(newpage);
880
881 if (fuse_check_page(newpage) != 0)
882 goto out_fallback_unlock;
883
884 /*
885 * This is a new and locked page, it shouldn't be mapped or
886 * have any special flags on it
887 */
888 if (WARN_ON(page_mapped(oldpage)))
889 goto out_fallback_unlock;
890 if (WARN_ON(page_has_private(oldpage)))
891 goto out_fallback_unlock;
892 if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
893 goto out_fallback_unlock;
894 if (WARN_ON(PageMlocked(oldpage)))
895 goto out_fallback_unlock;
896
ef6a3c63 897 err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
ce534fb0 898 if (err) {
899 unlock_page(newpage);
900 return err;
ce534fb0 901 }
ef6a3c63 902
903 page_cache_get(newpage);
904
905 if (!(buf->flags & PIPE_BUF_FLAG_LRU))
906 lru_cache_add_file(newpage);
907
908 err = 0;
dc00809a 909 spin_lock(&cs->req->waitq.lock);
825d6d33 910 if (test_bit(FR_ABORTED, &cs->req->flags))
911 err = -ENOENT;
912 else
913 *pagep = newpage;
dc00809a 914 spin_unlock(&cs->req->waitq.lock);
915
916 if (err) {
917 unlock_page(newpage);
918 page_cache_release(newpage);
919 return err;
920 }
921
922 unlock_page(oldpage);
923 page_cache_release(oldpage);
924 cs->len = 0;
925
926 return 0;
927
928out_fallback_unlock:
929 unlock_page(newpage);
930out_fallback:
931 cs->pg = buf->page;
932 cs->offset = buf->offset;
ce534fb0 933
dc00809a 934 err = lock_request(cs->req);
935 if (err)
936 return err;
937
938 return 1;
939}
940
941static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
942 unsigned offset, unsigned count)
943{
944 struct pipe_buffer *buf;
0d8e84b0 945 int err;
946
947 if (cs->nr_segs == cs->pipe->buffers)
948 return -EIO;
949
dc00809a 950 err = unlock_request(cs->req);
951 if (err)
952 return err;
953
954 fuse_copy_finish(cs);
955
956 buf = cs->pipebufs;
957 page_cache_get(page);
958 buf->page = page;
959 buf->offset = offset;
960 buf->len = count;
961
962 cs->pipebufs++;
963 cs->nr_segs++;
964 cs->len = 0;
965
966 return 0;
967}
968
969/*
970 * Copy a page in the request to/from the userspace buffer. Must be
971 * done atomically
972 */
ce534fb0 973static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
8bfc016d 974 unsigned offset, unsigned count, int zeroing)
334f485d 975{
976 int err;
977 struct page *page = *pagep;
978
979 if (page && zeroing && count < PAGE_SIZE)
980 clear_highpage(page);
981
334f485d 982 while (count) {
983 if (cs->write && cs->pipebufs && page) {
984 return fuse_ref_page(cs, page, offset, count);
985 } else if (!cs->len) {
986 if (cs->move_pages && page &&
987 offset == 0 && count == PAGE_SIZE) {
988 err = fuse_try_move_page(cs, pagep);
989 if (err <= 0)
990 return err;
991 } else {
992 err = fuse_copy_fill(cs);
993 if (err)
994 return err;
995 }
1729a16c 996 }
334f485d 997 if (page) {
2408f6ef 998 void *mapaddr = kmap_atomic(page);
999 void *buf = mapaddr + offset;
1000 offset += fuse_copy_do(cs, &buf, &count);
2408f6ef 1001 kunmap_atomic(mapaddr);
1002 } else
1003 offset += fuse_copy_do(cs, NULL, &count);
1004 }
1005 if (page && !cs->write)
1006 flush_dcache_page(page);
1007 return 0;
1008}
1009
1010/* Copy pages in the request to/from userspace buffer */
1011static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
1012 int zeroing)
1013{
1014 unsigned i;
1015 struct fuse_req *req = cs->req;
1016
1017 for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
ce534fb0 1018 int err;
1019 unsigned offset = req->page_descs[i].offset;
1020 unsigned count = min(nbytes, req->page_descs[i].length);
1021
1022 err = fuse_copy_page(cs, &req->pages[i], offset, count,
1023 zeroing);
1024 if (err)
1025 return err;
1026
1027 nbytes -= count;
1028 }
1029 return 0;
1030}
1031
1032/* Copy a single argument in the request to/from userspace buffer */
1033static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
1034{
1035 while (size) {
1036 if (!cs->len) {
1037 int err = fuse_copy_fill(cs);
1038 if (err)
1039 return err;
1040 }
1041 fuse_copy_do(cs, &val, &size);
1042 }
1043 return 0;
1044}
1045
1046/* Copy request arguments to/from userspace buffer */
1047static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
1048 unsigned argpages, struct fuse_arg *args,
1049 int zeroing)
1050{
1051 int err = 0;
1052 unsigned i;
1053
1054 for (i = 0; !err && i < numargs; i++) {
1055 struct fuse_arg *arg = &args[i];
1056 if (i == numargs - 1 && argpages)
1057 err = fuse_copy_pages(cs, arg->size, zeroing);
1058 else
1059 err = fuse_copy_one(cs, arg->value, arg->size);
1060 }
1061 return err;
1062}
1063
f88996a9 1064static int forget_pending(struct fuse_iqueue *fiq)
07e77dca 1065{
f88996a9 1066 return fiq->forget_list_head.next != NULL;
1067}
1068
f88996a9 1069static int request_pending(struct fuse_iqueue *fiq)
a4d27e75 1070{
1071 return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) ||
1072 forget_pending(fiq);
1073}
1074
1075/*
1076 * Transfer an interrupt request to userspace
1077 *
1078 * Unlike other requests this is assembled on demand, without a need
1079 * to allocate a separate fuse_req structure.
1080 *
fd22d62e 1081 * Called with fiq->waitq.lock held, releases it
a4d27e75 1082 */
1083static int fuse_read_interrupt(struct fuse_iqueue *fiq,
1084 struct fuse_copy_state *cs,
c3021629 1085 size_t nbytes, struct fuse_req *req)
fd22d62e 1086__releases(fiq->waitq.lock)
a4d27e75 1087{
1088 struct fuse_in_header ih;
1089 struct fuse_interrupt_in arg;
1090 unsigned reqsize = sizeof(ih) + sizeof(arg);
1091 int err;
1092
1093 list_del_init(&req->intr_entry);
4ce60812 1094 req->intr_unique = fuse_get_unique(fiq);
1095 memset(&ih, 0, sizeof(ih));
1096 memset(&arg, 0, sizeof(arg));
1097 ih.len = reqsize;
1098 ih.opcode = FUSE_INTERRUPT;
1099 ih.unique = req->intr_unique;
1100 arg.unique = req->in.h.unique;
1101
4ce60812 1102 spin_unlock(&fiq->waitq.lock);
c3021629 1103 if (nbytes < reqsize)
1104 return -EINVAL;
1105
c3021629 1106 err = fuse_copy_one(cs, &ih, sizeof(ih));
a4d27e75 1107 if (!err)
1108 err = fuse_copy_one(cs, &arg, sizeof(arg));
1109 fuse_copy_finish(cs);
1110
1111 return err ? err : reqsize;
1112}
1113
f88996a9 1114static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq,
1115 unsigned max,
1116 unsigned *countp)
07e77dca 1117{
f88996a9 1118 struct fuse_forget_link *head = fiq->forget_list_head.next;
1119 struct fuse_forget_link **newhead = &head;
1120 unsigned count;
07e77dca 1121
1122 for (count = 0; *newhead != NULL && count < max; count++)
1123 newhead = &(*newhead)->next;
1124
f88996a9 1125 fiq->forget_list_head.next = *newhead;
02c048b9 1126 *newhead = NULL;
1127 if (fiq->forget_list_head.next == NULL)
1128 fiq->forget_list_tail = &fiq->forget_list_head;
07e77dca 1129
1130 if (countp != NULL)
1131 *countp = count;
1132
1133 return head;
1134}
1135
fd22d62e 1136static int fuse_read_single_forget(struct fuse_iqueue *fiq,
1137 struct fuse_copy_state *cs,
1138 size_t nbytes)
fd22d62e 1139__releases(fiq->waitq.lock)
1140{
1141 int err;
f88996a9 1142 struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL);
07e77dca 1143 struct fuse_forget_in arg = {
02c048b9 1144 .nlookup = forget->forget_one.nlookup,
1145 };
1146 struct fuse_in_header ih = {
1147 .opcode = FUSE_FORGET,
02c048b9 1148 .nodeid = forget->forget_one.nodeid,
f88996a9 1149 .unique = fuse_get_unique(fiq),
1150 .len = sizeof(ih) + sizeof(arg),
1151 };
1152
4ce60812 1153 spin_unlock(&fiq->waitq.lock);
1154 kfree(forget);
1155 if (nbytes < ih.len)
1156 return -EINVAL;
1157
1158 err = fuse_copy_one(cs, &ih, sizeof(ih));
1159 if (!err)
1160 err = fuse_copy_one(cs, &arg, sizeof(arg));
1161 fuse_copy_finish(cs);
1162
1163 if (err)
1164 return err;
1165
1166 return ih.len;
1167}
1168
fd22d62e 1169static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
02c048b9 1170 struct fuse_copy_state *cs, size_t nbytes)
fd22d62e 1171__releases(fiq->waitq.lock)
1172{
1173 int err;
1174 unsigned max_forgets;
1175 unsigned count;
1176 struct fuse_forget_link *head;
1177 struct fuse_batch_forget_in arg = { .count = 0 };
1178 struct fuse_in_header ih = {
1179 .opcode = FUSE_BATCH_FORGET,
f88996a9 1180 .unique = fuse_get_unique(fiq),
1181 .len = sizeof(ih) + sizeof(arg),
1182 };
1183
1184 if (nbytes < ih.len) {
4ce60812 1185 spin_unlock(&fiq->waitq.lock);
1186 return -EINVAL;
1187 }
1188
1189 max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
f88996a9 1190 head = dequeue_forget(fiq, max_forgets, &count);
4ce60812 1191 spin_unlock(&fiq->waitq.lock);
1192
1193 arg.count = count;
1194 ih.len += count * sizeof(struct fuse_forget_one);
1195 err = fuse_copy_one(cs, &ih, sizeof(ih));
1196 if (!err)
1197 err = fuse_copy_one(cs, &arg, sizeof(arg));
1198
1199 while (head) {
1200 struct fuse_forget_link *forget = head;
1201
1202 if (!err) {
1203 err = fuse_copy_one(cs, &forget->forget_one,
1204 sizeof(forget->forget_one));
1205 }
1206 head = forget->next;
1207 kfree(forget);
1208 }
1209
1210 fuse_copy_finish(cs);
1211
1212 if (err)
1213 return err;
1214
1215 return ih.len;
1216}
1217
1218static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
1219 struct fuse_copy_state *cs,
02c048b9 1220 size_t nbytes)
fd22d62e 1221__releases(fiq->waitq.lock)
02c048b9 1222{
f88996a9 1223 if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
fd22d62e 1224 return fuse_read_single_forget(fiq, cs, nbytes);
02c048b9 1225 else
fd22d62e 1226 return fuse_read_batch_forget(fiq, cs, nbytes);
1227}
1228
1229/*
1230 * Read a single request into the userspace filesystem's buffer. This
1231 * function waits until a request is available, then removes it from
1232 * the pending list and copies request data to userspace buffer. If
1233 * no reply is needed (FORGET) or request has been aborted or there
1234 * was an error during the copying then it's finished by calling
1235 * request_end(). Otherwise add it to the processing list, and set
1236 * the 'sent' flag.
1237 */
1238static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
1239 struct fuse_copy_state *cs, size_t nbytes)
1240{
1241 int err;
f88996a9 1242 struct fuse_iqueue *fiq = &fc->iq;
3a2b5b9c 1243 struct fuse_pqueue *fpq = &fc->pq;
1244 struct fuse_req *req;
1245 struct fuse_in *in;
1246 unsigned reqsize;
1247
1d3d752b 1248 restart:
4ce60812 1249 spin_lock(&fiq->waitq.lock);
e5ac1d1e 1250 err = -EAGAIN;
e16714d8 1251 if ((file->f_flags & O_NONBLOCK) && fiq->connected &&
f88996a9 1252 !request_pending(fiq))
1253 goto err_unlock;
1254
1255 err = wait_event_interruptible_exclusive_locked(fiq->waitq,
1256 !fiq->connected || request_pending(fiq));
1257 if (err)
1258 goto err_unlock;
1259
334f485d 1260 err = -ENODEV;
e16714d8 1261 if (!fiq->connected)
334f485d 1262 goto err_unlock;
334f485d 1263
1264 if (!list_empty(&fiq->interrupts)) {
1265 req = list_entry(fiq->interrupts.next, struct fuse_req,
a4d27e75 1266 intr_entry);
fd22d62e 1267 return fuse_read_interrupt(fiq, cs, nbytes, req);
1268 }
1269
1270 if (forget_pending(fiq)) {
1271 if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0)
fd22d62e 1272 return fuse_read_forget(fc, fiq, cs, nbytes);
07e77dca 1273
1274 if (fiq->forget_batch <= -8)
1275 fiq->forget_batch = 16;
1276 }
1277
f88996a9 1278 req = list_entry(fiq->pending.next, struct fuse_req, list);
33e14b4d 1279 clear_bit(FR_PENDING, &req->flags);
ef759258 1280 list_del_init(&req->list);
1281 spin_unlock(&fiq->waitq.lock);
1282
fd22d62e 1283 spin_lock(&fc->lock);
3a2b5b9c 1284 list_add(&req->list, &fpq->io);
1285
1286 in = &req->in;
1287 reqsize = in->h.len;
1288 /* If request is too large, reply with an error and restart the read */
c3021629 1289 if (nbytes < reqsize) {
1290 req->out.h.error = -EIO;
1291 /* SETXATTR is special, since it may contain too large data */
1292 if (in->h.opcode == FUSE_SETXATTR)
1293 req->out.h.error = -E2BIG;
1294 request_end(fc, req);
1295 goto restart;
334f485d 1296 }
d7133114 1297 spin_unlock(&fc->lock);
1298 cs->req = req;
1299 err = fuse_copy_one(cs, &in->h, sizeof(in->h));
1d3d752b 1300 if (!err)
c3021629 1301 err = fuse_copy_args(cs, in->numargs, in->argpages,
1d3d752b 1302 (struct fuse_arg *) in->args, 0);
c3021629 1303 fuse_copy_finish(cs);
d7133114 1304 spin_lock(&fc->lock);
825d6d33 1305 clear_bit(FR_LOCKED, &req->flags);
0d8e84b0 1306 if (!fc->connected) {
1307 request_end(fc, req);
1308 return -ENODEV;
1309 }
334f485d 1310 if (err) {
c9c9d7df 1311 req->out.h.error = -EIO;
1312 request_end(fc, req);
1313 return err;
1314 }
825d6d33 1315 if (!test_bit(FR_ISREPLY, &req->flags)) {
334f485d 1316 request_end(fc, req);
825d6d33 1317 } else {
3a2b5b9c 1318 list_move_tail(&req->list, &fpq->processing);
1319 set_bit(FR_SENT, &req->flags);
1320 /* matches barrier in request_wait_answer() */
1321 smp_mb__after_atomic();
825d6d33 1322 if (test_bit(FR_INTERRUPTED, &req->flags))
f88996a9 1323 queue_interrupt(fiq, req);
d7133114 1324 spin_unlock(&fc->lock);
1325 }
1326 return reqsize;
1327
1328 err_unlock:
4ce60812 1329 spin_unlock(&fiq->waitq.lock);
1330 return err;
1331}
1332
1333static int fuse_dev_open(struct inode *inode, struct file *file)
1334{
1335 /*
1336 * The fuse device file's private_data is used to hold
1337 * the fuse_conn(ection) when it is mounted, and is used to
1338 * keep track of whether the file has been mounted already.
1339 */
1340 file->private_data = NULL;
1341 return 0;
1342}
1343
fbdbacca 1344static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
1345{
1346 struct fuse_copy_state cs;
1347 struct file *file = iocb->ki_filp;
1348 struct fuse_conn *fc = fuse_get_conn(file);
1349 if (!fc)
1350 return -EPERM;
1351
1352 if (!iter_is_iovec(to))
1353 return -EINVAL;
1354
dc00809a 1355 fuse_copy_init(&cs, 1, to);
c3021629 1356
fbdbacca 1357 return fuse_dev_do_read(fc, file, &cs, iov_iter_count(to));
1358}
1359
1360static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
1361 struct pipe_inode_info *pipe,
1362 size_t len, unsigned int flags)
1363{
1364 int ret;
1365 int page_nr = 0;
1366 int do_wakeup = 0;
1367 struct pipe_buffer *bufs;
1368 struct fuse_copy_state cs;
1369 struct fuse_conn *fc = fuse_get_conn(in);
1370 if (!fc)
1371 return -EPERM;
1372
07e77dca 1373 bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
1374 if (!bufs)
1375 return -ENOMEM;
1376
dc00809a 1377 fuse_copy_init(&cs, 1, NULL);
1378 cs.pipebufs = bufs;
1379 cs.pipe = pipe;
1380 ret = fuse_dev_do_read(fc, in, &cs, len);
1381 if (ret < 0)
1382 goto out;
1383
1384 ret = 0;
1385 pipe_lock(pipe);
1386
1387 if (!pipe->readers) {
1388 send_sig(SIGPIPE, current, 0);
1389 if (!ret)
1390 ret = -EPIPE;
1391 goto out_unlock;
1392 }
1393
1394 if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
1395 ret = -EIO;
1396 goto out_unlock;
1397 }
1398
1399 while (page_nr < cs.nr_segs) {
1400 int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
1401 struct pipe_buffer *buf = pipe->bufs + newbuf;
1402
1403 buf->page = bufs[page_nr].page;
1404 buf->offset = bufs[page_nr].offset;
1405 buf->len = bufs[page_nr].len;
1406 /*
1407 * Need to be careful about this. Having buf->ops in module
1408 * code can Oops if the buffer persists after module unload.
1409 */
1410 buf->ops = &nosteal_pipe_buf_ops;
1411
1412 pipe->nrbufs++;
1413 page_nr++;
1414 ret += buf->len;
1415
6447a3cf 1416 if (pipe->files)
1417 do_wakeup = 1;
1418 }
1419
1420out_unlock:
1421 pipe_unlock(pipe);
1422
1423 if (do_wakeup) {
1424 smp_mb();
1425 if (waitqueue_active(&pipe->wait))
1426 wake_up_interruptible(&pipe->wait);
1427 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
1428 }
1429
1430out:
1431 for (; page_nr < cs.nr_segs; page_nr++)
1432 page_cache_release(bufs[page_nr].page);
1433
1434 kfree(bufs);
1435 return ret;
1436}
1437
1438static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
1439 struct fuse_copy_state *cs)
1440{
1441 struct fuse_notify_poll_wakeup_out outarg;
f6d47a17 1442 int err = -EINVAL;
1443
1444 if (size != sizeof(outarg))
f6d47a17 1445 goto err;
1446
1447 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1448 if (err)
f6d47a17 1449 goto err;
95668a69 1450
f6d47a17 1451 fuse_copy_finish(cs);
95668a69 1452 return fuse_notify_poll_wakeup(fc, &outarg);
1453
1454err:
1455 fuse_copy_finish(cs);
1456 return err;
1457}
1458
1459static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
1460 struct fuse_copy_state *cs)
1461{
1462 struct fuse_notify_inval_inode_out outarg;
1463 int err = -EINVAL;
1464
1465 if (size != sizeof(outarg))
1466 goto err;
1467
1468 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1469 if (err)
1470 goto err;
1471 fuse_copy_finish(cs);
1472
1473 down_read(&fc->killsb);
1474 err = -ENOENT;
1475 if (fc->sb) {
1476 err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
1477 outarg.off, outarg.len);
1478 }
1479 up_read(&fc->killsb);
1480 return err;
1481
1482err:
1483 fuse_copy_finish(cs);
1484 return err;
1485}
1486
1487static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
1488 struct fuse_copy_state *cs)
1489{
1490 struct fuse_notify_inval_entry_out outarg;
1491 int err = -ENOMEM;
1492 char *buf;
1493 struct qstr name;
1494
1495 buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
1496 if (!buf)
1497 goto err;
1498
1499 err = -EINVAL;
1500 if (size < sizeof(outarg))
1501 goto err;
1502
1503 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1504 if (err)
1505 goto err;
1506
1507 err = -ENAMETOOLONG;
1508 if (outarg.namelen > FUSE_NAME_MAX)
1509 goto err;
1510
1511 err = -EINVAL;
1512 if (size != sizeof(outarg) + outarg.namelen + 1)
1513 goto err;
1514
1515 name.name = buf;
1516 name.len = outarg.namelen;
1517 err = fuse_copy_one(cs, buf, outarg.namelen + 1);
1518 if (err)
1519 goto err;
1520 fuse_copy_finish(cs);
1521 buf[outarg.namelen] = 0;
1522 name.hash = full_name_hash(name.name, name.len);
1523
1524 down_read(&fc->killsb);
1525 err = -ENOENT;
b21dda43 1526 if (fc->sb)
1527 err = fuse_reverse_inval_entry(fc->sb, outarg.parent, 0, &name);
1528 up_read(&fc->killsb);
1529 kfree(buf);
1530 return err;
1531
1532err:
1533 kfree(buf);
1534 fuse_copy_finish(cs);
1535 return err;
1536}
1537
1538static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
1539 struct fuse_copy_state *cs)
1540{
1541 struct fuse_notify_delete_out outarg;
1542 int err = -ENOMEM;
1543 char *buf;
1544 struct qstr name;
1545
1546 buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
1547 if (!buf)
1548 goto err;
1549
1550 err = -EINVAL;
1551 if (size < sizeof(outarg))
1552 goto err;
1553
1554 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1555 if (err)
1556 goto err;
1557
1558 err = -ENAMETOOLONG;
1559 if (outarg.namelen > FUSE_NAME_MAX)
1560 goto err;
1561
1562 err = -EINVAL;
1563 if (size != sizeof(outarg) + outarg.namelen + 1)
1564 goto err;
1565
1566 name.name = buf;
1567 name.len = outarg.namelen;
1568 err = fuse_copy_one(cs, buf, outarg.namelen + 1);
1569 if (err)
1570 goto err;
1571 fuse_copy_finish(cs);
1572 buf[outarg.namelen] = 0;
1573 name.hash = full_name_hash(name.name, name.len);
1574
1575 down_read(&fc->killsb);
1576 err = -ENOENT;
1577 if (fc->sb)
1578 err = fuse_reverse_inval_entry(fc->sb, outarg.parent,
1579 outarg.child, &name);
3b463ae0 1580 up_read(&fc->killsb);
b2d82ee3 1581 kfree(buf);
1582 return err;
1583
1584err:
b2d82ee3 1585 kfree(buf);
1586 fuse_copy_finish(cs);
1587 return err;
1588}
1589
1590static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
1591 struct fuse_copy_state *cs)
1592{
1593 struct fuse_notify_store_out outarg;
1594 struct inode *inode;
1595 struct address_space *mapping;
1596 u64 nodeid;
1597 int err;
1598 pgoff_t index;
1599 unsigned int offset;
1600 unsigned int num;
1601 loff_t file_size;
1602 loff_t end;
1603
1604 err = -EINVAL;
1605 if (size < sizeof(outarg))
1606 goto out_finish;
1607
1608 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1609 if (err)
1610 goto out_finish;
1611
1612 err = -EINVAL;
1613 if (size - sizeof(outarg) != outarg.size)
1614 goto out_finish;
1615
1616 nodeid = outarg.nodeid;
1617
1618 down_read(&fc->killsb);
1619
1620 err = -ENOENT;
1621 if (!fc->sb)
1622 goto out_up_killsb;
1623
1624 inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
1625 if (!inode)
1626 goto out_up_killsb;
1627
1628 mapping = inode->i_mapping;
1629 index = outarg.offset >> PAGE_CACHE_SHIFT;
1630 offset = outarg.offset & ~PAGE_CACHE_MASK;
1631 file_size = i_size_read(inode);
1632 end = outarg.offset + outarg.size;
1633 if (end > file_size) {
1634 file_size = end;
1635 fuse_write_update_size(inode, file_size);
1636 }
1637
1638 num = outarg.size;
1639 while (num) {
1640 struct page *page;
1641 unsigned int this_num;
1642
1643 err = -ENOMEM;
1644 page = find_or_create_page(mapping, index,
1645 mapping_gfp_mask(mapping));
1646 if (!page)
1647 goto out_iput;
1648
1649 this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
1650 err = fuse_copy_page(cs, &page, offset, this_num, 0);
1651 if (!err && offset == 0 &&
1652 (this_num == PAGE_CACHE_SIZE || file_size == end))
1653 SetPageUptodate(page);
1654 unlock_page(page);
1655 page_cache_release(page);
1656
1657 if (err)
1658 goto out_iput;
1659
1660 num -= this_num;
1661 offset = 0;
1662 index++;
1663 }
1664
1665 err = 0;
1666
1667out_iput:
1668 iput(inode);
1669out_up_killsb:
1670 up_read(&fc->killsb);
1671out_finish:
1672 fuse_copy_finish(cs);
1673 return err;
1674}
1675
1676static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
1677{
b745bc85 1678 release_pages(req->pages, req->num_pages, false);
1679}
1680
1681static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
1682 struct fuse_notify_retrieve_out *outarg)
1683{
1684 int err;
1685 struct address_space *mapping = inode->i_mapping;
1686 struct fuse_req *req;
1687 pgoff_t index;
1688 loff_t file_size;
1689 unsigned int num;
1690 unsigned int offset;
0157443c 1691 size_t total_len = 0;
4d53dc99 1692 int num_pages;
2d45ba38 1693
1694 offset = outarg->offset & ~PAGE_CACHE_MASK;
1695 file_size = i_size_read(inode);
1696
1697 num = outarg->size;
1698 if (outarg->offset > file_size)
1699 num = 0;
1700 else if (outarg->offset + num > file_size)
1701 num = file_size - outarg->offset;
1702
1703 num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
1704 num_pages = min(num_pages, FUSE_MAX_PAGES_PER_REQ);
1705
1706 req = fuse_get_req(fc, num_pages);
1707 if (IS_ERR(req))
1708 return PTR_ERR(req);
1709
1710 req->in.h.opcode = FUSE_NOTIFY_REPLY;
1711 req->in.h.nodeid = outarg->nodeid;
1712 req->in.numargs = 2;
1713 req->in.argpages = 1;
b2430d75 1714 req->page_descs[0].offset = offset;
1715 req->end = fuse_retrieve_end;
1716
1717 index = outarg->offset >> PAGE_CACHE_SHIFT;
2d45ba38 1718
4d53dc99 1719 while (num && req->num_pages < num_pages) {
1720 struct page *page;
1721 unsigned int this_num;
1722
1723 page = find_get_page(mapping, index);
1724 if (!page)
1725 break;
1726
1727 this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
1728 req->pages[req->num_pages] = page;
85f40aec 1729 req->page_descs[req->num_pages].length = this_num;
1730 req->num_pages++;
1731
c9e67d48 1732 offset = 0;
1733 num -= this_num;
1734 total_len += this_num;
48706d0a 1735 index++;
1736 }
1737 req->misc.retrieve_in.offset = outarg->offset;
1738 req->misc.retrieve_in.size = total_len;
1739 req->in.args[0].size = sizeof(req->misc.retrieve_in);
1740 req->in.args[0].value = &req->misc.retrieve_in;
1741 req->in.args[1].size = total_len;
1742
1743 err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
1744 if (err)
1745 fuse_retrieve_end(fc, req);
1746
1747 return err;
1748}
1749
1750static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
1751 struct fuse_copy_state *cs)
1752{
1753 struct fuse_notify_retrieve_out outarg;
1754 struct inode *inode;
1755 int err;
1756
1757 err = -EINVAL;
1758 if (size != sizeof(outarg))
1759 goto copy_finish;
1760
1761 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1762 if (err)
1763 goto copy_finish;
1764
1765 fuse_copy_finish(cs);
1766
1767 down_read(&fc->killsb);
1768 err = -ENOENT;
1769 if (fc->sb) {
1770 u64 nodeid = outarg.nodeid;
1771
1772 inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
1773 if (inode) {
1774 err = fuse_retrieve(fc, inode, &outarg);
1775 iput(inode);
1776 }
1777 }
1778 up_read(&fc->killsb);
1779
1780 return err;
1781
1782copy_finish:
1783 fuse_copy_finish(cs);
1784 return err;
1785}
1786
1787static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
1788 unsigned int size, struct fuse_copy_state *cs)
1789{
1790 /* Don't try to move pages (yet) */
1791 cs->move_pages = 0;
1792
8599396b 1793 switch (code) {
1794 case FUSE_NOTIFY_POLL:
1795 return fuse_notify_poll(fc, size, cs);
1796
1797 case FUSE_NOTIFY_INVAL_INODE:
1798 return fuse_notify_inval_inode(fc, size, cs);
1799
1800 case FUSE_NOTIFY_INVAL_ENTRY:
1801 return fuse_notify_inval_entry(fc, size, cs);
1802
1803 case FUSE_NOTIFY_STORE:
1804 return fuse_notify_store(fc, size, cs);
1805
1806 case FUSE_NOTIFY_RETRIEVE:
1807 return fuse_notify_retrieve(fc, size, cs);
1808
1809 case FUSE_NOTIFY_DELETE:
1810 return fuse_notify_delete(fc, size, cs);
1811
8599396b 1812 default:
f6d47a17 1813 fuse_copy_finish(cs);
1814 return -EINVAL;
1815 }
1816}
1817
334f485d 1818/* Look up request on processing list by unique ID */
3a2b5b9c 1819static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
334f485d 1820{
05726aca 1821 struct fuse_req *req;
334f485d 1822
3a2b5b9c 1823 list_for_each_entry(req, &fpq->processing, list) {
a4d27e75 1824 if (req->in.h.unique == unique || req->intr_unique == unique)
1825 return req;
1826 }
1827 return NULL;
1828}
1829
1830static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
1831 unsigned nbytes)
1832{
1833 unsigned reqsize = sizeof(struct fuse_out_header);
1834
1835 if (out->h.error)
1836 return nbytes != reqsize ? -EINVAL : 0;
1837
1838 reqsize += len_args(out->numargs, out->args);
1839
1840 if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
1841 return -EINVAL;
1842 else if (reqsize > nbytes) {
1843 struct fuse_arg *lastarg = &out->args[out->numargs-1];
1844 unsigned diffsize = reqsize - nbytes;
1845 if (diffsize > lastarg->size)
1846 return -EINVAL;
1847 lastarg->size -= diffsize;
1848 }
1849 return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
1850 out->page_zeroing);
1851}
1852
1853/*
1854 * Write a single reply to a request. First the header is copied from
1855 * the write buffer. The request is then searched on the processing
1856 * list by the unique ID found in the header. If found, then remove
1857 * it from the list and copy the rest of the buffer to the request.
1858 * The request is finished by calling request_end()
1859 */
1860static ssize_t fuse_dev_do_write(struct fuse_conn *fc,
1861 struct fuse_copy_state *cs, size_t nbytes)
1862{
1863 int err;
3a2b5b9c 1864 struct fuse_pqueue *fpq = &fc->pq;
1865 struct fuse_req *req;
1866 struct fuse_out_header oh;
334f485d 1867
1868 if (nbytes < sizeof(struct fuse_out_header))
1869 return -EINVAL;
1870
dd3bb14f 1871 err = fuse_copy_one(cs, &oh, sizeof(oh));
1872 if (err)
1873 goto err_finish;
1874
1875 err = -EINVAL;
1876 if (oh.len != nbytes)
1877 goto err_finish;
1878
1879 /*
1880 * Zero oh.unique indicates unsolicited notification message
1881 * and error contains notification code.
1882 */
1883 if (!oh.unique) {
dd3bb14f 1884 err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
1885 return err ? err : nbytes;
1886 }
1887
334f485d 1888 err = -EINVAL;
8599396b 1889 if (oh.error <= -1000 || oh.error > 0)
1890 goto err_finish;
1891
d7133114 1892 spin_lock(&fc->lock);
1893 err = -ENOENT;
1894 if (!fc->connected)
1895 goto err_unlock;
1896
3a2b5b9c 1897 req = request_find(fpq, oh.unique);
1898 if (!req)
1899 goto err_unlock;
1900
1901 /* Is it an interrupt reply? */
1902 if (req->intr_unique == oh.unique) {
1903 err = -EINVAL;
1904 if (nbytes != sizeof(struct fuse_out_header))
1905 goto err_unlock;
1906
1907 if (oh.error == -ENOSYS)
1908 fc->no_interrupt = 1;
1909 else if (oh.error == -EAGAIN)
f88996a9 1910 queue_interrupt(&fc->iq, req);
a4d27e75
MS
1911
1912 spin_unlock(&fc->lock);
dd3bb14f 1913 fuse_copy_finish(cs);
a4d27e75
MS
1914 return nbytes;
1915 }
1916
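	/*
	 * A normal reply: move the request to the io list and mark it
	 * FR_LOCKED so that a concurrent abort cannot finish it while the
	 * argument payload is still being copied in below.
	 */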
33e14b4d 1917 clear_bit(FR_SENT, &req->flags);
3a2b5b9c 1918 list_move(&req->list, &fpq->io);
334f485d 1919 req->out.h = oh;
825d6d33 1920 set_bit(FR_LOCKED, &req->flags);
dd3bb14f 1921 cs->req = req;
ce534fb0
MS
1922 if (!req->out.page_replace)
1923 cs->move_pages = 0;
d7133114 1924 spin_unlock(&fc->lock);
334f485d 1925
dd3bb14f
MS
1926 err = copy_out_args(cs, &req->out, nbytes);
1927 fuse_copy_finish(cs);
334f485d 1928
d7133114 1929 spin_lock(&fc->lock);
825d6d33 1930 clear_bit(FR_LOCKED, &req->flags);
0d8e84b0
MS
1931 if (!fc->connected)
1932 err = -ENOENT;
1933 else if (err)
334f485d
MS
1934 req->out.h.error = -EIO;
1935 request_end(fc, req);
1936
1937 return err ? err : nbytes;
1938
1939 err_unlock:
d7133114 1940 spin_unlock(&fc->lock);
334f485d 1941 err_finish:
dd3bb14f 1942 fuse_copy_finish(cs);
334f485d
MS
1943 return err;
1944}
1945
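/*
 * write_iter() entry point for /dev/fuse: wrap the caller's iovec in a
 * copy state and hand it to fuse_dev_do_write().
 */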
fbdbacca 1946static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
dd3bb14f
MS
1947{
1948 struct fuse_copy_state cs;
1949 struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
1950 if (!fc)
1951 return -EPERM;
1952
fbdbacca
AV
1953 if (!iter_is_iovec(from))
1954 return -EINVAL;
1955
dc00809a 1956 fuse_copy_init(&cs, 0, from);
dd3bb14f 1957
fbdbacca 1958 return fuse_dev_do_write(fc, &cs, iov_iter_count(from));
dd3bb14f
MS
1959}
1960
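/*
 * splice() write path: gather the pipe buffers covering 'len' bytes and
 * feed them to fuse_dev_do_write().  With SPLICE_F_MOVE the copy state
 * is allowed to move (steal) whole pages into the page cache instead of
 * copying the data.
 */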
1961static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
1962 struct file *out, loff_t *ppos,
1963 size_t len, unsigned int flags)
1964{
1965 unsigned nbuf;
1966 unsigned idx;
1967 struct pipe_buffer *bufs;
1968 struct fuse_copy_state cs;
1969 struct fuse_conn *fc;
1970 size_t rem;
1971 ssize_t ret;
1972
1973 fc = fuse_get_conn(out);
1974 if (!fc)
1975 return -EPERM;
1976
07e77dca 1977 bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
dd3bb14f
MS
1978 if (!bufs)
1979 return -ENOMEM;
1980
1981 pipe_lock(pipe);
1982 nbuf = 0;
1983 rem = 0;
1984 for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
1985 rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;
1986
1987 ret = -EINVAL;
1988 if (rem < len) {
1989 pipe_unlock(pipe);
1990 goto out;
1991 }
1992
1993 rem = len;
1994 while (rem) {
1995 struct pipe_buffer *ibuf;
1996 struct pipe_buffer *obuf;
1997
1998 BUG_ON(nbuf >= pipe->buffers);
1999 BUG_ON(!pipe->nrbufs);
2000 ibuf = &pipe->bufs[pipe->curbuf];
2001 obuf = &bufs[nbuf];
2002
2003 if (rem >= ibuf->len) {
2004 *obuf = *ibuf;
2005 ibuf->ops = NULL;
2006 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
2007 pipe->nrbufs--;
2008 } else {
2009 ibuf->ops->get(pipe, ibuf);
2010 *obuf = *ibuf;
2011 obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
2012 obuf->len = rem;
2013 ibuf->offset += obuf->len;
2014 ibuf->len -= obuf->len;
2015 }
2016 nbuf++;
2017 rem -= obuf->len;
2018 }
2019 pipe_unlock(pipe);
2020
dc00809a 2021 fuse_copy_init(&cs, 0, NULL);
dd3bb14f 2022 cs.pipebufs = bufs;
6c09e94a 2023 cs.nr_segs = nbuf;
dd3bb14f
MS
2024 cs.pipe = pipe;
2025
ce534fb0
MS
2026 if (flags & SPLICE_F_MOVE)
2027 cs.move_pages = 1;
2028
dd3bb14f
MS
2029 ret = fuse_dev_do_write(fc, &cs, len);
2030
2031 for (idx = 0; idx < nbuf; idx++) {
2032 struct pipe_buffer *buf = &bufs[idx];
2033 buf->ops->release(pipe, buf);
2034 }
2035out:
2036 kfree(bufs);
2037 return ret;
2038}
2039
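/*
 * Poll semantics for /dev/fuse: the device is always writable (replies
 * may be written at any time) and becomes readable once a request is
 * pending on the input queue.  A daemon's event loop might do, roughly
 * (an illustrative sketch, not libfuse code; fuse_fd, buf and bufsize
 * are placeholders):
 *
 *	struct pollfd pfd = { .fd = fuse_fd, .events = POLLIN };
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		read(fuse_fd, buf, bufsize);	/\* fetch the next request *\/
 */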
334f485d
MS
2040static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
2041{
334f485d 2042 unsigned mask = POLLOUT | POLLWRNORM;
f88996a9 2043 struct fuse_iqueue *fiq;
7025d9ad 2044 struct fuse_conn *fc = fuse_get_conn(file);
334f485d 2045 if (!fc)
7025d9ad 2046 return POLLERR;
334f485d 2047
f88996a9
MS
2048 fiq = &fc->iq;
2049 poll_wait(file, &fiq->waitq, wait);
334f485d 2050
4ce60812 2051 spin_lock(&fiq->waitq.lock);
e16714d8 2052 if (!fiq->connected)
7025d9ad 2053 mask = POLLERR;
f88996a9 2054 else if (request_pending(fiq))
7025d9ad 2055 mask |= POLLIN | POLLRDNORM;
4ce60812 2056 spin_unlock(&fiq->waitq.lock);
334f485d
MS
2057
2058 return mask;
2059}
2060
69a53bf2
MS
2061/*
2062 * Abort all requests on the given list (pending or processing)
2063 *
d7133114 2064 * This function releases and reacquires fc->lock
69a53bf2 2065 */
334f485d 2066static void end_requests(struct fuse_conn *fc, struct list_head *head)
b9ca67b2
MS
2067__releases(fc->lock)
2068__acquires(fc->lock)
334f485d
MS
2069{
2070 while (!list_empty(head)) {
2071 struct fuse_req *req;
2072 req = list_entry(head->next, struct fuse_req, list);
334f485d 2073 req->out.h.error = -ECONNABORTED;
33e14b4d
MS
2074 clear_bit(FR_PENDING, &req->flags);
2075 clear_bit(FR_SENT, &req->flags);
334f485d 2076 request_end(fc, req);
d7133114 2077 spin_lock(&fc->lock);
334f485d
MS
2078 }
2079}
2080
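/*
 * Wake up everyone sleeping on the poll wait queues of polled files so
 * that pollers notice the connection is being aborted.
 */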
357ccf2b
BG
2081static void end_polls(struct fuse_conn *fc)
2082{
2083 struct rb_node *p;
2084
2085 p = rb_first(&fc->polled_files);
2086
2087 while (p) {
2088 struct fuse_file *ff;
2089 ff = rb_entry(p, struct fuse_file, polled_node);
2090 wake_up_interruptible_all(&ff->poll_wait);
2091
2092 p = rb_next(p);
2093 }
2094}
2095
69a53bf2
MS
2096/*
2097 * Abort all requests.
2098 *
b716d425
MS
2099 * Emergency exit in case of a malicious or accidental deadlock, or just a hung
2100 * filesystem.
2101 *
2102 * The same effect is usually achievable through killing the filesystem daemon
2103 * and all users of the filesystem. The exception is the combination of an
2104 * asynchronous request and the tricky deadlock (see
2105 * Documentation/filesystems/fuse.txt).
69a53bf2 2106 *
b716d425
MS
2107 * Aborting requests under I/O goes as follows: 1: Separate out unlocked
2108 * requests; they can be finished off immediately. Locked requests will be
2109 * finished after unlock; see unlock_request(). 2: Finish off the unlocked
2110 * requests. It is possible that some request will finish before we can; this
2111 * is OK, since in that case the request will be removed from the list before
2112 * we touch it.
69a53bf2
MS
2113 */
2114void fuse_abort_conn(struct fuse_conn *fc)
2115{
f88996a9 2116 struct fuse_iqueue *fiq = &fc->iq;
3a2b5b9c 2117 struct fuse_pqueue *fpq = &fc->pq;
f88996a9 2118
d7133114 2119 spin_lock(&fc->lock);
69a53bf2 2120 if (fc->connected) {
b716d425 2121 struct fuse_req *req, *next;
41f98274
MS
2122 LIST_HEAD(to_end1);
2123 LIST_HEAD(to_end2);
b716d425 2124
69a53bf2 2125 fc->connected = 0;
51eb01e7 2126 fc->blocked = 0;
9759bd51 2127 fuse_set_initialized(fc);
3a2b5b9c 2128 list_for_each_entry_safe(req, next, &fpq->io, list) {
b716d425
MS
2129 req->out.h.error = -ECONNABORTED;
2130 spin_lock(&req->waitq.lock);
2131 set_bit(FR_ABORTED, &req->flags);
2132 if (!test_bit(FR_LOCKED, &req->flags))
41f98274 2133 list_move(&req->list, &to_end1);
b716d425
MS
2134 spin_unlock(&req->waitq.lock);
2135 }
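		/*
		 * Unlocked requests from the io list are now on to_end1 and
		 * can be finished immediately; locked ones stay behind and
		 * are finished once their holder drops FR_LOCKED.  Let every
		 * queued background request through so it reaches the
		 * pending list and is aborted below as well.
		 */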
41f98274
MS
2136 fc->max_background = UINT_MAX;
2137 flush_bg_queue(fc);
8c91189a 2138
4ce60812 2139 spin_lock(&fiq->waitq.lock);
8c91189a 2140 fiq->connected = 0;
f88996a9 2141 list_splice_init(&fiq->pending, &to_end2);
8c91189a
MS
2142 while (forget_pending(fiq))
2143 kfree(dequeue_forget(fiq, 1, NULL));
4ce60812
MS
2144 wake_up_all_locked(&fiq->waitq);
2145 spin_unlock(&fiq->waitq.lock);
8c91189a
MS
2146 kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
2147
3a2b5b9c 2148 list_splice_init(&fpq->processing, &to_end2);
41f98274
MS
2149 while (!list_empty(&to_end1)) {
2150 req = list_first_entry(&to_end1, struct fuse_req, list);
b716d425
MS
2151 __fuse_get_request(req);
2152 request_end(fc, req);
2153 spin_lock(&fc->lock);
2154 }
41f98274 2155 end_requests(fc, &to_end2);
357ccf2b 2156 end_polls(fc);
51eb01e7 2157 wake_up_all(&fc->blocked_waitq);
69a53bf2 2158 }
d7133114 2159 spin_unlock(&fc->lock);
69a53bf2 2160}
08cbf542 2161EXPORT_SYMBOL_GPL(fuse_abort_conn);
69a53bf2 2162
08cbf542 2163int fuse_dev_release(struct inode *inode, struct file *file)
334f485d 2164{
0720b315 2165 struct fuse_conn *fc = fuse_get_conn(file);
334f485d 2166 if (fc) {
3a2b5b9c 2167 WARN_ON(!list_empty(&fc->pq.io));
f88996a9 2168 WARN_ON(fc->iq.fasync != NULL);
ccd0a0bd 2169 fuse_abort_conn(fc);
bafa9654 2170 fuse_conn_put(fc);
385a17bf 2171 }
f543f253 2172
334f485d
MS
2173 return 0;
2174}
08cbf542 2175EXPORT_SYMBOL_GPL(fuse_dev_release);
334f485d 2176
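/*
 * Enable or disable asynchronous (SIGIO) notification for the daemon.
 * The fasync list lives on the input queue and is kicked whenever a new
 * request is queued.
 */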
385a17bf
JD
2177static int fuse_dev_fasync(int fd, struct file *file, int on)
2178{
2179 struct fuse_conn *fc = fuse_get_conn(file);
2180 if (!fc)
a87046d8 2181 return -EPERM;
385a17bf
JD
2182
2183 /* No locking - fasync_helper does its own locking */
f88996a9 2184 return fasync_helper(fd, file, on, &fc->iq.fasync);
385a17bf
JD
2185}
2186
4b6f5d20 2187const struct file_operations fuse_dev_operations = {
334f485d 2188 .owner = THIS_MODULE,
94e4fe2c 2189 .open = fuse_dev_open,
334f485d 2190 .llseek = no_llseek,
fbdbacca 2191 .read_iter = fuse_dev_read,
c3021629 2192 .splice_read = fuse_dev_splice_read,
fbdbacca 2193 .write_iter = fuse_dev_write,
dd3bb14f 2194 .splice_write = fuse_dev_splice_write,
334f485d
MS
2195 .poll = fuse_dev_poll,
2196 .release = fuse_dev_release,
385a17bf 2197 .fasync = fuse_dev_fasync,
334f485d 2198};
08cbf542 2199EXPORT_SYMBOL_GPL(fuse_dev_operations);
334f485d
MS
2200
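/*
 * Registering with the misc subsystem creates the /dev/fuse character
 * device (minor FUSE_MINOR).  A filesystem daemon opens it and passes
 * the file descriptor to mount(2) via the "fd=" option; all further
 * request/reply traffic for that connection flows through this fd.
 */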
2201static struct miscdevice fuse_miscdevice = {
2202 .minor = FUSE_MINOR,
2203 .name = "fuse",
2204 .fops = &fuse_dev_operations,
2205};
2206
2207int __init fuse_dev_init(void)
2208{
2209 int err = -ENOMEM;
2210 fuse_req_cachep = kmem_cache_create("fuse_request",
2211 sizeof(struct fuse_req),
20c2df83 2212 0, 0, NULL);
334f485d
MS
2213 if (!fuse_req_cachep)
2214 goto out;
2215
2216 err = misc_register(&fuse_miscdevice);
2217 if (err)
2218 goto out_cache_clean;
2219
2220 return 0;
2221
2222 out_cache_clean:
2223 kmem_cache_destroy(fuse_req_cachep);
2224 out:
2225 return err;
2226}
2227
2228void fuse_dev_cleanup(void)
2229{
2230 misc_deregister(&fuse_miscdevice);
2231 kmem_cache_destroy(fuse_req_cachep);
2232}