/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/smp_lock.h>

#include <asm/system.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static int nfs_pagein_multi(struct inode *, struct list_head *, unsigned int, size_t, int);
static int nfs_pagein_one(struct inode *, struct list_head *, unsigned int, size_t, int);
static const struct rpc_call_ops nfs_read_partial_ops;
static const struct rpc_call_ops nfs_read_full_ops;

static struct kmem_cache *nfs_rdata_cachep;
static mempool_t *nfs_rdata_mempool;

#define MIN_POOL_READ	(32)

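/*
 * Allocate a read request structure from the mempool. The page vector
 * is embedded in the structure for small requests; larger requests
 * fall back to a separate kcalloc() allocation.
 */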
struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
{
	struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, GFP_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
		p->npages = pagecount;
		p->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
		if (pagecount <= ARRAY_SIZE(p->page_array))
			p->pagevec = p->page_array;
		else {
			p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
			if (!p->pagevec) {
				mempool_free(p, nfs_rdata_mempool);
				p = NULL;
			}
		}
	}
	return p;
}

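/* Free a separately allocated page vector before returning to the mempool. */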
static void nfs_readdata_free(struct nfs_read_data *p)
{
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_rdata_mempool);
}

void nfs_readdata_release(void *data)
{
	struct nfs_read_data *rdata = data;

	put_nfs_open_context(rdata->args.context);
	nfs_readdata_free(rdata);
}

static int nfs_return_empty_page(struct page *page)
{
	zero_user(page, 0, PAGE_CACHE_SIZE);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}

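/*
 * Zero out the part of the page that the server did not return data
 * for. On a short read that hits EOF, the tail of the request would
 * otherwise contain uninitialised page contents.
 */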
static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
{
	unsigned int remainder = data->args.count - data->res.count;
	unsigned int base = data->args.pgbase + data->res.count;
	unsigned int pglen;
	struct page **pages;

	if (data->res.eof == 0 || remainder == 0)
		return;
	/*
	 * Note: "remainder" can never be negative, since we check for
	 * this in the XDR code.
	 */
	pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
	base &= ~PAGE_CACHE_MASK;
	pglen = PAGE_CACHE_SIZE - base;
	for (;;) {
		if (remainder <= pglen) {
			zero_user(*pages, base, remainder);
			break;
		}
		zero_user(*pages, base, pglen);
		pages++;
		remainder -= pglen;
		pglen = PAGE_CACHE_SIZE;
		base = 0;
	}
}

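/*
 * Read a single page asynchronously: build one nfs_page request for it
 * and send it out, splitting it into rsize-sized RPCs if the server's
 * read size is smaller than a page.
 */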
int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
		struct page *page)
{
	LIST_HEAD(one_request);
	struct nfs_page *new;
	unsigned int len;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);
	new = nfs_create_request(ctx, inode, page, 0, len);
	if (IS_ERR(new)) {
		unlock_page(page);
		return PTR_ERR(new);
	}
	if (len < PAGE_CACHE_SIZE)
		zero_user_segment(page, len, PAGE_CACHE_SIZE);

	nfs_list_add_request(new, &one_request);
	if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE)
		nfs_pagein_multi(inode, &one_request, 1, len, 0);
	else
		nfs_pagein_one(inode, &one_request, 1, len, 0);
	return 0;
}

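/*
 * Release a completed read request: push the page into fscache if it
 * read successfully, unlock it, and drop the request.
 */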
static void nfs_readpage_release(struct nfs_page *req)
{
	struct inode *d_inode = req->wb_context->path.dentry->d_inode;

	if (PageUptodate(req->wb_page))
		nfs_readpage_to_fscache(d_inode, req->wb_page, 0);

	unlock_page(req->wb_page);

	dprintk("NFS: read done (%s/%Ld %d@%Ld)\n",
			req->wb_context->path.dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));
	nfs_clear_request(req);
	nfs_release_request(req);
}

/*
 * Set up the NFS read request struct
 */
static int nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
		const struct rpc_call_ops *call_ops,
		unsigned int count, unsigned int offset)
{
	struct inode *inode = req->wb_context->path.dentry->d_inode;
	int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = req->wb_context->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.task = &data->task,
		.rpc_client = NFS_CLIENT(inode),
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC | swap_flags,
	};

	data->req = req;
	data->inode = inode;
	data->cred = msg.rpc_cred;

	data->args.fh = NFS_FH(inode);
	data->args.offset = req_offset(req) + offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages = data->pagevec;
	data->args.count = count;
	data->args.context = get_nfs_open_context(req->wb_context);

	data->res.fattr = &data->fattr;
	data->res.count = count;
	data->res.eof = 0;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct. */
	NFS_PROTO(inode)->read_setup(data, &msg);

	dprintk("NFS: %5u initiated read call (req %s/%Ld, %u bytes @ offset %Lu)\n",
			data->task.tk_pid,
			inode->i_sb->s_id,
			(long long)NFS_FILEID(inode),
			count,
			(unsigned long long)data->args.offset);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}

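/* Fail every request still on the list: mark its page in error and release it. */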
static void
nfs_async_read_error(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		SetPageError(req->wb_page);
		nfs_readpage_release(req);
	}
}

/*
 * Generate multiple requests to fill a single page.
 *
 * We optimize to reduce the number of read operations on the wire. If we
 * detect that we're reading a page, or an area of a page, that is past the
 * end of file, we do not generate NFS read operations but just clear the
 * parts of the page that would have come back zero from the server anyway.
 *
 * We rely on the cached value of i_size to make this determination; another
 * client can fill pages on the server past our cached end-of-file, but we
 * won't see the new data until our attribute cache is updated. This is more
 * or less conventional NFS client behavior.
 */
static int nfs_pagein_multi(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int flags)
{
	struct nfs_page *req = nfs_list_entry(head->next);
	struct page *page = req->wb_page;
	struct nfs_read_data *data;
	size_t rsize = NFS_SERVER(inode)->rsize, nbytes;
	unsigned int offset;
	int requests = 0;
	int ret = 0;
	LIST_HEAD(list);

	nfs_list_remove_request(req);

	nbytes = count;
	do {
		size_t len = min(nbytes, rsize);

		data = nfs_readdata_alloc(1);
		if (!data)
			goto out_bad;
		list_add(&data->pages, &list);
		requests++;
		nbytes -= len;
	} while (nbytes != 0);
	atomic_set(&req->wb_complete, requests);

	ClearPageError(page);
	offset = 0;
	nbytes = count;
	do {
		int ret2;

		data = list_entry(list.next, struct nfs_read_data, pages);
		list_del_init(&data->pages);

		data->pagevec[0] = page;

		if (nbytes < rsize)
			rsize = nbytes;
		ret2 = nfs_read_rpcsetup(req, data, &nfs_read_partial_ops,
				rsize, offset);
		if (ret == 0)
			ret = ret2;
		offset += rsize;
		nbytes -= rsize;
	} while (nbytes != 0);

	return ret;

out_bad:
	while (!list_empty(&list)) {
		data = list_entry(list.next, struct nfs_read_data, pages);
		list_del(&data->pages);
		nfs_readdata_free(data);
	}
	SetPageError(page);
	nfs_readpage_release(req);
	return -ENOMEM;
}

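/*
 * Coalesce all requests on the list into a single RPC, gathering their
 * pages into one page vector. The pageio layer guarantees the total
 * never exceeds the server's rsize.
 */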
static int nfs_pagein_one(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int flags)
{
	struct nfs_page *req;
	struct page **pages;
	struct nfs_read_data *data;
	int ret = -ENOMEM;

	data = nfs_readdata_alloc(npages);
	if (!data)
		goto out_bad;

	pages = data->pagevec;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &data->pages);
		ClearPageError(req->wb_page);
		*pages++ = req->wb_page;
	}
	req = nfs_list_entry(data->pages.next);

	return nfs_read_rpcsetup(req, data, &nfs_read_full_ops, count, 0);
out_bad:
	nfs_async_read_error(head);
	return ret;
}

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data)
{
	int status;

	dprintk("NFS: %s: %5u, (status %d)\n", __func__, task->tk_pid,
			task->tk_status);

	status = NFS_PROTO(data->inode)->read_done(task, data);
	if (status != 0)
		return status;

	nfs_add_stats(data->inode, NFSIOS_SERVERREADBYTES, data->res.count);

	if (task->tk_status == -ESTALE) {
		set_bit(NFS_INO_STALE, &NFS_I(data->inode)->flags);
		nfs_mark_for_revalidate(data->inode);
	}
	return 0;
}

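/*
 * Handle a short read: if the server returned fewer bytes than we
 * asked for but made some progress, advance the arguments past the
 * data we received and restart the RPC for the remainder.
 */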
static void nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data)
{
	struct nfs_readargs *argp = &data->args;
	struct nfs_readres *resp = &data->res;

	if (resp->eof || resp->count == argp->count)
		goto out;

	/* This is a short read! */
	nfs_inc_stats(data->inode, NFSIOS_SHORTREAD);
	/* Has the server at least made some progress? */
	if (resp->count == 0)
		goto out;

	/* Yes, so retry the read at the end of the data */
	argp->offset += resp->count;
	argp->pgbase += resp->count;
	argp->count -= resp->count;
	nfs4_restart_rpc(task, NFS_SERVER(data->inode)->nfs_client);
	return;
out:
	nfs4_sequence_free_slot(NFS_SERVER(data->inode)->nfs_client,
			&data->res.seq_res);
}

/*
 * Handle a read reply that fills part of a page.
 */
static void nfs_readpage_result_partial(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;

	if (nfs_readpage_result(task, data) != 0)
		return;
	if (task->tk_status < 0)
		return;

	nfs_readpage_truncate_uninitialised_page(data);
	nfs_readpage_retry(task, data);
}

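/*
 * Per-RPC completion for a multi-RPC page: when the last outstanding
 * slice finishes, mark the page uptodate (unless an error occurred)
 * and release the request.
 */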
static void nfs_readpage_release_partial(void *calldata)
{
	struct nfs_read_data *data = calldata;
	struct nfs_page *req = data->req;
	struct page *page = req->wb_page;
	int status = data->task.tk_status;

	if (status < 0)
		SetPageError(page);

	if (atomic_dec_and_test(&req->wb_complete)) {
		if (!PageError(page))
			SetPageUptodate(page);
		nfs_readpage_release(req);
	}
	nfs_readdata_release(calldata);
}

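/*
 * NFSv4.1: reserve a session slot before the read is transmitted.
 * If no slot is available, nfs4_setup_sequence() queues the task and
 * the call to rpc_call_start() is deferred.
 */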
#if defined(CONFIG_NFS_V4_1)
void nfs_read_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;

	if (nfs4_setup_sequence(NFS_SERVER(data->inode)->nfs_client,
				&data->args.seq_args, &data->res.seq_res,
				0, task))
		return;
	rpc_call_start(task);
}
#endif /* CONFIG_NFS_V4_1 */

static const struct rpc_call_ops nfs_read_partial_ops = {
#if defined(CONFIG_NFS_V4_1)
	.rpc_call_prepare = nfs_read_prepare,
#endif /* CONFIG_NFS_V4_1 */
	.rpc_call_done = nfs_readpage_result_partial,
	.rpc_release = nfs_readpage_release_partial,
};

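/*
 * Mark every page that was completely filled by this reply as
 * uptodate. A trailing partially-filled page counts only at EOF
 * (its tail has been zeroed) or when the read was satisfied in full.
 */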
static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data)
{
	unsigned int count = data->res.count;
	unsigned int base = data->args.pgbase;
	struct page **pages;

	if (data->res.eof)
		count = data->args.count;
	if (unlikely(count == 0))
		return;
	pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
	base &= ~PAGE_CACHE_MASK;
	count += base;
	for (; count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++)
		SetPageUptodate(*pages);
	if (count == 0)
		return;
	/* Was this a short read? */
	if (data->res.eof || data->res.count == data->args.count)
		SetPageUptodate(*pages);
}

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static void nfs_readpage_result_full(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;

	if (nfs_readpage_result(task, data) != 0)
		return;
	if (task->tk_status < 0)
		return;
	/*
	 * Note: nfs_readpage_retry may change the values of
	 * data->args. In the multi-page case, we therefore need
	 * to ensure that we call nfs_readpage_set_pages_uptodate()
	 * first.
	 */
	nfs_readpage_truncate_uninitialised_page(data);
	nfs_readpage_set_pages_uptodate(data);
	nfs_readpage_retry(task, data);
}

static void nfs_readpage_release_full(void *calldata)
{
	struct nfs_read_data *data = calldata;

	while (!list_empty(&data->pages)) {
		struct nfs_page *req = nfs_list_entry(data->pages.next);

		nfs_list_remove_request(req);
		nfs_readpage_release(req);
	}
	nfs_readdata_release(calldata);
}

static const struct rpc_call_ops nfs_read_full_ops = {
#if defined(CONFIG_NFS_V4_1)
	.rpc_call_prepare = nfs_read_prepare,
#endif /* CONFIG_NFS_V4_1 */
	.rpc_call_done = nfs_readpage_result_full,
	.rpc_release = nfs_readpage_release_full,
};

/*
 * Read a page over NFS.
 * We read the page synchronously in the following case:
 *  -	The error flag is set for this page. This happens only when a
 *	previous async read operation failed.
 */
int nfs_readpage(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx;
	struct inode *inode = page->mapping->host;
	int error;

	dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
		page, PAGE_CACHE_SIZE, page->index);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
	nfs_add_stats(inode, NFSIOS_READPAGES, 1);

	/*
	 * Try to flush any pending writes to the file.
	 *
	 * NOTE! Because we own the page lock, there cannot
	 * be any new pending writes generated at this point
	 * for this page (other pages can be written to).
	 */
	error = nfs_wb_page(inode, page);
	if (error)
		goto out_unlock;
	if (PageUptodate(page))
		goto out_unlock;

	error = -ESTALE;
	if (NFS_STALE(inode))
		goto out_unlock;

	if (file == NULL) {
		error = -EBADF;
		ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (ctx == NULL)
			goto out_unlock;
	} else
		ctx = get_nfs_open_context(nfs_file_open_context(file));

	if (!IS_SYNC(inode)) {
		error = nfs_readpage_from_fscache(ctx, inode, page);
		if (error == 0)
			goto out;
	}

	error = nfs_readpage_async(ctx, inode, page);

out:
	put_nfs_open_context(ctx);
	return error;
out_unlock:
	unlock_page(page);
	return error;
}

struct nfs_readdesc {
	struct nfs_pageio_descriptor *pgio;
	struct nfs_open_context *ctx;
};

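/*
 * read_cache_pages() callback: build an nfs_page request for each page
 * and feed it to the pageio descriptor, which coalesces requests into
 * rsize-sized RPCs.
 */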
static int
readpage_async_filler(void *data, struct page *page)
{
	struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
	struct inode *inode = page->mapping->host;
	struct nfs_page *new;
	unsigned int len;
	int error;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);

	new = nfs_create_request(desc->ctx, inode, page, 0, len);
	if (IS_ERR(new))
		goto out_error;

	if (len < PAGE_CACHE_SIZE)
		zero_user_segment(page, len, PAGE_CACHE_SIZE);
	if (!nfs_pageio_add_request(desc->pgio, new)) {
		error = desc->pgio->pg_error;
		goto out_unlock;
	}
	return 0;
out_error:
	error = PTR_ERR(new);
	SetPageError(page);
out_unlock:
	unlock_page(page);
	return error;
}

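/*
 * Readahead entry point (->readpages): try fscache first, then send
 * whatever is left out as NFS READ requests.
 */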
int nfs_readpages(struct file *filp, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	struct nfs_pageio_descriptor pgio;
	struct nfs_readdesc desc = {
		.pgio = &pgio,
	};
	struct inode *inode = mapping->host;
	struct nfs_server *server = NFS_SERVER(inode);
	size_t rsize = server->rsize;
	unsigned long npages;
	int ret = -ESTALE;

	dprintk("NFS: nfs_readpages (%s/%Ld %d)\n",
			inode->i_sb->s_id,
			(long long)NFS_FILEID(inode),
			nr_pages);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);

	if (NFS_STALE(inode))
		goto out;

	if (filp == NULL) {
		desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (desc.ctx == NULL)
			return -EBADF;
	} else
		desc.ctx = get_nfs_open_context(nfs_file_open_context(filp));

	/* attempt to read as many of the pages as possible from the cache
	 * - this returns -ENOBUFS immediately if the cookie is negative
	 */
	ret = nfs_readpages_from_fscache(desc.ctx, inode, mapping,
					pages, &nr_pages);
	if (ret == 0)
		goto read_complete; /* all pages were read */

	if (rsize < PAGE_CACHE_SIZE)
		nfs_pageio_init(&pgio, inode, nfs_pagein_multi, rsize, 0);
	else
		nfs_pageio_init(&pgio, inode, nfs_pagein_one, rsize, 0);

	ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);

	nfs_pageio_complete(&pgio);
	npages = (pgio.pg_bytes_written + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	nfs_add_stats(inode, NFSIOS_READPAGES, npages);
read_complete:
	put_nfs_open_context(desc.ctx);
out:
	return ret;
}

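/*
 * Create the slab cache and mempool for nfs_read_data structures.
 * The mempool keeps MIN_POOL_READ entries in reserve so reads can
 * make progress under memory pressure.
 */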
int __init nfs_init_readpagecache(void)
{
	nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
					     sizeof(struct nfs_read_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_rdata_cachep == NULL)
		return -ENOMEM;

	nfs_rdata_mempool = mempool_create_slab_pool(MIN_POOL_READ,
						     nfs_rdata_cachep);
	if (nfs_rdata_mempool == NULL) {
		/* don't leak the slab cache on failure */
		kmem_cache_destroy(nfs_rdata_cachep);
		return -ENOMEM;
	}

	return 0;
}

void nfs_destroy_readpagecache(void)
{
	mempool_destroy(nfs_rdata_mempool);
	kmem_cache_destroy(nfs_rdata_cachep);
}