/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static const struct nfs_pageio_ops nfs_pageio_read_ops;
static const struct rpc_call_ops nfs_read_common_ops;
static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;

static struct kmem_cache *nfs_rdata_cachep;

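/*
 * Allocate a read header from the nfs_rdata_cachep slab and initialise
 * its page and RPC lists, lock and reference count.
 */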
struct nfs_read_header *nfs_readhdr_alloc(void)
{
        struct nfs_read_header *rhdr;

        rhdr = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);
        if (rhdr) {
                struct nfs_pgio_header *hdr = &rhdr->header;

                INIT_LIST_HEAD(&hdr->pages);
                INIT_LIST_HEAD(&hdr->rpc_list);
                spin_lock_init(&hdr->lock);
                atomic_set(&hdr->refcnt, 0);
        }
        return rhdr;
}

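/*
 * Allocate the nfs_read_data for one RPC call.  The entry preallocated
 * inside the read header is used first; further calls fall back to
 * kzalloc().  Each nfs_read_data holds a reference on the header.
 */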
static struct nfs_read_data *nfs_readdata_alloc(struct nfs_pgio_header *hdr,
                                                unsigned int pagecount)
{
        struct nfs_read_data *data, *prealloc;

        prealloc = &container_of(hdr, struct nfs_read_header, header)->rpc_data;
        if (prealloc->header == NULL)
                data = prealloc;
        else
                data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                goto out;

        if (nfs_pgarray_set(&data->pages, pagecount)) {
                data->header = hdr;
                atomic_inc(&hdr->refcnt);
        } else {
                if (data != prealloc)
                        kfree(data);
                data = NULL;
        }
out:
        return data;
}

void nfs_readhdr_free(struct nfs_pgio_header *hdr)
{
        struct nfs_read_header *rhdr = container_of(hdr, struct nfs_read_header, header);

        kmem_cache_free(nfs_rdata_cachep, rhdr);
}

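/*
 * Release one nfs_read_data: drop the open context, free any separately
 * allocated page vector, and put the reference on the header, running the
 * completion callback when the last reference is gone.
 */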
void nfs_readdata_release(struct nfs_read_data *rdata)
{
        struct nfs_pgio_header *hdr = rdata->header;
        struct nfs_read_header *read_header = container_of(hdr, struct nfs_read_header, header);

        put_nfs_open_context(rdata->args.context);
        if (rdata->pages.pagevec != rdata->pages.page_array)
                kfree(rdata->pages.pagevec);
        if (rdata != &read_header->rpc_data)
                kfree(rdata);
        else
                rdata->header = NULL;
        if (atomic_dec_and_test(&hdr->refcnt))
                hdr->completion_ops->completion(hdr);
}

static
int nfs_return_empty_page(struct page *page)
{
        zero_user(page, 0, PAGE_CACHE_SIZE);
        SetPageUptodate(page);
        unlock_page(page);
        return 0;
}

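/*
 * Initialise a pageio descriptor for reads, bounded by the server's rsize.
 */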
void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
                          struct inode *inode,
                          const struct nfs_pgio_completion_ops *compl_ops)
{
        nfs_pageio_init(pgio, inode, &nfs_pageio_read_ops, compl_ops,
                        NFS_SERVER(inode)->rsize, 0);
}

void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
{
        pgio->pg_ops = &nfs_pageio_read_ops;
        pgio->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);

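/*
 * Read a single page asynchronously: build an nfs_page request for the
 * valid part of the page, zero the remainder, and push it through a
 * freshly initialised pageio descriptor.
 */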
int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
                       struct page *page)
{
        struct nfs_page *new;
        unsigned int len;
        struct nfs_pageio_descriptor pgio;

        len = nfs_page_length(page);
        if (len == 0)
                return nfs_return_empty_page(page);
        new = nfs_create_request(ctx, inode, page, 0, len);
        if (IS_ERR(new)) {
                unlock_page(page);
                return PTR_ERR(new);
        }
        if (len < PAGE_CACHE_SIZE)
                zero_user_segment(page, len, PAGE_CACHE_SIZE);

        NFS_PROTO(inode)->read_pageio_init(&pgio, inode, &nfs_async_read_completion_ops);
        nfs_pageio_add_request(&pgio, new);
        nfs_pageio_complete(&pgio);
        NFS_I(inode)->read_io += pgio.pg_bytes_written;
        return 0;
}

static void nfs_readpage_release(struct nfs_page *req)
{
        struct inode *d_inode = req->wb_context->dentry->d_inode;

        if (PageUptodate(req->wb_page))
                nfs_readpage_to_fscache(d_inode, req->wb_page, 0);

        unlock_page(req->wb_page);

        dprintk("NFS: read done (%s/%Ld %d@%Ld)\n",
                        req->wb_context->dentry->d_inode->i_sb->s_id,
                        (long long)NFS_FILEID(req->wb_context->dentry->d_inode),
                        req->wb_bytes,
                        (long long)req_offset(req));
        nfs_release_request(req);
}

/* Note io was page aligned */
static void nfs_read_completion(struct nfs_pgio_header *hdr)
{
        unsigned long bytes = 0;

        if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
                goto out;
        while (!list_empty(&hdr->pages)) {
                struct nfs_page *req = nfs_list_entry(hdr->pages.next);
                struct page *page = req->wb_page;

                if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
                        if (bytes > hdr->good_bytes)
                                zero_user(page, 0, PAGE_SIZE);
                        else if (hdr->good_bytes - bytes < PAGE_SIZE)
                                zero_user_segment(page,
                                        hdr->good_bytes & ~PAGE_MASK,
                                        PAGE_SIZE);
                }
                bytes += req->wb_bytes;
                if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
                        if (bytes <= hdr->good_bytes)
                                SetPageUptodate(page);
                } else
                        SetPageUptodate(page);
                nfs_list_remove_request(req);
                nfs_readpage_release(req);
        }
out:
        hdr->release(hdr);
}

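/*
 * Set up the RPC message and task for a read and hand it off to the RPC
 * layer as an asynchronous task.
 */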
int nfs_initiate_read(struct rpc_clnt *clnt,
                      struct nfs_read_data *data,
                      const struct rpc_call_ops *call_ops, int flags)
{
        struct inode *inode = data->header->inode;
        int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;
        struct rpc_task *task;
        struct rpc_message msg = {
                .rpc_argp = &data->args,
                .rpc_resp = &data->res,
                .rpc_cred = data->header->cred,
        };
        struct rpc_task_setup task_setup_data = {
                .task = &data->task,
                .rpc_client = clnt,
                .rpc_message = &msg,
                .callback_ops = call_ops,
                .callback_data = data,
                .workqueue = nfsiod_workqueue,
                .flags = RPC_TASK_ASYNC | swap_flags | flags,
        };

        /* Set up the initial task struct. */
        NFS_PROTO(inode)->read_setup(data, &msg);

        dprintk("NFS: %5u initiated read call (req %s/%lld, %u bytes @ "
                        "offset %llu)\n",
                        data->task.tk_pid,
                        inode->i_sb->s_id,
                        (long long)NFS_FILEID(inode),
                        data->args.count,
                        (unsigned long long)data->args.offset);

        task = rpc_run_task(&task_setup_data);
        if (IS_ERR(task))
                return PTR_ERR(task);
        rpc_put_task(task);
        return 0;
}
EXPORT_SYMBOL_GPL(nfs_initiate_read);

/*
 * Set up the NFS read request struct
 */
static void nfs_read_rpcsetup(struct nfs_read_data *data,
                              unsigned int count, unsigned int offset)
{
        struct nfs_page *req = data->header->req;

        data->args.fh = NFS_FH(data->header->inode);
        data->args.offset = req_offset(req) + offset;
        data->args.pgbase = req->wb_pgbase + offset;
        data->args.pages = data->pages.pagevec;
        data->args.count = count;
        data->args.context = get_nfs_open_context(req->wb_context);
        data->args.lock_context = req->wb_lock_context;

        data->res.fattr = &data->fattr;
        data->res.count = count;
        data->res.eof = 0;
        nfs_fattr_init(&data->fattr);
}

static int nfs_do_read(struct nfs_read_data *data,
                       const struct rpc_call_ops *call_ops)
{
        struct inode *inode = data->header->inode;

        return nfs_initiate_read(NFS_CLIENT(inode), data, call_ops, 0);
}

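/*
 * Issue every nfs_read_data queued on the list, remembering the first
 * error while still submitting the remaining requests.
 */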
static int
nfs_do_multiple_reads(struct list_head *head,
                      const struct rpc_call_ops *call_ops)
{
        struct nfs_read_data *data;
        int ret = 0;

        while (!list_empty(head)) {
                int ret2;

                data = list_first_entry(head, struct nfs_read_data, list);
                list_del_init(&data->list);

                ret2 = nfs_do_read(data, call_ops);
                if (ret == 0)
                        ret = ret2;
        }
        return ret;
}

static void
nfs_async_read_error(struct list_head *head)
{
        struct nfs_page *req;

        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                nfs_readpage_release(req);
        }
}

static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
        .error_cleanup = nfs_async_read_error,
        .completion = nfs_read_completion,
};

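/*
 * Error path for coalescing: mark the header for redo, release any
 * nfs_read_data already queued on rpc_list, and clean up the remaining
 * requests on the descriptor.
 */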
static void nfs_pagein_error(struct nfs_pageio_descriptor *desc,
                             struct nfs_pgio_header *hdr)
{
        set_bit(NFS_IOHDR_REDO, &hdr->flags);
        while (!list_empty(&hdr->rpc_list)) {
                struct nfs_read_data *data = list_first_entry(&hdr->rpc_list,
                                struct nfs_read_data, list);
                list_del(&data->list);
                nfs_readdata_release(data);
        }
        desc->pg_completion_ops->error_cleanup(&desc->pg_list);
}

/*
 * Generate multiple requests to fill a single page.
 *
 * We optimize to reduce the number of read operations on the wire.  If we
 * detect that we're reading a page, or an area of a page, that is past the
 * end of file, we do not generate NFS read operations but just clear the
 * parts of the page that would have come back zero from the server anyway.
 *
 * We rely on the cached value of i_size to make this determination; another
 * client can fill pages on the server past our cached end-of-file, but we
 * won't see the new data until our attribute cache is updated.  This is more
 * or less conventional NFS client behavior.
 */
static int nfs_pagein_multi(struct nfs_pageio_descriptor *desc,
                            struct nfs_pgio_header *hdr)
{
        struct nfs_page *req = hdr->req;
        struct page *page = req->wb_page;
        struct nfs_read_data *data;
        size_t rsize = desc->pg_bsize, nbytes;
        unsigned int offset;

        offset = 0;
        nbytes = desc->pg_count;
        do {
                size_t len = min(nbytes, rsize);

                data = nfs_readdata_alloc(hdr, 1);
                if (!data) {
                        nfs_pagein_error(desc, hdr);
                        return -ENOMEM;
                }
                data->pages.pagevec[0] = page;
                nfs_read_rpcsetup(data, len, offset);
                list_add(&data->list, &hdr->rpc_list);
                nbytes -= len;
                offset += len;
        } while (nbytes != 0);

        nfs_list_remove_request(req);
        nfs_list_add_request(req, &hdr->pages);
        desc->pg_rpc_callops = &nfs_read_common_ops;
        return 0;
}

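/*
 * Build a single read RPC covering every request coalesced in the
 * descriptor (used when the coalesced span fits within the block size).
 */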
static int nfs_pagein_one(struct nfs_pageio_descriptor *desc,
                          struct nfs_pgio_header *hdr)
{
        struct nfs_page *req;
        struct page **pages;
        struct nfs_read_data *data;
        struct list_head *head = &desc->pg_list;

        data = nfs_readdata_alloc(hdr, nfs_page_array_len(desc->pg_base,
                                                          desc->pg_count));
        if (!data) {
                nfs_pagein_error(desc, hdr);
                return -ENOMEM;
        }

        pages = data->pages.pagevec;
        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                nfs_list_add_request(req, &hdr->pages);
                *pages++ = req->wb_page;
        }

        nfs_read_rpcsetup(data, desc->pg_count, 0);
        list_add(&data->list, &hdr->rpc_list);
        desc->pg_rpc_callops = &nfs_read_common_ops;
        return 0;
}

int nfs_generic_pagein(struct nfs_pageio_descriptor *desc,
                       struct nfs_pgio_header *hdr)
{
        if (desc->pg_bsize < PAGE_CACHE_SIZE)
                return nfs_pagein_multi(desc, hdr);
        return nfs_pagein_one(desc, hdr);
}

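/*
 * pg_doio callback: allocate a read header, split the coalesced requests
 * into RPCs via nfs_generic_pagein() and send them, dropping the header
 * reference (and possibly completing it) when done.
 */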
static int nfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
        struct nfs_read_header *rhdr;
        struct nfs_pgio_header *hdr;
        int ret;

        rhdr = nfs_readhdr_alloc();
        if (!rhdr) {
                desc->pg_completion_ops->error_cleanup(&desc->pg_list);
                return -ENOMEM;
        }
        hdr = &rhdr->header;
        nfs_pgheader_init(desc, hdr, nfs_readhdr_free);
        atomic_inc(&hdr->refcnt);
        ret = nfs_generic_pagein(desc, hdr);
        if (ret == 0)
                ret = nfs_do_multiple_reads(&hdr->rpc_list,
                                            desc->pg_rpc_callops);
        if (atomic_dec_and_test(&hdr->refcnt))
                hdr->completion_ops->completion(hdr);
        return ret;
}

static const struct nfs_pageio_ops nfs_pageio_read_ops = {
        .pg_test = nfs_generic_pg_test,
        .pg_doio = nfs_generic_pg_readpages,
};

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data)
{
        struct inode *inode = data->header->inode;
        int status;

        dprintk("NFS: %s: %5u, (status %d)\n", __func__, task->tk_pid,
                        task->tk_status);

        status = NFS_PROTO(inode)->read_done(task, data);
        if (status != 0)
                return status;

        nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, data->res.count);

        if (task->tk_status == -ESTALE) {
                set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
                nfs_mark_for_revalidate(inode);
        }
        return 0;
}

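/*
 * Handle a short read: if the server returned no data, flag an I/O error
 * on the header; otherwise advance the arguments past the bytes received
 * and restart the RPC call.
 */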
static void nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data)
{
        struct nfs_readargs *argp = &data->args;
        struct nfs_readres *resp = &data->res;

        /* This is a short read! */
        nfs_inc_stats(data->header->inode, NFSIOS_SHORTREAD);
        /* Has the server at least made some progress? */
        if (resp->count == 0) {
                nfs_set_pgio_error(data->header, -EIO, argp->offset);
                return;
        }
        /* Yes, so retry the read at the end of the data */
        data->mds_offset += resp->count;
        argp->offset += resp->count;
        argp->pgbase += resp->count;
        argp->count -= resp->count;
        rpc_restart_call_prepare(task);
}

static void nfs_readpage_result_common(struct rpc_task *task, void *calldata)
{
        struct nfs_read_data *data = calldata;
        struct nfs_pgio_header *hdr = data->header;

        /* Note the only returns of nfs_readpage_result are 0 and -EAGAIN */
        if (nfs_readpage_result(task, data) != 0)
                return;
        if (task->tk_status < 0)
                nfs_set_pgio_error(hdr, task->tk_status, data->args.offset);
        else if (data->res.eof) {
                loff_t bound;

                bound = data->args.offset + data->res.count;
                spin_lock(&hdr->lock);
                if (bound < hdr->io_start + hdr->good_bytes) {
                        set_bit(NFS_IOHDR_EOF, &hdr->flags);
                        clear_bit(NFS_IOHDR_ERROR, &hdr->flags);
                        hdr->good_bytes = bound - hdr->io_start;
                }
                spin_unlock(&hdr->lock);
        } else if (data->res.count != data->args.count)
                nfs_readpage_retry(task, data);
}

static void nfs_readpage_release_common(void *calldata)
{
        nfs_readdata_release(calldata);
}

void nfs_read_prepare(struct rpc_task *task, void *calldata)
{
        struct nfs_read_data *data = calldata;
        NFS_PROTO(data->header->inode)->read_rpc_prepare(task, data);
}

static const struct rpc_call_ops nfs_read_common_ops = {
        .rpc_call_prepare = nfs_read_prepare,
        .rpc_call_done = nfs_readpage_result_common,
        .rpc_release = nfs_readpage_release_common,
};

/*
 * Read a page over NFS.
 * We read the page synchronously in the following case:
 *  - The error flag is set for this page. This happens only when a
 *    previous async read operation failed.
 */
int nfs_readpage(struct file *file, struct page *page)
{
        struct nfs_open_context *ctx;
        struct inode *inode = page->mapping->host;
        int error;

        dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
                page, PAGE_CACHE_SIZE, page->index);
        nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
        nfs_add_stats(inode, NFSIOS_READPAGES, 1);

        /*
         * Try to flush any pending writes to the file..
         *
         * NOTE! Because we own the page lock, there cannot
         * be any new pending writes generated at this point
         * for this page (other pages can be written to).
         */
        error = nfs_wb_page(inode, page);
        if (error)
                goto out_unlock;
        if (PageUptodate(page))
                goto out_unlock;

        error = -ESTALE;
        if (NFS_STALE(inode))
                goto out_unlock;

        if (file == NULL) {
                error = -EBADF;
                ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
                if (ctx == NULL)
                        goto out_unlock;
        } else
                ctx = get_nfs_open_context(nfs_file_open_context(file));

        if (!IS_SYNC(inode)) {
                error = nfs_readpage_from_fscache(ctx, inode, page);
                if (error == 0)
                        goto out;
        }

        error = nfs_readpage_async(ctx, inode, page);

out:
        put_nfs_open_context(ctx);
        return error;
out_unlock:
        unlock_page(page);
        return error;
}

struct nfs_readdesc {
        struct nfs_pageio_descriptor *pgio;
        struct nfs_open_context *ctx;
};

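/*
 * read_cache_pages() callback: turn one page from the readahead list into
 * an nfs_page request and add it to the pageio descriptor.
 */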
static int
readpage_async_filler(void *data, struct page *page)
{
        struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
        struct inode *inode = page->mapping->host;
        struct nfs_page *new;
        unsigned int len;
        int error;

        len = nfs_page_length(page);
        if (len == 0)
                return nfs_return_empty_page(page);

        new = nfs_create_request(desc->ctx, inode, page, 0, len);
        if (IS_ERR(new))
                goto out_error;

        if (len < PAGE_CACHE_SIZE)
                zero_user_segment(page, len, PAGE_CACHE_SIZE);
        if (!nfs_pageio_add_request(desc->pgio, new)) {
                error = desc->pgio->pg_error;
                goto out_unlock;
        }
        return 0;
out_error:
        error = PTR_ERR(new);
out_unlock:
        unlock_page(page);
        return error;
}

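/*
 * ->readpages() entry point: try fscache first, then queue the remaining
 * pages through a pageio descriptor for asynchronous RPC reads.
 */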
int nfs_readpages(struct file *filp, struct address_space *mapping,
                  struct list_head *pages, unsigned nr_pages)
{
        struct nfs_pageio_descriptor pgio;
        struct nfs_readdesc desc = {
                .pgio = &pgio,
        };
        struct inode *inode = mapping->host;
        unsigned long npages;
        int ret = -ESTALE;

        dprintk("NFS: nfs_readpages (%s/%Ld %d)\n",
                        inode->i_sb->s_id,
                        (long long)NFS_FILEID(inode),
                        nr_pages);
        nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);

        if (NFS_STALE(inode))
                goto out;

        if (filp == NULL) {
                desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
                if (desc.ctx == NULL)
                        return -EBADF;
        } else
                desc.ctx = get_nfs_open_context(nfs_file_open_context(filp));

        /* attempt to read as many of the pages as possible from the cache
         * - this returns -ENOBUFS immediately if the cookie is negative
         */
        ret = nfs_readpages_from_fscache(desc.ctx, inode, mapping,
                                         pages, &nr_pages);
        if (ret == 0)
                goto read_complete; /* all pages were read */

        NFS_PROTO(inode)->read_pageio_init(&pgio, inode, &nfs_async_read_completion_ops);

        ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);

        nfs_pageio_complete(&pgio);
        NFS_I(inode)->read_io += pgio.pg_bytes_written;
        npages = (pgio.pg_bytes_written + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        nfs_add_stats(inode, NFSIOS_READPAGES, npages);
read_complete:
        put_nfs_open_context(desc.ctx);
out:
        return ret;
}

int __init nfs_init_readpagecache(void)
{
        nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
                                             sizeof(struct nfs_read_header),
                                             0, SLAB_HWCACHE_ALIGN,
                                             NULL);
        if (nfs_rdata_cachep == NULL)
                return -ENOMEM;

        return 0;
}

void nfs_destroy_readpagecache(void)
{
        kmem_cache_destroy(nfs_rdata_cachep);
}