/*
 * linux/fs/nfs/direct.c
 *
 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data.  Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols.  Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache.  A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache.  The client does not
 * correct unaligned requests from applications.  All requested bytes are
 * held on permanent storage before a direct write system call returns to
 * an application.
 *
 * Solaris implements an uncached I/O facility called directio() that
 * is used for backups and sequential I/O to very large files.  Solaris
 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 * an undocumented mount option.
 *
 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 * help from Andrew Morton.
 *
 * 18 Dec 2001	Initial implementation for 2.4  --cel
 * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
 * 08 Jun 2003	Port to 2.5 APIs  --cel
 * 31 Mar 2004	Handle direct I/O without VFS support  --cel
 * 15 Sep 2004	Parallel async reads  --cel
 * 04 May 2005	support O_DIRECT with aio  --cel
 *
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>

#include <asm/uaccess.h>
#include <linux/atomic.h>

#include "internal.h"
#include "iostat.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_VFS

static struct kmem_cache *nfs_direct_cachep;

/*
 * This represents a set of asynchronous requests that we're waiting on
 */
struct nfs_direct_req {
	struct kref		kref;		/* release manager */

	/* I/O parameters */
	struct nfs_open_context	*ctx;		/* file open context info */
	struct nfs_lock_context	*l_ctx;		/* Lock context info */
	struct kiocb		*iocb;		/* controlling i/o request */
	struct inode		*inode;		/* target file of i/o */

	/* completion state */
	atomic_t		io_count;	/* i/os we're waiting for */
	spinlock_t		lock;		/* protect completion state */
	ssize_t			count,		/* bytes actually processed */
				error;		/* any reported error */
	struct completion	completion;	/* wait for i/o completion */

	/* commit state */
	struct nfs_mds_commit_info mds_cinfo;	/* Storage for cinfo */
	struct pnfs_ds_commit_info ds_cinfo;	/* Storage for cinfo */
	struct work_struct	work;
	int			flags;
#define NFS_ODIRECT_DO_COMMIT		(1)	/* an unstable reply was received */
#define NFS_ODIRECT_RESCHED_WRITES	(2)	/* write verification failed */
	struct nfs_writeverf	verf;		/* unstable write verifier */
};
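
/*
 * Reference counting sketch (derived from the code below, not part of
 * the original file): io_count starts at zero; each scheduling path
 * holds one reference for itself via get_dreq(), and the pageio layer
 * takes one per generated header via nfs_direct_pgio_init().  Every
 * completion drops one with put_dreq(), and whoever drops the last
 * reference finishes the whole direct request.
 */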

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops;
static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
static void nfs_direct_write_schedule_work(struct work_struct *work);

static inline void get_dreq(struct nfs_direct_req *dreq)
{
	atomic_inc(&dreq->io_count);
}

static inline int put_dreq(struct nfs_direct_req *dreq)
{
	return atomic_dec_and_test(&dreq->io_count);
}

/**
 * nfs_direct_IO - NFS address space operation for direct I/O
 * @rw: direction (read or write)
 * @iocb: target I/O control block
 * @iov: array of vectors that define I/O buffer
 * @pos: offset in file to begin the operation
 * @nr_segs: size of iovec array
 *
 * The presence of this routine in the address space ops vector means
 * the NFS client supports direct I/O.  However, we shunt off direct
 * read and write requests before the VFS gets them, so this method
 * should never be called.
 */
ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
{
	dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
			iocb->ki_filp->f_path.dentry->d_name.name,
			(long long) pos, nr_segs);

	return -EINVAL;
}
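
/*
 * Direct requests instead arrive at nfs_file_direct_read() and
 * nfs_file_direct_write() below, via the NFS file operations.  From
 * userspace the trigger is simply O_DIRECT; an illustrative sketch
 * (not part of this file):
 *
 *	int fd = open("/mnt/nfs/data", O_RDWR | O_DIRECT);
 *	ssize_t n = pread(fd, buf, len, offset);
 */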

static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
{
	unsigned int i;
	for (i = 0; i < npages; i++)
		page_cache_release(pages[i]);
}

void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
			      struct nfs_direct_req *dreq)
{
	cinfo->lock = &dreq->lock;
	cinfo->mds = &dreq->mds_cinfo;
	cinfo->ds = &dreq->ds_cinfo;
	cinfo->dreq = dreq;
	cinfo->completion_ops = &nfs_direct_commit_completion_ops;
}

static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
	struct nfs_direct_req *dreq;

	dreq = kmem_cache_alloc(nfs_direct_cachep, GFP_KERNEL);
	if (!dreq)
		return NULL;

	kref_init(&dreq->kref);
	kref_get(&dreq->kref);
	init_completion(&dreq->completion);
	dreq->mds_cinfo.ncommit = 0;
	atomic_set(&dreq->mds_cinfo.rpcs_out, 0);
	INIT_LIST_HEAD(&dreq->mds_cinfo.list);
	INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
	memset(&dreq->ds_cinfo, 0, sizeof(dreq->ds_cinfo));
	dreq->iocb = NULL;
	dreq->ctx = NULL;
	dreq->l_ctx = NULL;
	spin_lock_init(&dreq->lock);
	atomic_set(&dreq->io_count, 0);
	dreq->count = 0;
	dreq->error = 0;
	dreq->flags = 0;

	return dreq;
}

static void nfs_direct_req_free(struct kref *kref)
{
	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);

	if (dreq->l_ctx != NULL)
		nfs_put_lock_context(dreq->l_ctx);
	if (dreq->ctx != NULL)
		put_nfs_open_context(dreq->ctx);
	kmem_cache_free(nfs_direct_cachep, dreq);
}

static void nfs_direct_req_release(struct nfs_direct_req *dreq)
{
	kref_put(&dreq->kref, nfs_direct_req_free);
}

/*
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
{
	ssize_t result = -EIOCBQUEUED;

	/* Async requests don't wait here */
	if (dreq->iocb)
		goto out;

	result = wait_for_completion_killable(&dreq->completion);

	if (!result)
		result = dreq->error;
	if (!result)
		result = dreq->count;

out:
	return (ssize_t) result;
}
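
/*
 * For aio requests (dreq->iocb set) the function above returns
 * -EIOCBQUEUED immediately; the final result is delivered later
 * through aio_complete() in nfs_direct_complete() below.
 */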

/*
 * Synchronous I/O uses a stack-allocated iocb.  Thus we can't trust
 * the iocb is still valid here if this is a synchronous request.
 */
static void nfs_direct_complete(struct nfs_direct_req *dreq)
{
	if (dreq->iocb) {
		long res = (long) dreq->error;
		if (!res)
			res = (long) dreq->count;
		aio_complete(dreq->iocb, res, 0);
	}
	complete_all(&dreq->completion);

	nfs_direct_req_release(dreq);
}

static void nfs_direct_readpage_release(struct nfs_page *req)
{
	dprintk("NFS: direct read done (%s/%lld %d@%lld)\n",
		req->wb_context->dentry->d_inode->i_sb->s_id,
		(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
		req->wb_bytes,
		(long long)req_offset(req));
	nfs_release_request(req);
}

static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;
	struct nfs_direct_req *dreq = hdr->dreq;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out_put;

	spin_lock(&dreq->lock);
	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && (hdr->good_bytes == 0))
		dreq->error = hdr->error;
	else
		dreq->count += hdr->good_bytes;
	spin_unlock(&dreq->lock);

	if (!test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
		while (!list_empty(&hdr->pages)) {
			struct nfs_page *req = nfs_list_entry(hdr->pages.next);
			struct page *page = req->wb_page;

			if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
				if (bytes > hdr->good_bytes)
					zero_user(page, 0, PAGE_SIZE);
				else if (hdr->good_bytes - bytes < PAGE_SIZE)
					zero_user_segment(page,
						hdr->good_bytes & ~PAGE_MASK,
						PAGE_SIZE);
			}
			bytes += req->wb_bytes;
			nfs_list_remove_request(req);
			if (!PageCompound(page))
				set_page_dirty(page);
			nfs_direct_readpage_release(req);
		}
	} else {
		while (!list_empty(&hdr->pages)) {
			struct nfs_page *req = nfs_list_entry(hdr->pages.next);

			if (bytes < hdr->good_bytes)
				if (!PageCompound(req->wb_page))
					set_page_dirty(req->wb_page);
			bytes += req->wb_bytes;
			nfs_list_remove_request(req);
			nfs_direct_readpage_release(req);
		}
	}
out_put:
	if (put_dreq(dreq))
		nfs_direct_complete(dreq);
	hdr->release(hdr);
}

static void nfs_read_sync_pgio_error(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_release_request(req);
	}
}

static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr)
{
	get_dreq(hdr->dreq);
}

static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
	.error_cleanup = nfs_read_sync_pgio_error,
	.init_hdr = nfs_direct_pgio_init,
	.completion = nfs_direct_read_completion,
};
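
/*
 * These ops are handed to the generic pageio layer: .init_hdr pins the
 * dreq for each header the layer creates, and .completion drops that
 * pin with put_dreq(), so the dreq cannot be freed while reads are
 * still in flight.
 */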

/*
 * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
 * operation.  If allocating the pagevec or get_user_pages() fails,
 * bail and stop sending more reads.  Read length accounting is
 * handled automatically by nfs_direct_read_completion().  Otherwise, if
 * no requests have been sent, just return an error.
 */
static ssize_t nfs_direct_read_schedule_segment(struct nfs_pageio_descriptor *desc,
						const struct iovec *iov,
						loff_t pos)
{
	struct nfs_direct_req *dreq = desc->pg_dreq;
	struct nfs_open_context *ctx = dreq->ctx;
	struct inode *inode = ctx->dentry->d_inode;
	unsigned long user_addr = (unsigned long)iov->iov_base;
	size_t count = iov->iov_len;
	size_t rsize = NFS_SERVER(inode)->rsize;
	unsigned int pgbase;
	int result;
	ssize_t started = 0;
	struct page **pagevec = NULL;
	unsigned int npages;

	do {
		size_t bytes;
		int i;

		pgbase = user_addr & ~PAGE_MASK;
		bytes = min(max(rsize, PAGE_SIZE), count);

		result = -ENOMEM;
		npages = nfs_page_array_len(pgbase, bytes);
		if (!pagevec)
			pagevec = kmalloc(npages * sizeof(struct page *),
					  GFP_KERNEL);
		if (!pagevec)
			break;
		down_read(&current->mm->mmap_sem);
		result = get_user_pages(current, current->mm, user_addr,
					npages, 1, 0, pagevec, NULL);
		up_read(&current->mm->mmap_sem);
		if (result < 0)
			break;
		if ((unsigned)result < npages) {
			bytes = result * PAGE_SIZE;
			if (bytes <= pgbase) {
				nfs_direct_release_pages(pagevec, result);
				break;
			}
			bytes -= pgbase;
			npages = result;
		}

		for (i = 0; i < npages; i++) {
			struct nfs_page *req;
			unsigned int req_len = min(bytes, PAGE_SIZE - pgbase);
			/* XXX do we need to do the eof zeroing found in async_filler? */
			req = nfs_create_request(dreq->ctx, dreq->inode,
						 pagevec[i],
						 pgbase, req_len);
			if (IS_ERR(req)) {
				result = PTR_ERR(req);
				break;
			}
			req->wb_index = pos >> PAGE_SHIFT;
			req->wb_offset = pos & ~PAGE_MASK;
			if (!nfs_pageio_add_request(desc, req)) {
				result = desc->pg_error;
				nfs_release_request(req);
				break;
			}
			pgbase = 0;
			bytes -= req_len;
			started += req_len;
			user_addr += req_len;
			pos += req_len;
			count -= req_len;
		}
		/* The nfs_page now hold references to these pages */
		nfs_direct_release_pages(pagevec, npages);
	} while (count != 0 && result >= 0);

	kfree(pagevec);

	if (started)
		return started;
	return result < 0 ? (ssize_t) result : -EFAULT;
}
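
/*
 * Page referencing note: get_user_pages() takes a reference on each
 * user page, and nfs_create_request() takes its own reference for the
 * lifetime of the nfs_page.  The loop above can therefore drop the
 * pagevec references with nfs_direct_release_pages() as soon as all
 * requests for the chunk have been created.
 */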

static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
					      const struct iovec *iov,
					      unsigned long nr_segs,
					      loff_t pos)
{
	struct nfs_pageio_descriptor desc;
	ssize_t result = -EINVAL;
	size_t requested_bytes = 0;
	unsigned long seg;

	nfs_pageio_init_read(&desc, dreq->inode,
			     &nfs_direct_read_completion_ops);
	get_dreq(dreq);
	desc.pg_dreq = dreq;

	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *vec = &iov[seg];
		result = nfs_direct_read_schedule_segment(&desc, vec, pos);
		if (result < 0)
			break;
		requested_bytes += result;
		if ((size_t)result < vec->iov_len)
			break;
		pos += vec->iov_len;
	}

	nfs_pageio_complete(&desc);

	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	if (requested_bytes == 0) {
		nfs_direct_req_release(dreq);
		return result < 0 ? result : -EIO;
	}

	if (put_dreq(dreq))
		nfs_direct_complete(dreq);
	return 0;
}

static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
			       unsigned long nr_segs, loff_t pos)
{
	ssize_t result = -ENOMEM;
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct nfs_direct_req *dreq;

	dreq = nfs_direct_req_alloc();
	if (dreq == NULL)
		goto out;

	dreq->inode = inode;
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	dreq->l_ctx = nfs_get_lock_context(dreq->ctx);
	if (dreq->l_ctx == NULL)
		goto out_release;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	result = nfs_direct_read_schedule_iovec(dreq, iov, nr_segs, pos);
	if (!result)
		result = nfs_direct_wait(dreq);
out_release:
	nfs_direct_req_release(dreq);
out:
	return result;
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
	struct nfs_pageio_descriptor desc;
	struct nfs_page *req, *tmp;
	LIST_HEAD(reqs);
	struct nfs_commit_info cinfo;
	LIST_HEAD(failed);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	pnfs_recover_commit_reqs(dreq->inode, &reqs, &cinfo);
	spin_lock(cinfo.lock);
	nfs_scan_commit_list(&cinfo.mds->list, &reqs, &cinfo, 0);
	spin_unlock(cinfo.lock);

	dreq->count = 0;
	get_dreq(dreq);

	nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE,
			      &nfs_direct_write_completion_ops);
	desc.pg_dreq = dreq;

	list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
		if (!nfs_pageio_add_request(&desc, req)) {
			nfs_list_add_request(req, &failed);
			spin_lock(cinfo.lock);
			dreq->flags = 0;
			dreq->error = -EIO;
			spin_unlock(cinfo.lock);
		}
	}
	nfs_pageio_complete(&desc);

	while (!list_empty(&failed)) {
		req = nfs_list_entry(failed.next);
		nfs_list_remove_request(req);
		nfs_release_request(req);
		nfs_unlock_request(req);
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, dreq->inode);
}

static void nfs_direct_commit_complete(struct nfs_commit_data *data)
{
	struct nfs_direct_req *dreq = data->dreq;
	struct nfs_commit_info cinfo;
	struct nfs_page *req;
	int status = data->task.tk_status;

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	if (status < 0) {
		dprintk("NFS: %5u commit failed with error %d.\n",
			data->task.tk_pid, status);
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	} else if (memcmp(&dreq->verf, &data->verf, sizeof(data->verf))) {
		dprintk("NFS: %5u commit verify failed\n", data->task.tk_pid);
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	}

	dprintk("NFS: %5u commit returned %d\n", data->task.tk_pid, status);
	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) {
			/* Note the rewrite will go through mds */
			nfs_mark_request_commit(req, NULL, &cinfo);
		} else
			nfs_release_request(req);
		nfs_unlock_request(req);
	}

	if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
		nfs_direct_write_complete(dreq, data->inode);
}

static void nfs_direct_error_cleanup(struct nfs_inode *nfsi)
{
	/* There is no lock to clear */
}

static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops = {
	.completion = nfs_direct_commit_complete,
	.error_cleanup = nfs_direct_error_cleanup,
};

static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
{
	int res;
	struct nfs_commit_info cinfo;
	LIST_HEAD(mds_list);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
	res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
	if (res < 0) /* res == -ENOMEM */
		nfs_direct_write_reschedule(dreq);
}

static void nfs_direct_write_schedule_work(struct work_struct *work)
{
	struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
	int flags = dreq->flags;

	dreq->flags = 0;
	switch (flags) {
		case NFS_ODIRECT_DO_COMMIT:
			nfs_direct_commit_schedule(dreq);
			break;
		case NFS_ODIRECT_RESCHED_WRITES:
			nfs_direct_write_reschedule(dreq);
			break;
		default:
			nfs_zap_mapping(dreq->inode, dreq->inode->i_mapping);
			nfs_direct_complete(dreq);
	}
}
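
/*
 * Write completion state machine, as driven by dreq->flags:
 * NFS_ODIRECT_DO_COMMIT sends a COMMIT for unstable replies;
 * NFS_ODIRECT_RESCHED_WRITES resends the data (e.g. after a commit or
 * verifier failure); otherwise the mapping is zapped and the request
 * completes.
 */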

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
	schedule_work(&dreq->work); /* Calls nfs_direct_write_schedule_work */
}

#else
static void nfs_direct_write_schedule_work(struct work_struct *work)
{
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
	nfs_zap_mapping(inode, inode->i_mapping);
	nfs_direct_complete(dreq);
}
#endif

/*
 * NB: Return the value of the first error return code.  Subsequent
 * errors after the first one are ignored.
 *
 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
 * operation.  If allocating the pagevec or get_user_pages() fails,
 * bail and stop sending more writes.  Write length accounting is
 * handled automatically by nfs_direct_write_completion().  Otherwise, if
 * no requests have been sent, just return an error.
 */
static ssize_t nfs_direct_write_schedule_segment(struct nfs_pageio_descriptor *desc,
						 const struct iovec *iov,
						 loff_t pos)
{
	struct nfs_direct_req *dreq = desc->pg_dreq;
	struct nfs_open_context *ctx = dreq->ctx;
	struct inode *inode = ctx->dentry->d_inode;
	unsigned long user_addr = (unsigned long)iov->iov_base;
	size_t count = iov->iov_len;
	size_t wsize = NFS_SERVER(inode)->wsize;
	unsigned int pgbase;
	int result;
	ssize_t started = 0;
	struct page **pagevec = NULL;
	unsigned int npages;

	do {
		size_t bytes;
		int i;

		pgbase = user_addr & ~PAGE_MASK;
		bytes = min(max(wsize, PAGE_SIZE), count);

		result = -ENOMEM;
		npages = nfs_page_array_len(pgbase, bytes);
		if (!pagevec)
			pagevec = kmalloc(npages * sizeof(struct page *), GFP_KERNEL);
		if (!pagevec)
			break;

		down_read(&current->mm->mmap_sem);
		result = get_user_pages(current, current->mm, user_addr,
					npages, 0, 0, pagevec, NULL);
		up_read(&current->mm->mmap_sem);
		if (result < 0)
			break;

		if ((unsigned)result < npages) {
			bytes = result * PAGE_SIZE;
			if (bytes <= pgbase) {
				nfs_direct_release_pages(pagevec, result);
				break;
			}
			bytes -= pgbase;
			npages = result;
		}

		for (i = 0; i < npages; i++) {
			struct nfs_page *req;
			unsigned int req_len = min(bytes, PAGE_SIZE - pgbase);

			req = nfs_create_request(dreq->ctx, dreq->inode,
						 pagevec[i],
						 pgbase, req_len);
			if (IS_ERR(req)) {
				result = PTR_ERR(req);
				break;
			}
			nfs_lock_request(req);
			req->wb_index = pos >> PAGE_SHIFT;
			req->wb_offset = pos & ~PAGE_MASK;
			if (!nfs_pageio_add_request(desc, req)) {
				result = desc->pg_error;
				nfs_unlock_request(req);
				nfs_release_request(req);
				break;
			}
			pgbase = 0;
			bytes -= req_len;
			started += req_len;
			user_addr += req_len;
			pos += req_len;
			count -= req_len;
		}
		/* The nfs_page now hold references to these pages */
		nfs_direct_release_pages(pagevec, npages);
	} while (count != 0 && result >= 0);

	kfree(pagevec);

	if (started)
		return started;
	return result < 0 ? (ssize_t) result : -EFAULT;
}
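
/*
 * This mirrors the read-side scheduling above, with two differences:
 * the user pages are only read from, so get_user_pages() is called
 * without write access, and each request is locked with
 * nfs_lock_request() so it can be tracked on the commit lists until
 * the server confirms stable storage.
 */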

static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
{
	struct nfs_direct_req *dreq = hdr->dreq;
	struct nfs_commit_info cinfo;
	int bit = -1;
	struct nfs_page *req = nfs_list_entry(hdr->pages.next);

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out_put;

	nfs_init_cinfo_from_dreq(&cinfo, dreq);

	spin_lock(&dreq->lock);

	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
		dreq->flags = 0;
		dreq->error = hdr->error;
	}
	if (dreq->error != 0)
		bit = NFS_IOHDR_ERROR;
	else {
		dreq->count += hdr->good_bytes;
		if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags)) {
			dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
			bit = NFS_IOHDR_NEED_RESCHED;
		} else if (test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) {
			if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
				bit = NFS_IOHDR_NEED_RESCHED;
			else if (dreq->flags == 0) {
				memcpy(&dreq->verf, &req->wb_verf,
				       sizeof(dreq->verf));
				bit = NFS_IOHDR_NEED_COMMIT;
				dreq->flags = NFS_ODIRECT_DO_COMMIT;
			} else if (dreq->flags == NFS_ODIRECT_DO_COMMIT) {
				if (memcmp(&dreq->verf, &req->wb_verf, sizeof(dreq->verf))) {
					dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
					bit = NFS_IOHDR_NEED_RESCHED;
				} else
					bit = NFS_IOHDR_NEED_COMMIT;
			}
		}
	}
	spin_unlock(&dreq->lock);

	while (!list_empty(&hdr->pages)) {
		req = nfs_list_entry(hdr->pages.next);
		nfs_list_remove_request(req);
		switch (bit) {
		case NFS_IOHDR_NEED_RESCHED:
		case NFS_IOHDR_NEED_COMMIT:
			nfs_mark_request_commit(req, hdr->lseg, &cinfo);
			break;
		default:
			nfs_release_request(req);
		}
		nfs_unlock_request(req);
	}

out_put:
	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, hdr->inode);
	hdr->release(hdr);
}
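
/*
 * Verifier handling above: the first unstable reply's write verifier
 * is recorded in dreq->verf; any later reply carrying a different
 * verifier indicates the server restarted between writes, so the data
 * is rescheduled rather than committed.
 */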

static void nfs_write_sync_pgio_error(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_release_request(req);
		nfs_unlock_request(req);
	}
}

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
	.error_cleanup = nfs_write_sync_pgio_error,
	.init_hdr = nfs_direct_pgio_init,
	.completion = nfs_direct_write_completion,
};

static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
					       const struct iovec *iov,
					       unsigned long nr_segs,
					       loff_t pos)
{
	struct nfs_pageio_descriptor desc;
	ssize_t result = 0;
	size_t requested_bytes = 0;
	unsigned long seg;

	nfs_pageio_init_write(&desc, dreq->inode, FLUSH_COND_STABLE,
			      &nfs_direct_write_completion_ops);
	desc.pg_dreq = dreq;
	get_dreq(dreq);

	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *vec = &iov[seg];
		result = nfs_direct_write_schedule_segment(&desc, vec, pos);
		if (result < 0)
			break;
		requested_bytes += result;
		if ((size_t)result < vec->iov_len)
			break;
		pos += vec->iov_len;
	}
	nfs_pageio_complete(&desc);

	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	if (requested_bytes == 0) {
		nfs_direct_req_release(dreq);
		return result < 0 ? result : -EIO;
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, dreq->inode);
	return 0;
}

static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
				unsigned long nr_segs, loff_t pos,
				size_t count)
{
	ssize_t result = -ENOMEM;
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct nfs_direct_req *dreq;

	dreq = nfs_direct_req_alloc();
	if (!dreq)
		goto out;

	dreq->inode = inode;
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	dreq->l_ctx = nfs_get_lock_context(dreq->ctx);
	if (dreq->l_ctx == NULL)
		goto out_release;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos);
	if (!result)
		result = nfs_direct_wait(dreq);
out_release:
	nfs_direct_req_release(dreq);
out:
	return result;
}

/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @iov: vector of user buffers into which to read data
 * @nr_segs: size of iov vector
 * @pos: byte offset in file where reading starts
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid gfar's check to see if
 * the request starts before the end of the file.  For that check
 * to work, we must generate a GETATTR before each direct read, and
 * even then there is a window between the GETATTR and the subsequent
 * READ where the file size could change.  Our preference is simply
 * to do all reads the application wants, and the server will take
 * care of managing the end of file boundary.
 *
 * This function also eliminates unnecessarily updating the file's
 * atime locally, as the NFS server sets the file's atime, and this
 * client must read the updated atime from the server back into its
 * cache.
 */
ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	ssize_t retval = -EINVAL;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t count;

	count = iov_length(iov, nr_segs);
	nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);

	dfprintk(FILE, "NFS: direct read(%s/%s, %zd@%Ld)\n",
		file->f_path.dentry->d_parent->d_name.name,
		file->f_path.dentry->d_name.name,
		count, (long long) pos);

	retval = 0;
	if (!count)
		goto out;

	retval = nfs_sync_mapping(mapping);
	if (retval)
		goto out;

	task_io_account_read(count);

	retval = nfs_direct_read(iocb, iov, nr_segs, pos);
	if (retval > 0)
		iocb->ki_pos = pos + retval;

out:
	return retval;
}

/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @iov: vector of user buffers from which to write data
 * @nr_segs: size of iov vector
 * @pos: byte offset in file where writing starts
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size.  The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache.  We let the server do generic write
 * parameter checking and report problems.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	ssize_t retval = -EINVAL;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t count;

	count = iov_length(iov, nr_segs);
	nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);

	dfprintk(FILE, "NFS: direct write(%s/%s, %zd@%Ld)\n",
		file->f_path.dentry->d_parent->d_name.name,
		file->f_path.dentry->d_name.name,
		count, (long long) pos);

	retval = generic_write_checks(file, &pos, &count, 0);
	if (retval)
		goto out;

	retval = -EINVAL;
	if ((ssize_t) count < 0)
		goto out;
	retval = 0;
	if (!count)
		goto out;

	retval = nfs_sync_mapping(mapping);
	if (retval)
		goto out;

	task_io_account_write(count);

	retval = nfs_direct_write(iocb, iov, nr_segs, pos, count);
	if (retval > 0) {
		struct inode *inode = mapping->host;

		iocb->ki_pos = pos + retval;
		spin_lock(&inode->i_lock);
		if (i_size_read(inode) < iocb->ki_pos)
			i_size_write(inode, iocb->ki_pos);
		spin_unlock(&inode->i_lock);
	}
out:
	return retval;
}

/**
 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
 *
 */
int __init nfs_init_directcache(void)
{
	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
					      sizeof(struct nfs_direct_req),
					      0, (SLAB_RECLAIM_ACCOUNT|
						  SLAB_MEM_SPREAD),
					      NULL);
	if (nfs_direct_cachep == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
 *
 */
void nfs_destroy_directcache(void)
{
	kmem_cache_destroy(nfs_direct_cachep);
}