NFS: Use wait_on_atomic_t() for unlock after readahead

/*
 * linux/fs/nfs/write.c
 *
 * Write file data over NFS.
 *
 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/migrate.h>

#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/backing-dev.h>
#include <linux/export.h>
#include <linux/freezer.h>
#include <linux/wait.h>

#include <asm/uaccess.h>

#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "nfs4_fs.h"
#include "fscache.h"
#include "pnfs.h"

#include "nfstrace.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

#define MIN_POOL_WRITE		(32)
#define MIN_POOL_COMMIT		(4)

/*
 * Local function declarations
 */
static void nfs_redirty_request(struct nfs_page *req);
static const struct rpc_call_ops nfs_commit_ops;
static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
static const struct nfs_rw_ops nfs_rw_write_ops;
static void nfs_clear_request_commit(struct nfs_page *req);
static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
				      struct inode *inode);
static struct nfs_page *
nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi,
						struct page *page);

static struct kmem_cache *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
static struct kmem_cache *nfs_cdata_cachep;
static mempool_t *nfs_commit_mempool;

struct nfs_commit_data *nfs_commitdata_alloc(void)
{
	struct nfs_commit_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOIO);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
	}
	return p;
}
EXPORT_SYMBOL_GPL(nfs_commitdata_alloc);

void nfs_commit_free(struct nfs_commit_data *p)
{
	mempool_free(p, nfs_commit_mempool);
}
EXPORT_SYMBOL_GPL(nfs_commit_free);

static struct nfs_pgio_header *nfs_writehdr_alloc(void)
{
	struct nfs_pgio_header *p = mempool_alloc(nfs_wdata_mempool, GFP_NOIO);

	if (p)
		memset(p, 0, sizeof(*p));
	return p;
}

static void nfs_writehdr_free(struct nfs_pgio_header *hdr)
{
	mempool_free(hdr, nfs_wdata_mempool);
}

static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
{
	ctx->error = error;
	smp_wmb();
	set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
}
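
/*
 * A minimal consumer-side sketch (hypothetical helper, not in this file)
 * of the smp_wmb() pairing above: a reader must observe the error flag
 * before it trusts ctx->error, so it orders its loads with smp_rmb().
 */
static inline int nfs_context_check_write_error_example(struct nfs_open_context *ctx)
{
	if (!test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags))
		return 0;
	/* pairs with smp_wmb() in nfs_context_set_write_error() */
	smp_rmb();
	return ctx->error;
}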

/*
 * nfs_page_find_head_request_locked - find head request associated with @page
 *
 * must be called while holding the inode lock.
 *
 * returns matching head request with reference held, or NULL if not found.
 */
static struct nfs_page *
nfs_page_find_head_request_locked(struct nfs_inode *nfsi, struct page *page)
{
	struct nfs_page *req = NULL;

	if (PagePrivate(page))
		req = (struct nfs_page *)page_private(page);
	else if (unlikely(PageSwapCache(page)))
		req = nfs_page_search_commits_for_head_request_locked(nfsi,
			page);

	if (req) {
		WARN_ON_ONCE(req->wb_head != req);
		kref_get(&req->wb_kref);
	}

	return req;
}

/*
 * nfs_page_find_head_request - find head request associated with @page
 *
 * returns matching head request with reference held, or NULL if not found.
 */
static struct nfs_page *nfs_page_find_head_request(struct page *page)
{
	struct inode *inode = page_file_mapping(page)->host;
	struct nfs_page *req = NULL;

	spin_lock(&inode->i_lock);
	req = nfs_page_find_head_request_locked(NFS_I(inode), page);
	spin_unlock(&inode->i_lock);
	return req;
}

/* Adjust the file length if we're writing beyond the end */
static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
{
	struct inode *inode = page_file_mapping(page)->host;
	loff_t end, i_size;
	pgoff_t end_index;

	spin_lock(&inode->i_lock);
	i_size = i_size_read(inode);
	end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
	if (i_size > 0 && page_file_index(page) < end_index)
		goto out;
	end = page_file_offset(page) + ((loff_t)offset+count);
	if (i_size >= end)
		goto out;
	i_size_write(inode, end);
	nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
out:
	spin_unlock(&inode->i_lock);
}

/* A writeback failed: mark the page as bad, and invalidate the page cache */
static void nfs_set_pageerror(struct page *page)
{
	nfs_zap_mapping(page_file_mapping(page)->host, page_file_mapping(page));
}

/*
 * nfs_page_group_search_locked
 * @head - head request of page group
 * @page_offset - offset into page
 *
 * Search page group with head @head to find a request that contains the
 * page offset @page_offset.
 *
 * Returns a pointer to the first matching nfs request, or NULL if no
 * match is found.
 *
 * Must be called with the page group lock held
 */
static struct nfs_page *
nfs_page_group_search_locked(struct nfs_page *head, unsigned int page_offset)
{
	struct nfs_page *req;

	WARN_ON_ONCE(head != head->wb_head);
	WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &head->wb_head->wb_flags));

	req = head;
	do {
		if (page_offset >= req->wb_pgbase &&
		    page_offset < (req->wb_pgbase + req->wb_bytes))
			return req;

		req = req->wb_this_page;
	} while (req != head);

	return NULL;
}

/*
 * nfs_page_group_covers_page
 * @head - head request of page group
 *
 * Return true if the page group with head @head covers the whole page,
 * returns false otherwise
 */
static bool nfs_page_group_covers_page(struct nfs_page *req)
{
	struct nfs_page *tmp;
	unsigned int pos = 0;
	unsigned int len = nfs_page_length(req->wb_page);

	nfs_page_group_lock(req, false);

	do {
		tmp = nfs_page_group_search_locked(req->wb_head, pos);
		if (tmp) {
			/* no way this should happen */
			WARN_ON_ONCE(tmp->wb_pgbase != pos);
			pos += tmp->wb_bytes - (pos - tmp->wb_pgbase);
		}
	} while (tmp && pos < len);

	nfs_page_group_unlock(req);
	WARN_ON_ONCE(pos > len);
	return pos == len;
}

/* We can set the PG_uptodate flag if we see that a write request
 * covers the full page.
 */
static void nfs_mark_uptodate(struct nfs_page *req)
{
	if (PageUptodate(req->wb_page))
		return;
	if (!nfs_page_group_covers_page(req))
		return;
	SetPageUptodate(req->wb_page);
}

static int wb_priority(struct writeback_control *wbc)
{
	int ret = 0;
	if (wbc->for_reclaim)
		return FLUSH_HIGHPRI | FLUSH_STABLE;
	if (wbc->sync_mode == WB_SYNC_ALL)
		ret = FLUSH_COND_STABLE;
	if (wbc->for_kupdate || wbc->for_background)
		ret |= FLUSH_LOWPRI;
	return ret;
}

/*
 * NFS congestion control
 */

int nfs_congestion_kb;

#define NFS_CONGESTION_ON_THRESH	(nfs_congestion_kb >> (PAGE_SHIFT-10))
#define NFS_CONGESTION_OFF_THRESH	\
	(NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))
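
/*
 * Worked example (illustrative values, not from this file): with 4KiB
 * pages (PAGE_SHIFT == 12) the shift converts kilobytes to pages, so
 * nfs_congestion_kb = 65536 (64MiB) gives an ON threshold of
 * 65536 >> 2 = 16384 pages in flight. The OFF threshold backs off by a
 * quarter, to 16384 - 4096 = 12288 pages, providing hysteresis so the
 * bdi does not flap between congested and uncongested.
 */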

static void nfs_set_page_writeback(struct page *page)
{
	struct nfs_server *nfss = NFS_SERVER(page_file_mapping(page)->host);
	int ret = test_set_page_writeback(page);

	WARN_ON_ONCE(ret != 0);

	if (atomic_long_inc_return(&nfss->writeback) >
			NFS_CONGESTION_ON_THRESH) {
		set_bdi_congested(&nfss->backing_dev_info,
					BLK_RW_ASYNC);
	}
}

static void nfs_end_page_writeback(struct nfs_page *req)
{
	struct inode *inode = page_file_mapping(req->wb_page)->host;
	struct nfs_server *nfss = NFS_SERVER(inode);

	if (!nfs_page_group_sync_on_bit(req, PG_WB_END))
		return;

	end_page_writeback(req->wb_page);
	if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
		clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
}


/* nfs_page_group_clear_bits
 * @req - an nfs request
 * clears all page group related bits from @req
 */
static void
nfs_page_group_clear_bits(struct nfs_page *req)
{
	clear_bit(PG_TEARDOWN, &req->wb_flags);
	clear_bit(PG_UNLOCKPAGE, &req->wb_flags);
	clear_bit(PG_UPTODATE, &req->wb_flags);
	clear_bit(PG_WB_END, &req->wb_flags);
	clear_bit(PG_REMOVE, &req->wb_flags);
}


/*
 * nfs_unroll_locks_and_wait - unlock all newly locked reqs and wait on @req
 *
 * this is a helper function for nfs_lock_and_join_requests
 *
 * @inode - inode associated with request page group, must be holding inode lock
 * @head - head request of page group, must be holding head lock
 * @req - request that couldn't lock and needs to wait on the req bit lock
 * @nonblock - if true, don't actually wait
 *
 * NOTE: this must be called holding page_group bit lock and inode spin lock
 * and BOTH will be released before returning.
 *
 * returns 0 on success, < 0 on error.
 */
static int
nfs_unroll_locks_and_wait(struct inode *inode, struct nfs_page *head,
			  struct nfs_page *req, bool nonblock)
	__releases(&inode->i_lock)
{
	struct nfs_page *tmp;
	int ret;

	/* relinquish all the locks successfully grabbed this run */
	for (tmp = head ; tmp != req; tmp = tmp->wb_this_page)
		nfs_unlock_request(tmp);

	WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));

	/* grab a ref on the request that will be waited on */
	kref_get(&req->wb_kref);

	nfs_page_group_unlock(head);
	spin_unlock(&inode->i_lock);

	/* release ref from nfs_page_find_head_request_locked */
	nfs_release_request(head);

	if (!nonblock)
		ret = nfs_wait_on_request(req);
	else
		ret = -EAGAIN;
	nfs_release_request(req);

	return ret;
}

/*
 * nfs_destroy_unlinked_subrequests - destroy recently unlinked subrequests
 *
 * @destroy_list - request list (using wb_this_page) terminated by @old_head
 * @old_head - the old head of the list
 *
 * All subrequests must be locked and removed from all lists, so at this point
 * they are only "active" in this function, and possibly in nfs_wait_on_request
 * with a reference held by some other context.
 */
static void
nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
				 struct nfs_page *old_head)
{
	while (destroy_list) {
		struct nfs_page *subreq = destroy_list;

		destroy_list = (subreq->wb_this_page == old_head) ?
				   NULL : subreq->wb_this_page;

		WARN_ON_ONCE(old_head != subreq->wb_head);

		/* make sure old group is not used */
		subreq->wb_head = subreq;
		subreq->wb_this_page = subreq;

		/* subreq is now totally disconnected from page group or any
		 * write / commit lists. last chance to wake any waiters */
		nfs_unlock_request(subreq);

		if (!test_bit(PG_TEARDOWN, &subreq->wb_flags)) {
			/* release ref on old head request */
			nfs_release_request(old_head);

			nfs_page_group_clear_bits(subreq);

			/* release the PG_INODE_REF reference */
			if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags))
				nfs_release_request(subreq);
			else
				WARN_ON_ONCE(1);
		} else {
			WARN_ON_ONCE(test_bit(PG_CLEAN, &subreq->wb_flags));
			/* zombie requests have already released the last
			 * reference and were waiting on the rest of the
			 * group to complete. Since it's no longer part of a
			 * group, simply free the request */
			nfs_page_group_clear_bits(subreq);
			nfs_free_request(subreq);
		}
	}
}

/*
 * nfs_lock_and_join_requests - join all subreqs to the head req and return
 *                              a locked reference, cancelling any pending
 *                              operations for this page.
 *
 * @page - the page used to lookup the "page group" of nfs_page structures
 * @nonblock - if true, don't block waiting for request locks
 *
 * This function joins all sub requests to the head request by first
 * locking all requests in the group, cancelling any pending operations
 * and finally updating the head request to cover the whole range covered by
 * the (former) group. All subrequests are removed from any write or commit
 * lists, unlinked from the group and destroyed.
 *
 * Returns a locked, referenced pointer to the head request - which after
 * this call is guaranteed to be the only request associated with the page.
 * Returns NULL if no requests are found for @page, or an ERR_PTR if an
 * error was encountered.
 */
static struct nfs_page *
nfs_lock_and_join_requests(struct page *page, bool nonblock)
{
	struct inode *inode = page_file_mapping(page)->host;
	struct nfs_page *head, *subreq;
	struct nfs_page *destroy_list = NULL;
	unsigned int total_bytes;
	int ret;

try_again:
	total_bytes = 0;

	WARN_ON_ONCE(destroy_list);

	spin_lock(&inode->i_lock);

	/*
	 * A reference is taken only on the head request which acts as a
	 * reference to the whole page group - the group will not be destroyed
	 * until the head reference is released.
	 */
	head = nfs_page_find_head_request_locked(NFS_I(inode), page);

	if (!head) {
		spin_unlock(&inode->i_lock);
		return NULL;
	}

	/* holding inode lock, so always make a non-blocking call to try the
	 * page group lock */
	ret = nfs_page_group_lock(head, true);
	if (ret < 0) {
		spin_unlock(&inode->i_lock);

		if (!nonblock && ret == -EAGAIN) {
			nfs_page_group_lock_wait(head);
			nfs_release_request(head);
			goto try_again;
		}

		nfs_release_request(head);
		return ERR_PTR(ret);
	}

	/* lock each request in the page group */
	subreq = head;
	do {
		/*
		 * Subrequests are always contiguous, non overlapping
		 * and in order - but may be repeated (mirrored writes).
		 */
		if (subreq->wb_offset == (head->wb_offset + total_bytes)) {
			/* keep track of how many bytes this group covers */
			total_bytes += subreq->wb_bytes;
		} else if (WARN_ON_ONCE(subreq->wb_offset < head->wb_offset ||
			    ((subreq->wb_offset + subreq->wb_bytes) >
			     (head->wb_offset + total_bytes)))) {
			nfs_page_group_unlock(head);
			spin_unlock(&inode->i_lock);
			return ERR_PTR(-EIO);
		}

		if (!nfs_lock_request(subreq)) {
			/* releases page group bit lock and
			 * inode spin lock and all references */
			ret = nfs_unroll_locks_and_wait(inode, head,
				subreq, nonblock);

			if (ret == 0)
				goto try_again;

			return ERR_PTR(ret);
		}

		subreq = subreq->wb_this_page;
	} while (subreq != head);

	/* Now that all requests are locked, make sure they aren't on any list.
	 * Commit list removal accounting is done after locks are dropped */
	subreq = head;
	do {
		nfs_clear_request_commit(subreq);
		subreq = subreq->wb_this_page;
	} while (subreq != head);

	/* unlink subrequests from head, destroy them later */
	if (head->wb_this_page != head) {
		/* destroy list will be terminated by head */
		destroy_list = head->wb_this_page;
		head->wb_this_page = head;

		/* change head request to cover whole range that
		 * the former page group covered */
		head->wb_bytes = total_bytes;
	}

	/*
	 * prepare head request to be added to new pgio descriptor
	 */
	nfs_page_group_clear_bits(head);

	/*
	 * some part of the group was still on the inode list - otherwise
	 * the group wouldn't be involved in async write.
	 * grab a reference for the head request, iff it needs one.
	 */
	if (!test_and_set_bit(PG_INODE_REF, &head->wb_flags))
		kref_get(&head->wb_kref);

	nfs_page_group_unlock(head);

	/* drop lock to clean up requests on destroy list */
	spin_unlock(&inode->i_lock);

	nfs_destroy_unlinked_subrequests(destroy_list, head);

	/* still holds ref on head from nfs_page_find_head_request_locked
	 * and still has lock on head from lock loop */
	return head;
}
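
/*
 * A minimal caller sketch (hypothetical, mirroring nfs_page_async_flush()
 * below): the return contract demands three branches - NULL means the page
 * had no requests, an ERR_PTR propagates the error, and a valid pointer is
 * a locked, referenced head request that the caller must eventually unlock
 * and release.
 */
static int nfs_flush_page_example(struct page *page)
{
	struct nfs_page *head = nfs_lock_and_join_requests(page, false);

	if (!head)
		return 0;		/* no requests on this page */
	if (IS_ERR(head))
		return PTR_ERR(head);	/* lock failed or the group was corrupt */
	/* ... normally hand @head to a pgio descriptor; here just drop it */
	nfs_unlock_and_release_request(head);
	return 0;
}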

static void nfs_write_error_remove_page(struct nfs_page *req)
{
	nfs_unlock_request(req);
	nfs_end_page_writeback(req);
	nfs_release_request(req);
	generic_error_remove_page(page_file_mapping(req->wb_page),
				  req->wb_page);
}

/*
 * Find an associated nfs write request, and prepare to flush it out
 * May return an error if the user signalled nfs_wait_on_request().
 */
static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
				struct page *page, bool nonblock,
				bool launder)
{
	struct nfs_page *req;
	int ret = 0;

	req = nfs_lock_and_join_requests(page, nonblock);
	if (!req)
		goto out;
	ret = PTR_ERR(req);
	if (IS_ERR(req))
		goto out;

	nfs_set_page_writeback(page);
	WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));

	ret = 0;
	if (!nfs_pageio_add_request(pgio, req)) {
		ret = pgio->pg_error;
		/*
		 * Remove the problematic req upon fatal errors
		 * in launder case, while other dirty pages can
		 * still be around until they get flushed.
		 */
		if (nfs_error_is_fatal(ret)) {
			nfs_context_set_write_error(req->wb_context, ret);
			if (launder) {
				nfs_write_error_remove_page(req);
				goto out;
			}
		}
		nfs_redirty_request(req);
		ret = -EAGAIN;
	} else
		nfs_add_stats(page_file_mapping(page)->host,
				NFSIOS_WRITEPAGES, 1);
out:
	return ret;
}

static int nfs_do_writepage(struct page *page, struct writeback_control *wbc,
			    struct nfs_pageio_descriptor *pgio, bool launder)
{
	int ret;

	nfs_pageio_cond_complete(pgio, page_file_index(page));
	ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE,
				   launder);
	if (ret == -EAGAIN) {
		redirty_page_for_writepage(wbc, page);
		ret = 0;
	}
	return ret;
}

/*
 * Write an mmapped page to the server.
 */
static int nfs_writepage_locked(struct page *page,
				struct writeback_control *wbc,
				bool launder)
{
	struct nfs_pageio_descriptor pgio;
	struct inode *inode = page_file_mapping(page)->host;
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
	nfs_pageio_init_write(&pgio, inode, wb_priority(wbc),
				false, &nfs_async_write_completion_ops);
	err = nfs_do_writepage(page, wbc, &pgio, launder);
	nfs_pageio_complete(&pgio);
	if (err < 0)
		return err;
	if (pgio.pg_error < 0)
		return pgio.pg_error;
	return 0;
}

int nfs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	ret = nfs_writepage_locked(page, wbc, false);
	unlock_page(page);
	return ret;
}

static int nfs_writepages_callback(struct page *page, struct writeback_control *wbc, void *data)
{
	int ret;

	ret = nfs_do_writepage(page, wbc, data, false);
	unlock_page(page);
	return ret;
}

int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	unsigned long *bitlock = &NFS_I(inode)->flags;
	struct nfs_pageio_descriptor pgio;
	int err;

	/* Stop dirtying of new pages while we sync */
	err = wait_on_bit_lock_action(bitlock, NFS_INO_FLUSHING,
			nfs_wait_bit_killable, TASK_KILLABLE);
	if (err)
		goto out_err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);

	nfs_pageio_init_write(&pgio, inode, wb_priority(wbc), false,
				&nfs_async_write_completion_ops);
	err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
	nfs_pageio_complete(&pgio);

	clear_bit_unlock(NFS_INO_FLUSHING, bitlock);
	smp_mb__after_atomic();
	wake_up_bit(bitlock, NFS_INO_FLUSHING);

	if (err < 0)
		goto out_err;
	err = pgio.pg_error;
	if (err < 0)
		goto out_err;
	return 0;
out_err:
	return err;
}
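
/*
 * A minimal sketch (hypothetical helper) isolating the NFS_INO_FLUSHING
 * bit-lock protocol used above: wait_on_bit_lock_action() acquires the bit,
 * clear_bit_unlock() releases it, and smp_mb__after_atomic() orders the
 * clear before wake_up_bit() so sleeping waiters cannot miss the wakeup.
 */
static int nfs_with_flush_lock_example(struct inode *inode)
{
	unsigned long *bitlock = &NFS_I(inode)->flags;
	int err;

	err = wait_on_bit_lock_action(bitlock, NFS_INO_FLUSHING,
			nfs_wait_bit_killable, TASK_KILLABLE);
	if (err)
		return err;	/* interrupted by a fatal signal */

	/* ... critical section: new pages cannot start being flushed ... */

	clear_bit_unlock(NFS_INO_FLUSHING, bitlock);
	smp_mb__after_atomic();	/* order the clear before the wakeup */
	wake_up_bit(bitlock, NFS_INO_FLUSHING);
	return 0;
}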

/*
 * Insert a write request into an inode
 */
static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(inode);

	WARN_ON_ONCE(req->wb_this_page != req);

	/* Lock the request! */
	nfs_lock_request(req);

	spin_lock(&inode->i_lock);
	if (!nfsi->nrequests &&
	    NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
		inode->i_version++;
	/*
	 * Swap-space should not get truncated. Hence no need to plug the race
	 * with invalidate/truncate.
	 */
	if (likely(!PageSwapCache(req->wb_page))) {
		set_bit(PG_MAPPED, &req->wb_flags);
		SetPagePrivate(req->wb_page);
		set_page_private(req->wb_page, (unsigned long)req);
	}
	nfsi->nrequests++;
	/* this is a head request for a page group - mark it as having an
	 * extra reference so sub groups can follow suit.
	 * This flag also informs pgio layer when to bump nrequests when
	 * adding subrequests. */
	WARN_ON(test_and_set_bit(PG_INODE_REF, &req->wb_flags));
	kref_get(&req->wb_kref);
	spin_unlock(&inode->i_lock);
}

/*
 * Remove a write request from an inode
 */
static void nfs_inode_remove_request(struct nfs_page *req)
{
	struct inode *inode = d_inode(req->wb_context->dentry);
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *head;

	if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) {
		head = req->wb_head;

		spin_lock(&inode->i_lock);
		if (likely(!PageSwapCache(head->wb_page))) {
			set_page_private(head->wb_page, 0);
			ClearPagePrivate(head->wb_page);
			smp_mb__after_atomic();
			wake_up_page(head->wb_page, PG_private);
			clear_bit(PG_MAPPED, &head->wb_flags);
		}
		nfsi->nrequests--;
		spin_unlock(&inode->i_lock);
	} else {
		spin_lock(&inode->i_lock);
		nfsi->nrequests--;
		spin_unlock(&inode->i_lock);
	}

	if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags))
		nfs_release_request(req);
}

static void
nfs_mark_request_dirty(struct nfs_page *req)
{
	__set_page_dirty_nobuffers(req->wb_page);
}

/*
 * nfs_page_search_commits_for_head_request_locked
 *
 * Search through commit lists on @inode for the head request for @page.
 * Must be called while holding the inode (which is cinfo) lock.
 *
 * Returns the head request if found, or NULL if not found.
 */
static struct nfs_page *
nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi,
						struct page *page)
{
	struct nfs_page *freq, *t;
	struct nfs_commit_info cinfo;
	struct inode *inode = &nfsi->vfs_inode;

	nfs_init_cinfo_from_inode(&cinfo, inode);

	/* search through pnfs commit lists */
	freq = pnfs_search_commit_reqs(inode, &cinfo, page);
	if (freq)
		return freq->wb_head;

	/* Linearly search the commit list for the correct request */
	list_for_each_entry_safe(freq, t, &cinfo.mds->list, wb_list) {
		if (freq->wb_page == page)
			return freq->wb_head;
	}

	return NULL;
}

/**
 * nfs_request_add_commit_list_locked - add request to a commit list
 * @req: pointer to a struct nfs_page
 * @dst: commit list head
 * @cinfo: holds list lock and accounting info
 *
 * This sets the PG_CLEAN bit, updates the cinfo count of
 * number of outstanding requests requiring a commit as well as
 * the MM page stats.
 *
 * The caller must hold the cinfo->lock, and the nfs_page lock.
 */
void
nfs_request_add_commit_list_locked(struct nfs_page *req, struct list_head *dst,
				   struct nfs_commit_info *cinfo)
{
	set_bit(PG_CLEAN, &req->wb_flags);
	nfs_list_add_request(req, dst);
	cinfo->mds->ncommit++;
}
EXPORT_SYMBOL_GPL(nfs_request_add_commit_list_locked);

/**
 * nfs_request_add_commit_list - add request to a commit list
 * @req: pointer to a struct nfs_page
 * @dst: commit list head
 * @cinfo: holds list lock and accounting info
 *
 * This sets the PG_CLEAN bit, updates the cinfo count of
 * number of outstanding requests requiring a commit as well as
 * the MM page stats.
 *
 * The caller must _not_ hold the cinfo->lock, but must be
 * holding the nfs_page lock.
 */
void
nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst,
			    struct nfs_commit_info *cinfo)
{
	spin_lock(cinfo->lock);
	nfs_request_add_commit_list_locked(req, dst, cinfo);
	spin_unlock(cinfo->lock);
	nfs_mark_page_unstable(req->wb_page, cinfo);
}
EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);

/**
 * nfs_request_remove_commit_list - Remove request from a commit list
 * @req: pointer to a nfs_page
 * @cinfo: holds list lock and accounting info
 *
 * This clears the PG_CLEAN bit, and updates the cinfo's count of
 * number of outstanding requests requiring a commit
 * It does not update the MM page stats.
 *
 * The caller _must_ hold the cinfo->lock and the nfs_page lock.
 */
void
nfs_request_remove_commit_list(struct nfs_page *req,
			       struct nfs_commit_info *cinfo)
{
	if (!test_and_clear_bit(PG_CLEAN, &(req)->wb_flags))
		return;
	nfs_list_remove_request(req);
	cinfo->mds->ncommit--;
}
EXPORT_SYMBOL_GPL(nfs_request_remove_commit_list);

static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
				      struct inode *inode)
{
	cinfo->lock = &inode->i_lock;
	cinfo->mds = &NFS_I(inode)->commit_info;
	cinfo->ds = pnfs_get_ds_info(inode);
	cinfo->dreq = NULL;
	cinfo->completion_ops = &nfs_commit_completion_ops;
}

void nfs_init_cinfo(struct nfs_commit_info *cinfo,
		    struct inode *inode,
		    struct nfs_direct_req *dreq)
{
	if (dreq)
		nfs_init_cinfo_from_dreq(cinfo, dreq);
	else
		nfs_init_cinfo_from_inode(cinfo, inode);
}
EXPORT_SYMBOL_GPL(nfs_init_cinfo);

/*
 * Add a request to the inode's commit list.
 */
void
nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
			struct nfs_commit_info *cinfo, u32 ds_commit_idx)
{
	if (pnfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx))
		return;
	nfs_request_add_commit_list(req, &cinfo->mds->list, cinfo);
}

static void
nfs_clear_page_commit(struct page *page)
{
	dec_zone_page_state(page, NR_UNSTABLE_NFS);
	dec_wb_stat(&inode_to_bdi(page_file_mapping(page)->host)->wb,
		    WB_RECLAIMABLE);
}

/* Called holding inode (/cinfo) lock */
static void
nfs_clear_request_commit(struct nfs_page *req)
{
	if (test_bit(PG_CLEAN, &req->wb_flags)) {
		struct inode *inode = d_inode(req->wb_context->dentry);
		struct nfs_commit_info cinfo;

		nfs_init_cinfo_from_inode(&cinfo, inode);
		if (!pnfs_clear_request_commit(req, &cinfo)) {
			nfs_request_remove_commit_list(req, &cinfo);
		}
		nfs_clear_page_commit(req->wb_page);
	}
}

int nfs_write_need_commit(struct nfs_pgio_header *hdr)
{
	if (hdr->verf.committed == NFS_DATA_SYNC)
		return hdr->lseg == NULL;
	return hdr->verf.committed != NFS_FILE_SYNC;
}

static void nfs_write_completion(struct nfs_pgio_header *hdr)
{
	struct nfs_commit_info cinfo;
	unsigned long bytes = 0;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out;
	nfs_init_cinfo_from_inode(&cinfo, hdr->inode);
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);

		bytes += req->wb_bytes;
		nfs_list_remove_request(req);
		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) &&
		    (hdr->good_bytes < bytes)) {
			nfs_set_pageerror(req->wb_page);
			nfs_context_set_write_error(req->wb_context, hdr->error);
			goto remove_req;
		}
		if (nfs_write_need_commit(hdr)) {
			memcpy(&req->wb_verf, &hdr->verf.verifier, sizeof(req->wb_verf));
			nfs_mark_request_commit(req, hdr->lseg, &cinfo,
				hdr->pgio_mirror_idx);
			goto next;
		}
remove_req:
		nfs_inode_remove_request(req);
next:
		nfs_unlock_request(req);
		nfs_end_page_writeback(req);
		nfs_release_request(req);
	}
out:
	hdr->release(hdr);
}

unsigned long
nfs_reqs_to_commit(struct nfs_commit_info *cinfo)
{
	return cinfo->mds->ncommit;
}

/* cinfo->lock held by caller */
int
nfs_scan_commit_list(struct list_head *src, struct list_head *dst,
		     struct nfs_commit_info *cinfo, int max)
{
	struct nfs_page *req, *tmp;
	int ret = 0;

	list_for_each_entry_safe(req, tmp, src, wb_list) {
		if (!nfs_lock_request(req))
			continue;
		kref_get(&req->wb_kref);
		if (cond_resched_lock(cinfo->lock))
			list_safe_reset_next(req, tmp, wb_list);
		nfs_request_remove_commit_list(req, cinfo);
		nfs_list_add_request(req, dst);
		ret++;
		if ((ret == max) && !cinfo->dreq)
			break;
	}
	return ret;
}

/*
 * nfs_scan_commit - Scan an inode for commit requests
 * @inode: NFS inode to scan
 * @dst: mds destination list
 * @cinfo: mds and ds lists of reqs ready to commit
 *
 * Moves requests from the inode's 'commit' request list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
int
nfs_scan_commit(struct inode *inode, struct list_head *dst,
		struct nfs_commit_info *cinfo)
{
	int ret = 0;

	spin_lock(cinfo->lock);
	if (cinfo->mds->ncommit > 0) {
		const int max = INT_MAX;

		ret = nfs_scan_commit_list(&cinfo->mds->list, dst,
					   cinfo, max);
		ret += pnfs_scan_commit_lists(inode, cinfo, max - ret);
	}
	spin_unlock(cinfo->lock);
	return ret;
}

/*
 * Search for an existing write request, and attempt to update
 * it to reflect a new dirty region on a given page.
 *
 * If the attempt fails, then the existing request is flushed out
 * to disk.
 */
static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
		struct page *page,
		unsigned int offset,
		unsigned int bytes)
{
	struct nfs_page *req;
	unsigned int rqend;
	unsigned int end;
	int error;

	if (!PagePrivate(page))
		return NULL;

	end = offset + bytes;
	spin_lock(&inode->i_lock);

	for (;;) {
		req = nfs_page_find_head_request_locked(NFS_I(inode), page);
		if (req == NULL)
			goto out_unlock;

		/* should be handled by nfs_flush_incompatible */
		WARN_ON_ONCE(req->wb_head != req);
		WARN_ON_ONCE(req->wb_this_page != req);

		rqend = req->wb_offset + req->wb_bytes;
		/*
		 * Tell the caller to flush out the request if
		 * the offsets are non-contiguous.
		 * Note: nfs_flush_incompatible() will already
		 * have flushed out requests having wrong owners.
		 */
		if (offset > rqend
		    || end < req->wb_offset)
			goto out_flushme;

		if (nfs_lock_request(req))
			break;

		/* The request is locked, so wait and then retry */
		spin_unlock(&inode->i_lock);
		error = nfs_wait_on_request(req);
		nfs_release_request(req);
		if (error != 0)
			goto out_err;
		spin_lock(&inode->i_lock);
	}

	/* Okay, the request matches. Update the region */
	if (offset < req->wb_offset) {
		req->wb_offset = offset;
		req->wb_pgbase = offset;
	}
	if (end > rqend)
		req->wb_bytes = end - req->wb_offset;
	else
		req->wb_bytes = rqend - req->wb_offset;
out_unlock:
	if (req)
		nfs_clear_request_commit(req);
	spin_unlock(&inode->i_lock);
	return req;
out_flushme:
	spin_unlock(&inode->i_lock);
	nfs_release_request(req);
	error = nfs_wb_page(inode, page);
out_err:
	return ERR_PTR(error);
}

/*
 * Try to update an existing write request, or create one if there is none.
 *
 * Note: Should always be called with the Page Lock held to prevent races
 * if we have to add a new request. Also assumes that the caller has
 * already called nfs_flush_incompatible() if necessary.
 */
static struct nfs_page * nfs_setup_write_request(struct nfs_open_context* ctx,
		struct page *page, unsigned int offset, unsigned int bytes)
{
	struct inode *inode = page_file_mapping(page)->host;
	struct nfs_page *req;

	req = nfs_try_to_update_request(inode, page, offset, bytes);
	if (req != NULL)
		goto out;
	req = nfs_create_request(ctx, page, NULL, offset, bytes);
	if (IS_ERR(req))
		goto out;
	nfs_inode_add_request(inode, req);
out:
	return req;
}

static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_page *req;

	req = nfs_setup_write_request(ctx, page, offset, count);
	if (IS_ERR(req))
		return PTR_ERR(req);
	/* Update file length */
	nfs_grow_file(page, offset, count);
	nfs_mark_uptodate(req);
	nfs_mark_request_dirty(req);
	nfs_unlock_and_release_request(req);
	return 0;
}

int nfs_flush_incompatible(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx = nfs_file_open_context(file);
	struct nfs_lock_context *l_ctx;
	struct file_lock_context *flctx = file_inode(file)->i_flctx;
	struct nfs_page *req;
	int do_flush, status;
	/*
	 * Look for a request corresponding to this page. If there
	 * is one, and it belongs to another file, we flush it out
	 * before we try to copy anything into the page. Do this
	 * due to the lack of an ACCESS-type call in NFSv2.
	 * Also do the same if we find a request from an existing
	 * dropped page.
	 */
	do {
		req = nfs_page_find_head_request(page);
		if (req == NULL)
			return 0;
		l_ctx = req->wb_lock_context;
		do_flush = req->wb_page != page ||
			   !nfs_match_open_context(req->wb_context, ctx);
		/* for now, flush if more than 1 request in page_group */
		do_flush |= req->wb_this_page != req;
		if (l_ctx && flctx &&
		    !(list_empty_careful(&flctx->flc_posix) &&
		      list_empty_careful(&flctx->flc_flock))) {
			do_flush |= l_ctx->lockowner.l_owner != current->files
				|| l_ctx->lockowner.l_pid != current->tgid;
		}
		nfs_release_request(req);
		if (!do_flush)
			return 0;
		status = nfs_wb_page(page_file_mapping(page)->host, page);
	} while (status == 0);
	return status;
}

/*
 * Avoid buffered writes when an open context credential's key would
 * expire soon.
 *
 * Returns -EACCES if the key will expire within RPC_KEY_EXPIRE_FAIL.
 *
 * Return 0 and set a credential flag which triggers the inode to flush
 * and performs NFS_FILE_SYNC writes if the key will expire within
 * RPC_KEY_EXPIRE_TIMEO.
 */
int
nfs_key_timeout_notify(struct file *filp, struct inode *inode)
{
	struct nfs_open_context *ctx = nfs_file_open_context(filp);
	struct rpc_auth *auth = NFS_SERVER(inode)->client->cl_auth;

	return rpcauth_key_timeout_notify(auth, ctx->cred);
}

/*
 * Test if the open context credential key is marked to expire soon.
 */
bool nfs_ctx_key_to_expire(struct nfs_open_context *ctx)
{
	return rpcauth_cred_key_to_expire(ctx->cred);
}

/*
 * If the page cache is marked as unsafe or invalid, then we can't rely on
 * the PageUptodate() flag. In this case, we will need to turn off
 * write optimisations that depend on the page contents being correct.
 */
static bool nfs_write_pageuptodate(struct page *page, struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);

	if (nfs_have_delegated_attributes(inode))
		goto out;
	if (nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
		return false;
	smp_rmb();
	if (test_bit(NFS_INO_INVALIDATING, &nfsi->flags))
		return false;
out:
	if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
		return false;
	return PageUptodate(page) != 0;
}

static bool
is_whole_file_wrlock(struct file_lock *fl)
{
	return fl->fl_start == 0 && fl->fl_end == OFFSET_MAX &&
			fl->fl_type == F_WRLCK;
}

/* If we know the page is up to date, and we're not using byte range locks (or
 * if we have the whole file locked for writing), it may be more efficient to
 * extend the write to cover the entire page in order to avoid fragmentation
 * inefficiencies.
 *
 * If the file is opened for synchronous writes then we can just skip the rest
 * of the checks.
 */
static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode)
{
	int ret;
	struct file_lock_context *flctx = inode->i_flctx;
	struct file_lock *fl;

	if (file->f_flags & O_DSYNC)
		return 0;
	if (!nfs_write_pageuptodate(page, inode))
		return 0;
	if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
		return 1;
	if (!flctx || (list_empty_careful(&flctx->flc_flock) &&
		       list_empty_careful(&flctx->flc_posix)))
		return 1;

	/* Check to see if there are whole file write locks */
	ret = 0;
	spin_lock(&flctx->flc_lock);
	if (!list_empty(&flctx->flc_posix)) {
		fl = list_first_entry(&flctx->flc_posix, struct file_lock,
					fl_list);
		if (is_whole_file_wrlock(fl))
			ret = 1;
	} else if (!list_empty(&flctx->flc_flock)) {
		fl = list_first_entry(&flctx->flc_flock, struct file_lock,
					fl_list);
		if (fl->fl_type == F_WRLCK)
			ret = 1;
	}
	spin_unlock(&flctx->flc_lock);
	return ret;
}

/*
 * Update and possibly write a cached page of an NFS file.
 *
 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
 * things with a page scheduled for an RPC call (e.g. invalidate it).
 */
int nfs_updatepage(struct file *file, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_open_context *ctx = nfs_file_open_context(file);
	struct inode *inode = page_file_mapping(page)->host;
	int status = 0;

	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);

	dprintk("NFS: nfs_updatepage(%pD2 %d@%lld)\n",
		file, count, (long long)(page_file_offset(page) + offset));

	if (nfs_can_extend_write(file, page, inode)) {
		count = max(count + offset, nfs_page_length(page));
		offset = 0;
	}

	status = nfs_writepage_setup(ctx, page, offset, count);
	if (status < 0)
		nfs_set_pageerror(page);
	else
		__set_page_dirty_nobuffers(page);

	dprintk("NFS: nfs_updatepage returns %d (isize %lld)\n",
			status, (long long)i_size_read(inode));
	return status;
}

static int flush_task_priority(int how)
{
	switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
		case FLUSH_HIGHPRI:
			return RPC_PRIORITY_HIGH;
		case FLUSH_LOWPRI:
			return RPC_PRIORITY_LOW;
	}
	return RPC_PRIORITY_NORMAL;
}

static void nfs_initiate_write(struct nfs_pgio_header *hdr,
			       struct rpc_message *msg,
			       const struct nfs_rpc_ops *rpc_ops,
			       struct rpc_task_setup *task_setup_data, int how)
{
	int priority = flush_task_priority(how);

	task_setup_data->priority = priority;
	rpc_ops->write_setup(hdr, msg);

	nfs4_state_protect_write(NFS_SERVER(hdr->inode)->nfs_client,
				 &task_setup_data->rpc_client, msg, hdr);
}

/* If an nfs_flush_* function fails, it should remove reqs from @head and
 * call this on each, which will prepare them to be retried on next
 * writeback using standard nfs.
 */
static void nfs_redirty_request(struct nfs_page *req)
{
	nfs_mark_request_dirty(req);
	set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags);
	nfs_unlock_request(req);
	nfs_end_page_writeback(req);
	nfs_release_request(req);
}

static void nfs_async_write_error(struct list_head *head)
{
	struct nfs_page	*req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_redirty_request(req);
	}
}

static void nfs_async_write_reschedule_io(struct nfs_pgio_header *hdr)
{
	nfs_async_write_error(&hdr->pages);
}

static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops = {
	.error_cleanup = nfs_async_write_error,
	.completion = nfs_write_completion,
	.reschedule_io = nfs_async_write_reschedule_io,
};

void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
			   struct inode *inode, int ioflags, bool force_mds,
			   const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;

#ifdef CONFIG_NFS_V4_1
	if (server->pnfs_curr_ld && !force_mds)
		pg_ops = server->pnfs_curr_ld->pg_write_ops;
#endif
	nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_write_ops,
			server->wsize, ioflags);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_write);

void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *mirror;

	if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
		pgio->pg_ops->pg_cleanup(pgio);

	pgio->pg_ops = &nfs_pgio_rw_ops;

	nfs_pageio_stop_mirroring(pgio);

	mirror = &pgio->pg_mirrors[0];
	mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds);


void nfs_commit_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs_commit_data *data = calldata;

	NFS_PROTO(data->inode)->commit_rpc_prepare(task, data);
}

/*
 * Special version of should_remove_suid() that ignores capabilities.
 */
static int nfs_should_remove_suid(const struct inode *inode)
{
	umode_t mode = inode->i_mode;
	int kill = 0;

	/* suid always must be killed */
	if (unlikely(mode & S_ISUID))
		kill = ATTR_KILL_SUID;

	/*
	 * sgid without any exec bits is just a mandatory locking mark; leave
	 * it alone. If some exec bits are set, it's a real sgid; kill it.
	 */
	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
		kill |= ATTR_KILL_SGID;

	if (unlikely(kill && S_ISREG(mode)))
		return kill;

	return 0;
}
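
/*
 * Worked example (illustrative): a regular file with mode 06775 has
 * S_ISUID, S_ISGID and group-exec set, so the function above returns
 * ATTR_KILL_SUID | ATTR_KILL_SGID. A mode of 02664 (setgid but no exec
 * bits) is the mandatory-locking marker and returns 0.
 */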

static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr,
		struct nfs_fattr *fattr)
{
	struct nfs_pgio_args *argp = &hdr->args;
	struct nfs_pgio_res *resp = &hdr->res;
	u64 size = argp->offset + resp->count;

	if (!(fattr->valid & NFS_ATTR_FATTR_SIZE))
		fattr->size = size;
	if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode)) {
		fattr->valid &= ~NFS_ATTR_FATTR_SIZE;
		return;
	}
	if (size != fattr->size)
		return;
	/* Set attribute barrier */
	nfs_fattr_set_barrier(fattr);
	/* ...and update size */
	fattr->valid |= NFS_ATTR_FATTR_SIZE;
}

void nfs_writeback_update_inode(struct nfs_pgio_header *hdr)
{
	struct nfs_fattr *fattr = &hdr->fattr;
	struct inode *inode = hdr->inode;

	spin_lock(&inode->i_lock);
	nfs_writeback_check_extend(hdr, fattr);
	nfs_post_op_update_inode_force_wcc_locked(inode, fattr);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL_GPL(nfs_writeback_update_inode);

/*
 * This function is called when the WRITE call is complete.
 */
static int nfs_writeback_done(struct rpc_task *task,
			      struct nfs_pgio_header *hdr,
			      struct inode *inode)
{
	int status;

	/*
	 * ->write_done will attempt to use post-op attributes to detect
	 * conflicting writes by other clients. A strict interpretation
	 * of close-to-open would allow us to continue caching even if
	 * another writer had changed the file, but some applications
	 * depend on tighter cache coherency when writing.
	 */
	status = NFS_PROTO(inode)->write_done(task, hdr);
	if (status != 0)
		return status;
	nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, hdr->res.count);

	if (hdr->res.verf->committed < hdr->args.stable &&
	    task->tk_status >= 0) {
		/* We tried a write call, but the server did not
		 * commit data to stable storage even though we
		 * requested it.
		 * Note: There is a known bug in Tru64 < 5.0 in which
		 *	 the server reports NFS_DATA_SYNC, but performs
		 *	 NFS_FILE_SYNC. We therefore implement this checking
		 *	 as a dprintk() in order to avoid filling syslog.
		 */
		static unsigned long complain;

		/* Note this will print the MDS for a DS write */
		if (time_before(complain, jiffies)) {
			dprintk("NFS: faulty NFS server %s:"
				" (committed = %d) != (stable = %d)\n",
				NFS_SERVER(inode)->nfs_client->cl_hostname,
				hdr->res.verf->committed, hdr->args.stable);
			complain = jiffies + 300 * HZ;
		}
	}

	/* Deal with the suid/sgid bit corner case */
	if (nfs_should_remove_suid(inode))
		nfs_mark_for_revalidate(inode);
	return 0;
}

/*
 * This function is called when the WRITE call is complete.
 */
static void nfs_writeback_result(struct rpc_task *task,
				 struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_args *argp = &hdr->args;
	struct nfs_pgio_res *resp = &hdr->res;

	if (resp->count < argp->count) {
		static unsigned long complain;

		/* This is a short write! */
		nfs_inc_stats(hdr->inode, NFSIOS_SHORTWRITE);

		/* Has the server at least made some progress? */
		if (resp->count == 0) {
			if (time_before(complain, jiffies)) {
				printk(KERN_WARNING
				       "NFS: Server wrote zero bytes, expected %u.\n",
				       argp->count);
				complain = jiffies + 300 * HZ;
			}
			nfs_set_pgio_error(hdr, -EIO, argp->offset);
			task->tk_status = -EIO;
			return;
		}

		/* For non rpc-based layout drivers, retry-through-MDS */
		if (!task->tk_ops) {
			hdr->pnfs_error = -EAGAIN;
			return;
		}

		/* Was this an NFSv2 write or an NFSv3 stable write? */
		if (resp->verf->committed != NFS_UNSTABLE) {
			/* Resend from where the server left off */
			hdr->mds_offset += resp->count;
			argp->offset += resp->count;
			argp->pgbase += resp->count;
			argp->count -= resp->count;
		} else {
			/* Resend as a stable write in order to avoid
			 * headaches in the case of a server crash.
			 */
			argp->stable = NFS_FILE_SYNC;
		}
		rpc_restart_call_prepare(task);
	}
}
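
/*
 * Worked example (illustrative numbers): for a 16KiB write at offset
 * 65536 where the server only accepts 4096 bytes, the resend path above
 * advances argp->offset to 69632, bumps argp->pgbase by 4096, and shrinks
 * argp->count to 12288, so the restarted RPC resumes exactly where the
 * short write stopped.
 */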

static int wait_on_commit(struct nfs_mds_commit_info *cinfo)
{
	return wait_on_atomic_t(&cinfo->rpcs_out,
			nfs_wait_atomic_killable, TASK_KILLABLE);
}

static void nfs_commit_begin(struct nfs_mds_commit_info *cinfo)
{
	atomic_inc(&cinfo->rpcs_out);
}

static void nfs_commit_end(struct nfs_mds_commit_info *cinfo)
{
	if (atomic_dec_and_test(&cinfo->rpcs_out))
		wake_up_atomic_t(&cinfo->rpcs_out);
}
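
/*
 * A minimal sketch (hypothetical caller) of the rpcs_out protocol above,
 * the wait_on_atomic_t() pattern this commit introduces: each in-flight
 * COMMIT holds a count via nfs_commit_begin(), nfs_commit_end() wakes the
 * atomic_t waitqueue when the count drops to zero, and a flusher blocks
 * killably until all outstanding COMMITs have drained.
 */
static int nfs_drain_commits_example(struct nfs_mds_commit_info *cinfo)
{
	nfs_commit_begin(cinfo);	/* count one in-flight COMMIT */
	/* ... send the COMMIT RPC; its completion calls nfs_commit_end() ... */
	nfs_commit_end(cinfo);

	/* wait (TASK_KILLABLE) until rpcs_out reaches zero */
	return wait_on_commit(cinfo);
}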
1584
0b7c0153 1585void nfs_commitdata_release(struct nfs_commit_data *data)
1da177e4 1586{
0b7c0153
FI
1587 put_nfs_open_context(data->context);
1588 nfs_commit_free(data);
1da177e4 1589}
e0c2b380 1590EXPORT_SYMBOL_GPL(nfs_commitdata_release);
1da177e4 1591
0b7c0153 1592int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
c36aae9a 1593 const struct nfs_rpc_ops *nfs_ops,
9ace33cd 1594 const struct rpc_call_ops *call_ops,
9f0ec176 1595 int how, int flags)
1da177e4 1596{
07737691 1597 struct rpc_task *task;
9ace33cd 1598 int priority = flush_task_priority(how);
bdc7f021
TM
1599 struct rpc_message msg = {
1600 .rpc_argp = &data->args,
1601 .rpc_resp = &data->res,
9ace33cd 1602 .rpc_cred = data->cred,
bdc7f021 1603 };
84115e1c 1604 struct rpc_task_setup task_setup_data = {
07737691 1605 .task = &data->task,
9ace33cd 1606 .rpc_client = clnt,
bdc7f021 1607 .rpc_message = &msg,
9ace33cd 1608 .callback_ops = call_ops,
84115e1c 1609 .callback_data = data,
101070ca 1610 .workqueue = nfsiod_workqueue,
9f0ec176 1611 .flags = RPC_TASK_ASYNC | flags,
3ff7576d 1612 .priority = priority,
84115e1c 1613 };
9ace33cd 1614 /* Set up the initial task struct. */
c36aae9a 1615 nfs_ops->commit_setup(data, &msg);
9ace33cd 1616
b4839ebe 1617 dprintk("NFS: initiated commit call\n");
9ace33cd 1618
8c21c62c
WAA
1619 nfs4_state_protect(NFS_SERVER(data->inode)->nfs_client,
1620 NFS_SP4_MACH_CRED_COMMIT, &task_setup_data.rpc_client, &msg);
1621
9ace33cd
FI
1622 task = rpc_run_task(&task_setup_data);
1623 if (IS_ERR(task))
1624 return PTR_ERR(task);
1625 if (how & FLUSH_SYNC)
1626 rpc_wait_for_completion_task(task);
1627 rpc_put_task(task);
1628 return 0;
1629}
e0c2b380 1630EXPORT_SYMBOL_GPL(nfs_initiate_commit);
9ace33cd 1631
378520b8
PT
1632static loff_t nfs_get_lwb(struct list_head *head)
1633{
1634 loff_t lwb = 0;
1635 struct nfs_page *req;
1636
1637 list_for_each_entry(req, head, wb_list)
1638 if (lwb < (req_offset(req) + req->wb_bytes))
1639 lwb = req_offset(req) + req->wb_bytes;
1640
1641 return lwb;
1642}

/*
 * Set up the argument/result storage required for the RPC call.
 */
void nfs_init_commit(struct nfs_commit_data *data,
		     struct list_head *head,
		     struct pnfs_layout_segment *lseg,
		     struct nfs_commit_info *cinfo)
{
	struct nfs_page *first = nfs_list_entry(head->next);
	struct inode *inode = d_inode(first->wb_context->dentry);

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	list_splice_init(head, &data->pages);

	data->inode	  = inode;
	data->cred	  = first->wb_context->cred;
	data->lseg	  = lseg; /* reference transferred */
	/* only set lwb for pnfs commit */
	if (lseg)
		data->lwb = nfs_get_lwb(&data->pages);
	data->mds_ops	  = &nfs_commit_ops;
	data->completion_ops = cinfo->completion_ops;
	data->dreq	  = cinfo->dreq;

	data->args.fh	  = NFS_FH(data->inode);
	/* Note: we always request a commit of the entire inode */
	data->args.offset = 0;
	data->args.count  = 0;
	data->context	  = get_nfs_open_context(first->wb_context);
	data->res.fattr	  = &data->fattr;
	data->res.verf	  = &data->verf;
	nfs_fattr_init(&data->fattr);
}
EXPORT_SYMBOL_GPL(nfs_init_commit);
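
/*
 * An offset of 0 with a count of 0 asks the server to commit every byte
 * of the file: in the NFSv3/v4 COMMIT arguments, a zero count means
 * "from offset to the end of the file".
 */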

void nfs_retry_commit(struct list_head *page_list,
		      struct pnfs_layout_segment *lseg,
		      struct nfs_commit_info *cinfo,
		      u32 ds_commit_idx)
{
	struct nfs_page *req;

	while (!list_empty(page_list)) {
		req = nfs_list_entry(page_list->next);
		nfs_list_remove_request(req);
		nfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx);
		if (!cinfo->dreq)
			nfs_clear_page_commit(req->wb_page);
		nfs_unlock_and_release_request(req);
	}
}
EXPORT_SYMBOL_GPL(nfs_retry_commit);

static void
nfs_commit_resched_write(struct nfs_commit_info *cinfo,
		struct nfs_page *req)
{
	__set_page_dirty_nobuffers(req->wb_page);
}

/*
 * Commit dirty pages
 */
static int
nfs_commit_list(struct inode *inode, struct list_head *head, int how,
		struct nfs_commit_info *cinfo)
{
	struct nfs_commit_data *data;

	data = nfs_commitdata_alloc();

	if (!data)
		goto out_bad;

	/* Set up the argument struct */
	nfs_init_commit(data, head, NULL, cinfo);
	atomic_inc(&cinfo->mds->rpcs_out);
	return nfs_initiate_commit(NFS_CLIENT(inode), data, NFS_PROTO(inode),
				   data->mds_ops, how, 0);
 out_bad:
	nfs_retry_commit(head, NULL, cinfo, 0);
	return -ENOMEM;
}

/*
 * COMMIT call returned
 */
static void nfs_commit_done(struct rpc_task *task, void *calldata)
{
	struct nfs_commit_data *data = calldata;

	dprintk("NFS: %5u nfs_commit_done (status %d)\n",
		task->tk_pid, task->tk_status);

	/* Call the NFS version-specific code */
	NFS_PROTO(data->inode)->commit_done(task, data);
}

static void nfs_commit_release_pages(struct nfs_commit_data *data)
{
	struct nfs_page *req;
	int status = data->task.tk_status;
	struct nfs_commit_info cinfo;
	struct nfs_server *nfss;

	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		nfs_clear_page_commit(req->wb_page);

		dprintk("NFS: commit (%s/%llu %d@%lld)",
			req->wb_context->dentry->d_sb->s_id,
			(unsigned long long)NFS_FILEID(d_inode(req->wb_context->dentry)),
			req->wb_bytes,
			(long long)req_offset(req));
		if (status < 0) {
			nfs_context_set_write_error(req->wb_context, status);
			nfs_inode_remove_request(req);
			dprintk(", error = %d\n", status);
			goto next;
		}

		/* Okay, COMMIT succeeded, apparently. Check the verifier
		 * returned by the server against all stored verfs. */
		if (!memcmp(&req->wb_verf, &data->verf.verifier, sizeof(req->wb_verf))) {
			/* We have a match */
			nfs_inode_remove_request(req);
			dprintk(" OK\n");
			goto next;
		}
		/* We have a mismatch. Write the page again */
		dprintk(" mismatch\n");
		nfs_mark_request_dirty(req);
		set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags);
	next:
		nfs_unlock_and_release_request(req);
	}
	nfss = NFS_SERVER(data->inode);
	if (atomic_long_read(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
		clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);

	nfs_init_cinfo(&cinfo, data->inode, data->dreq);
	nfs_commit_end(cinfo.mds);
}
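
/*
 * The write verifier compared above is an opaque cookie that the server
 * must change whenever it reboots or otherwise loses uncommitted data.
 * A mismatch therefore means the unstable WRITEs preceding this COMMIT
 * may have been discarded, so the pages are redirtied and the whole
 * write-then-commit sequence is replayed.
 */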

static void nfs_commit_release(void *calldata)
{
	struct nfs_commit_data *data = calldata;

	data->completion_ops->completion(data);
	nfs_commitdata_release(calldata);
}

static const struct rpc_call_ops nfs_commit_ops = {
	.rpc_call_prepare = nfs_commit_prepare,
	.rpc_call_done = nfs_commit_done,
	.rpc_release = nfs_commit_release,
};

static const struct nfs_commit_completion_ops nfs_commit_completion_ops = {
	.completion = nfs_commit_release_pages,
	.resched_write = nfs_commit_resched_write,
};

int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
			    int how, struct nfs_commit_info *cinfo)
{
	int status;

	status = pnfs_commit_list(inode, head, how, cinfo);
	if (status == PNFS_NOT_ATTEMPTED)
		status = nfs_commit_list(inode, head, how, cinfo);
	return status;
}
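
/*
 * pnfs_commit_list() returns PNFS_NOT_ATTEMPTED when no layout driver
 * handled the commit, in which case we fall back to a plain COMMIT to
 * the server via nfs_commit_list() above.
 */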

int nfs_commit_inode(struct inode *inode, int how)
{
	LIST_HEAD(head);
	struct nfs_commit_info cinfo;
	int may_wait = how & FLUSH_SYNC;
	int error = 0;
	int res;

	nfs_init_cinfo_from_inode(&cinfo, inode);
	nfs_commit_begin(cinfo.mds);
	res = nfs_scan_commit(inode, &head, &cinfo);
	if (res)
		error = nfs_generic_commit_list(inode, &head, how, &cinfo);
	nfs_commit_end(cinfo.mds);
	if (error < 0)
		goto out_error;
	if (!may_wait)
		goto out_mark_dirty;
	error = wait_on_commit(cinfo.mds);
	if (error < 0)
		return error;
	return res;
out_error:
	res = error;
	/* Note: If we exit without ensuring that the commit is complete,
	 *	 we must mark the inode as dirty. Otherwise, future calls to
	 *	 sync_inode() with the WB_SYNC_ALL flag set will fail to ensure
	 *	 that the data is on the disk.
	 */
out_mark_dirty:
	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
	return res;
}
EXPORT_SYMBOL_GPL(nfs_commit_inode);
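
/*
 * A minimal usage sketch (hypothetical caller, mirroring what
 * nfs_wb_all() below actually does): a blocking flush would issue
 *
 *	ret = nfs_commit_inode(inode, FLUSH_SYNC);
 *	if (ret < 0)
 *		return ret;	(the inode was re-marked dirty on error)
 *
 * while passing how == 0 fires the COMMIT without waiting for the reply.
 */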

int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int flags = FLUSH_SYNC;
	int ret = 0;

	/* no commits means nothing needs to be done */
	if (!nfsi->commit_info.ncommit)
		return ret;

	if (wbc->sync_mode == WB_SYNC_NONE) {
		/* Don't commit yet if this is a non-blocking flush and there
		 * are a lot of outstanding writes for this mapping.
		 */
		if (nfsi->commit_info.ncommit <= (nfsi->nrequests >> 1))
			goto out_mark_dirty;

		/* don't wait for the COMMIT response */
		flags = 0;
	}

	ret = nfs_commit_inode(inode, flags);
	if (ret >= 0) {
		if (wbc->sync_mode == WB_SYNC_NONE) {
			if (ret < wbc->nr_to_write)
				wbc->nr_to_write -= ret;
			else
				wbc->nr_to_write = 0;
		}
		return 0;
	}
out_mark_dirty:
	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_write_inode);
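
/*
 * The "ncommit <= nrequests >> 1" test above is a batching heuristic:
 * on a non-blocking flush, defer the COMMIT until at least half of the
 * inode's outstanding requests are ready to be committed, rather than
 * issuing many small commits while writes are still streaming in.
 */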

/*
 * flush the inode to disk.
 */
int nfs_wb_all(struct inode *inode)
{
	int ret;

	trace_nfs_writeback_inode_enter(inode);

	ret = filemap_write_and_wait(inode->i_mapping);
	if (ret)
		goto out;
	ret = nfs_commit_inode(inode, FLUSH_SYNC);
	if (ret < 0)
		goto out;
	pnfs_sync_inode(inode, true);
	ret = 0;

out:
	trace_nfs_writeback_inode_exit(inode, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_wb_all);

int nfs_wb_page_cancel(struct inode *inode, struct page *page)
{
	struct nfs_page *req;
	int ret = 0;

	wait_on_page_writeback(page);

	/* blocking call to cancel all requests and join to a single (head)
	 * request */
	req = nfs_lock_and_join_requests(page, false);

	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
	} else if (req) {
		/* all requests from this page have been cancelled by
		 * nfs_lock_and_join_requests, so just remove the head
		 * request from the inode / page_private pointer and
		 * release it */
		nfs_inode_remove_request(req);
		nfs_unlock_and_release_request(req);
	}

	return ret;
}

/*
 * Write back all requests on one page - we do this before reading it.
 */
int nfs_wb_single_page(struct inode *inode, struct page *page, bool launder)
{
	loff_t range_start = page_file_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
		.range_start = range_start,
		.range_end = range_end,
	};
	int ret;

	trace_nfs_writeback_page_enter(inode);

	for (;;) {
		wait_on_page_writeback(page);
		if (clear_page_dirty_for_io(page)) {
			ret = nfs_writepage_locked(page, &wbc, launder);
			if (ret < 0)
				goto out_error;
			continue;
		}
		ret = 0;
		if (!PagePrivate(page))
			break;
		ret = nfs_commit_inode(inode, FLUSH_SYNC);
		if (ret < 0)
			goto out_error;
	}
out_error:
	trace_nfs_writeback_page_exit(inode, ret);
	return ret;
}
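
/*
 * The loop above alternates between writing the page back and
 * committing the inode: the page is only considered clean once it is
 * no longer dirty, no longer under writeback, and no longer carries
 * nfs_page requests (PagePrivate), which a COMMIT is needed to retire.
 */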

#ifdef CONFIG_MIGRATION
int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
		struct page *page, enum migrate_mode mode)
{
	/*
	 * If PagePrivate is set, then the page is currently associated with
	 * an in-progress read or write request. Don't try to migrate it.
	 *
	 * FIXME: we could do this in principle, but we'll need a way to ensure
	 *        that we can safely release the inode reference while holding
	 *        the page lock.
	 */
	if (PagePrivate(page))
		return -EBUSY;

	if (!nfs_fscache_release_page(page, GFP_KERNEL))
		return -EBUSY;

	return migrate_page(mapping, newpage, page, mode);
}
#endif

int __init nfs_init_writepagecache(void)
{
	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
					     sizeof(struct nfs_pgio_header),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_wdata_cachep == NULL)
		return -ENOMEM;

	nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
						     nfs_wdata_cachep);
	if (nfs_wdata_mempool == NULL)
		goto out_destroy_write_cache;

	nfs_cdata_cachep = kmem_cache_create("nfs_commit_data",
					     sizeof(struct nfs_commit_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_cdata_cachep == NULL)
		goto out_destroy_write_mempool;

	nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
						      nfs_cdata_cachep);
	if (nfs_commit_mempool == NULL)
		goto out_destroy_commit_cache;

	/*
	 * NFS congestion size, scale with available memory.
	 *
	 *  64MB:    8192k
	 * 128MB:   11585k
	 * 256MB:   16384k
	 * 512MB:   23170k
	 *   1GB:   32768k
	 *   2GB:   46340k
	 *   4GB:   65536k
	 *   8GB:   92681k
	 *  16GB:  131072k
	 *
	 * This allows larger machines to have larger/more transfers.
	 * Limit the default to 256M
	 */
	nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
	if (nfs_congestion_kb > 256*1024)
		nfs_congestion_kb = 256*1024;
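
	/*
	 * Worked example, assuming 4 KiB pages (PAGE_SHIFT == 12): with
	 * 1 GiB of RAM, totalram_pages == 262144, int_sqrt() of that is
	 * 512, and (16 * 512) << (12 - 10) == 32768k, matching the table
	 * above.
	 */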

	return 0;

out_destroy_commit_cache:
	kmem_cache_destroy(nfs_cdata_cachep);
out_destroy_write_mempool:
	mempool_destroy(nfs_wdata_mempool);
out_destroy_write_cache:
	kmem_cache_destroy(nfs_wdata_cachep);
	return -ENOMEM;
}

void nfs_destroy_writepagecache(void)
{
	mempool_destroy(nfs_commit_mempool);
	kmem_cache_destroy(nfs_cdata_cachep);
	mempool_destroy(nfs_wdata_mempool);
	kmem_cache_destroy(nfs_wdata_cachep);
}

static const struct nfs_rw_ops nfs_rw_write_ops = {
	.rw_mode		= FMODE_WRITE,
	.rw_alloc_header	= nfs_writehdr_alloc,
	.rw_free_header		= nfs_writehdr_free,
	.rw_done		= nfs_writeback_done,
	.rw_result		= nfs_writeback_result,
	.rw_initiate		= nfs_initiate_write,
};