/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_page.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/export.h>

#include "internal.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static struct kmem_cache *nfs_page_cachep;
static const struct rpc_call_ops nfs_pgio_common_ops;

static void nfs_free_request(struct nfs_page *req);

static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount)
{
	p->npages = pagecount;
	if (pagecount <= ARRAY_SIZE(p->page_array))
		p->pagevec = p->page_array;
	else {
		p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
		if (!p->pagevec)
			p->npages = 0;
	}
	return p->pagevec != NULL;
}

void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
		       struct nfs_pgio_header *hdr,
		       void (*release)(struct nfs_pgio_header *hdr))
{
	hdr->req = nfs_list_entry(desc->pg_list.next);
	hdr->inode = desc->pg_inode;
	hdr->cred = hdr->req->wb_context->cred;
	hdr->io_start = req_offset(hdr->req);
	hdr->good_bytes = desc->pg_count;
	hdr->dreq = desc->pg_dreq;
	hdr->layout_private = desc->pg_layout_private;
	hdr->release = release;
	hdr->completion_ops = desc->pg_completion_ops;
	if (hdr->completion_ops->init_hdr)
		hdr->completion_ops->init_hdr(hdr);
}
EXPORT_SYMBOL_GPL(nfs_pgheader_init);

void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
{
	spin_lock(&hdr->lock);
	if (pos < hdr->io_start + hdr->good_bytes) {
		set_bit(NFS_IOHDR_ERROR, &hdr->flags);
		clear_bit(NFS_IOHDR_EOF, &hdr->flags);
		hdr->good_bytes = pos - hdr->io_start;
		hdr->error = error;
	}
	spin_unlock(&hdr->lock);
}

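/*
 * Worked example (illustrative, not code from this file): if a header
 * tracks I/O starting at io_start == 0 with good_bytes == 16384, and an
 * error is reported at pos == 8192, then
 *
 *	nfs_set_pgio_error(hdr, -EIO, 8192);
 *
 * trims good_bytes to 8192 so only the bytes before the failure count
 * as transferred. An error at or beyond the already recorded good range
 * leaves good_bytes untouched.
 */
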
static inline struct nfs_page *
nfs_page_alloc(void)
{
	struct nfs_page	*p = kmem_cache_zalloc(nfs_page_cachep, GFP_NOIO);
	if (p)
		INIT_LIST_HEAD(&p->wb_list);
	return p;
}

static inline void
nfs_page_free(struct nfs_page *p)
{
	kmem_cache_free(nfs_page_cachep, p);
}

static void
nfs_iocounter_inc(struct nfs_io_counter *c)
{
	atomic_inc(&c->io_count);
}

static void
nfs_iocounter_dec(struct nfs_io_counter *c)
{
	if (atomic_dec_and_test(&c->io_count)) {
		clear_bit(NFS_IO_INPROGRESS, &c->flags);
		smp_mb__after_atomic();
		wake_up_bit(&c->flags, NFS_IO_INPROGRESS);
	}
}

static int
__nfs_iocounter_wait(struct nfs_io_counter *c)
{
	wait_queue_head_t *wq = bit_waitqueue(&c->flags, NFS_IO_INPROGRESS);
	DEFINE_WAIT_BIT(q, &c->flags, NFS_IO_INPROGRESS);
	int ret = 0;

	do {
		prepare_to_wait(wq, &q.wait, TASK_KILLABLE);
		set_bit(NFS_IO_INPROGRESS, &c->flags);
		if (atomic_read(&c->io_count) == 0)
			break;
		ret = nfs_wait_bit_killable(&c->flags);
	} while (atomic_read(&c->io_count) != 0);
	finish_wait(wq, &q.wait);
	return ret;
}

/**
 * nfs_iocounter_wait - wait for i/o to complete
 * @c: nfs_io_counter to use
 *
 * returns -ERESTARTSYS if interrupted by a fatal signal.
 * Otherwise returns 0 once the io_count hits 0.
 */
int
nfs_iocounter_wait(struct nfs_io_counter *c)
{
	if (atomic_read(&c->io_count) == 0)
		return 0;
	return __nfs_iocounter_wait(c);
}

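/*
 * Illustrative usage sketch (the caller context here is an assumption,
 * not code from this file): a path that must drain outstanding I/O on a
 * lock context before proceeding could do:
 *
 *	int ret = nfs_iocounter_wait(&l_ctx->io_count);
 *	if (ret < 0)
 *		return ret;	(fatal signal: -ERESTARTSYS)
 */
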
static int nfs_wait_bit_uninterruptible(void *word)
{
	io_schedule();
	return 0;
}

/*
 * nfs_page_group_lock - lock the head of the page group
 * @req - request in group that is to be locked
 *
 * this lock must be held if modifying the page group list
 */
void
nfs_page_group_lock(struct nfs_page *req)
{
	struct nfs_page *head = req->wb_head;

	WARN_ON_ONCE(head != head->wb_head);

	wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
			nfs_wait_bit_uninterruptible,
			TASK_UNINTERRUPTIBLE);
}

/*
 * nfs_page_group_unlock - unlock the head of the page group
 * @req - request in group that is to be unlocked
 */
void
nfs_page_group_unlock(struct nfs_page *req)
{
	struct nfs_page *head = req->wb_head;

	WARN_ON_ONCE(head != head->wb_head);

	smp_mb__before_atomic();
	clear_bit(PG_HEADLOCK, &head->wb_flags);
	smp_mb__after_atomic();
	wake_up_bit(&head->wb_flags, PG_HEADLOCK);
}

/*
 * nfs_page_group_sync_on_bit_locked
 *
 * must be called with page group lock held
 */
static bool
nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
{
	struct nfs_page *head = req->wb_head;
	struct nfs_page *tmp;

	WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &head->wb_flags));
	WARN_ON_ONCE(test_and_set_bit(bit, &req->wb_flags));

	tmp = req->wb_this_page;
	while (tmp != req) {
		if (!test_bit(bit, &tmp->wb_flags))
			return false;
		tmp = tmp->wb_this_page;
	}

	/* true! reset all bits */
	tmp = req;
	do {
		clear_bit(bit, &tmp->wb_flags);
		tmp = tmp->wb_this_page;
	} while (tmp != req);

	return true;
}

/*
 * nfs_page_group_sync_on_bit - set bit on current request, but only
 *   return true if the bit is set for all requests in page group
 * @req - request in page group
 * @bit - PG_* bit that is used to sync page group
 */
bool nfs_page_group_sync_on_bit(struct nfs_page *req, unsigned int bit)
{
	bool ret;

	nfs_page_group_lock(req);
	ret = nfs_page_group_sync_on_bit_locked(req, bit);
	nfs_page_group_unlock(req);

	return ret;
}

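/*
 * Worked example (illustrative): suppose a page group holds three
 * subrequests A, B and C, and each calls
 * nfs_page_group_sync_on_bit(req, PG_UPTODATE) as it completes. The
 * calls for A and B set the bit on their own request and return false,
 * because C has not completed yet. The call for C finds the bit set on
 * every member, clears it group-wide, and returns true, so only the
 * final caller performs the once-per-group work.
 */
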
/*
 * nfs_page_group_init - Initialize the page group linkage for @req
 * @req - a new nfs request
 * @prev - the previous request in page group, or NULL if @req is the first
 *	   or only request in the group (the head).
 */
static inline void
nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev)
{
	WARN_ON_ONCE(prev == req);

	if (!prev) {
		/* a head request */
		req->wb_head = req;
		req->wb_this_page = req;
	} else {
		/* a subrequest */
		WARN_ON_ONCE(prev->wb_this_page != prev->wb_head);
		WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &prev->wb_head->wb_flags));
		req->wb_head = prev->wb_head;
		req->wb_this_page = prev->wb_this_page;
		prev->wb_this_page = req;

		/* grab extra ref if head request has extra ref from
		 * the write/commit path to handle handoff between write
		 * and commit lists */
		if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags))
			kref_get(&req->wb_kref);
	}
}

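/*
 * Illustrative example of the resulting linkage: for a head request H,
 * nfs_page_group_init(H, NULL) leaves H->wb_head == H and
 * H->wb_this_page == H (a singleton circular list). Adding a subrequest
 * S with nfs_page_group_init(S, H) splices S in after H, giving
 * H->wb_this_page == S, S->wb_this_page == H and S->wb_head == H.
 */
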
/*
 * nfs_page_group_destroy - sync the destruction of page groups
 * @req - request that no longer needs the page group
 *
 * releases the page group reference from each member once all
 * members have called this function.
 */
static void
nfs_page_group_destroy(struct kref *kref)
{
	struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
	struct nfs_page *tmp, *next;

	if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN))
		return;

	tmp = req;
	do {
		next = tmp->wb_this_page;
		/* unlink and free */
		tmp->wb_this_page = tmp;
		tmp->wb_head = tmp;
		nfs_free_request(tmp);
		tmp = next;
	} while (tmp != req);
}

/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @page: page to write
 * @last: last nfs request created for this page group or NULL if head
 * @offset: starting offset within the page for the write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * User should ensure it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct page *page,
		   struct nfs_page *last, unsigned int offset,
		   unsigned int count)
{
	struct nfs_page		*req;
	struct nfs_lock_context *l_ctx;

	if (test_bit(NFS_CONTEXT_BAD, &ctx->flags))
		return ERR_PTR(-EBADF);
	/* try to allocate the request struct */
	req = nfs_page_alloc();
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	/* get lock context early so we can deal with alloc failures */
	l_ctx = nfs_get_lock_context(ctx);
	if (IS_ERR(l_ctx)) {
		nfs_page_free(req);
		return ERR_CAST(l_ctx);
	}
	req->wb_lock_context = l_ctx;
	nfs_iocounter_inc(&l_ctx->io_count);

	/* Initialize the request struct. Initially, we assume a
	 * long write-back delay. This will be adjusted in
	 * update_nfs_request below if the region is not locked. */
	req->wb_page    = page;
	req->wb_index	= page_file_index(page);
	page_cache_get(page);
	req->wb_offset  = offset;
	req->wb_pgbase	= offset;
	req->wb_bytes   = count;
	req->wb_context = get_nfs_open_context(ctx);
	kref_init(&req->wb_kref);
	nfs_page_group_init(req, last);
	return req;
}

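/*
 * Illustrative usage sketch (the caller context is an assumption): a
 * write path typically creates the head request for a locked page as
 *
 *	req = nfs_create_request(ctx, page, NULL, 0, len);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *
 * passing NULL for @last because this is the first request in its group.
 */
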
/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: request to unlock
 */
void nfs_unlock_request(struct nfs_page *req)
{
	if (!NFS_WBACK_BUSY(req)) {
		printk(KERN_ERR "NFS: Invalid unlock attempted\n");
		BUG();
	}
	smp_mb__before_atomic();
	clear_bit(PG_BUSY, &req->wb_flags);
	smp_mb__after_atomic();
	wake_up_bit(&req->wb_flags, PG_BUSY);
}

/**
 * nfs_unlock_and_release_request - Unlock request and release the nfs_page
 * @req: request to unlock and release
 */
void nfs_unlock_and_release_request(struct nfs_page *req)
{
	nfs_unlock_request(req);
	nfs_release_request(req);
}

/*
 * nfs_clear_request - Free up all resources allocated to the request
 * @req: request to clear
 *
 * Release page and open context resources associated with a read/write
 * request after it has completed.
 */
static void nfs_clear_request(struct nfs_page *req)
{
	struct page *page = req->wb_page;
	struct nfs_open_context *ctx = req->wb_context;
	struct nfs_lock_context *l_ctx = req->wb_lock_context;

	if (page != NULL) {
		page_cache_release(page);
		req->wb_page = NULL;
	}
	if (l_ctx != NULL) {
		nfs_iocounter_dec(&l_ctx->io_count);
		nfs_put_lock_context(l_ctx);
		req->wb_lock_context = NULL;
	}
	if (ctx != NULL) {
		put_nfs_open_context(ctx);
		req->wb_context = NULL;
	}
}

/**
 * nfs_release_request - Release the count on an NFS read/write request
 * @req: request to release
 *
 * Note: Should never be called with the spinlock held!
 */
static void nfs_free_request(struct nfs_page *req)
{
	WARN_ON_ONCE(req->wb_this_page != req);

	/* extra debug: make sure no sync bits are still set */
	WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_UNLOCKPAGE, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_UPTODATE, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_WB_END, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_REMOVE, &req->wb_flags));

	/* Release struct file and open context */
	nfs_clear_request(req);
	nfs_page_free(req);
}

void nfs_release_request(struct nfs_page *req)
{
	kref_put(&req->wb_kref, nfs_page_group_destroy);
}

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * Interruptible by fatal signals only.
 * The user is responsible for holding a count on the request.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
	return wait_on_bit(&req->wb_flags, PG_BUSY,
			nfs_wait_bit_uninterruptible,
			TASK_UNINTERRUPTIBLE);
}

/*
 * nfs_generic_pg_test - determine if requests can be coalesced
 * @desc: pointer to descriptor
 * @prev: previous request in desc, or NULL
 * @req: this request
 *
 * Returns the number of bytes of @req that can be coalesced into @desc,
 * or zero if the request cannot be coalesced at all.
 */
size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *prev, struct nfs_page *req)
{
	if (desc->pg_count > desc->pg_bsize) {
		/* should never happen */
		WARN_ON_ONCE(1);
		return 0;
	}

	return min(desc->pg_bsize - desc->pg_count, (size_t)req->wb_bytes);
}
EXPORT_SYMBOL_GPL(nfs_generic_pg_test);

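/*
 * Illustrative arithmetic: with pg_bsize == 65536 and pg_count == 61440
 * already coalesced, an 8192-byte request is limited to
 * min(65536 - 61440, 8192) == 4096 bytes, so the caller must split it.
 * A return value of 0 means nothing more fits in this descriptor.
 */
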
struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *ops)
{
	struct nfs_pgio_header *hdr = ops->rw_alloc_header();

	if (hdr) {
		INIT_LIST_HEAD(&hdr->pages);
		spin_lock_init(&hdr->lock);
		hdr->rw_ops = ops;
	}
	return hdr;
}
EXPORT_SYMBOL_GPL(nfs_pgio_header_alloc);

/*
 * nfs_pgio_header_free - Free a read or write header
 * @hdr: The header to free
 */
void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
{
	hdr->rw_ops->rw_free_header(hdr);
}
EXPORT_SYMBOL_GPL(nfs_pgio_header_free);

/**
 * nfs_pgio_data_destroy - make @hdr suitable for reuse
 *
 * Frees memory and releases refs from nfs_generic_pgio, so that it may
 * be called again.
 *
 * @hdr: A header that has had nfs_generic_pgio called
 */
void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
{
	put_nfs_open_context(hdr->args.context);
	if (hdr->page_array.pagevec != hdr->page_array.page_array)
		kfree(hdr->page_array.pagevec);
}
EXPORT_SYMBOL_GPL(nfs_pgio_data_destroy);

/**
 * nfs_pgio_rpcsetup - Set up arguments for a pageio call
 * @hdr: The pageio hdr
 * @count: Number of bytes to read
 * @offset: Initial offset
 * @how: How to commit data (writes only)
 * @cinfo: Commit information for the call (writes only)
 */
static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr,
			      unsigned int count, unsigned int offset,
			      int how, struct nfs_commit_info *cinfo)
{
	struct nfs_page *req = hdr->req;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with hdr->commit et al. */

	hdr->args.fh     = NFS_FH(hdr->inode);
	hdr->args.offset = req_offset(req) + offset;
	/* pnfs_set_layoutcommit needs this */
	hdr->mds_offset = hdr->args.offset;
	hdr->args.pgbase = req->wb_pgbase + offset;
	hdr->args.pages  = hdr->page_array.pagevec;
	hdr->args.count  = count;
	hdr->args.context = get_nfs_open_context(req->wb_context);
	hdr->args.lock_context = req->wb_lock_context;
	hdr->args.stable  = NFS_UNSTABLE;
	switch (how & (FLUSH_STABLE | FLUSH_COND_STABLE)) {
	case 0:
		break;
	case FLUSH_COND_STABLE:
		if (nfs_reqs_to_commit(cinfo))
			break;
	default:
		hdr->args.stable = NFS_FILE_SYNC;
	}

	hdr->res.fattr   = &hdr->fattr;
	hdr->res.count   = count;
	hdr->res.eof     = 0;
	hdr->res.verf    = &hdr->verf;
	nfs_fattr_init(&hdr->fattr);
}

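/*
 * Example of the stable-flag selection above (illustrative): a write
 * issued with FLUSH_COND_STABLE stays NFS_UNSTABLE when requests are
 * already queued for COMMIT (nfs_reqs_to_commit(cinfo) != 0), since a
 * COMMIT round trip is unavoidable anyway; otherwise it is upgraded to
 * NFS_FILE_SYNC so the separate COMMIT can be skipped entirely.
 */
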
/**
 * nfs_pgio_prepare - Prepare pageio hdr to go over the wire
 * @task: The current task
 * @calldata: pageio header to prepare
 */
static void nfs_pgio_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;
	int err;
	err = NFS_PROTO(hdr->inode)->pgio_rpc_prepare(task, hdr);
	if (err)
		rpc_exit(task, err);
}

int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
		      const struct rpc_call_ops *call_ops, int how, int flags)
{
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_argp = &hdr->args,
		.rpc_resp = &hdr->res,
		.rpc_cred = hdr->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.task = &hdr->task,
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = hdr,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC | flags,
	};
	int ret = 0;

	hdr->rw_ops->rw_initiate(hdr, &msg, &task_setup_data, how);

	dprintk("NFS: %5u initiated pgio call "
		"(req %s/%llu, %u bytes @ offset %llu)\n",
		hdr->task.tk_pid,
		hdr->inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(hdr->inode),
		hdr->args.count,
		(unsigned long long)hdr->args.offset);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task)) {
		ret = PTR_ERR(task);
		goto out;
	}
	if (how & FLUSH_SYNC) {
		ret = rpc_wait_for_completion_task(task);
		if (ret == 0)
			ret = task->tk_status;
	}
	rpc_put_task(task);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_initiate_pgio);

/**
 * nfs_pgio_error - Clean up from a pageio error
 * @desc: IO descriptor
 * @hdr: pageio header
 */
static int nfs_pgio_error(struct nfs_pageio_descriptor *desc,
			  struct nfs_pgio_header *hdr)
{
	set_bit(NFS_IOHDR_REDO, &hdr->flags);
	nfs_pgio_data_destroy(hdr);
	hdr->completion_ops->completion(hdr);
	desc->pg_completion_ops->error_cleanup(&desc->pg_list);
	return -ENOMEM;
}

/**
 * nfs_pgio_release - Release pageio data
 * @calldata: The pageio header to release
 */
static void nfs_pgio_release(void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;
	if (hdr->rw_ops->rw_release)
		hdr->rw_ops->rw_release(hdr);
	nfs_pgio_data_destroy(hdr);
	hdr->completion_ops->completion(hdr);
}

/**
 * nfs_pageio_init - initialise a page io descriptor
 * @desc: pointer to descriptor
 * @inode: pointer to inode
 * @pg_ops: pointer to pageio operations
 * @compl_ops: pointer to pageio completion operations
 * @rw_ops: pointer to nfs read/write operations
 * @bsize: io block size
 * @io_flags: extra parameters for the io function
 */
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
		     struct inode *inode,
		     const struct nfs_pageio_ops *pg_ops,
		     const struct nfs_pgio_completion_ops *compl_ops,
		     const struct nfs_rw_ops *rw_ops,
		     size_t bsize,
		     int io_flags)
{
	INIT_LIST_HEAD(&desc->pg_list);
	desc->pg_bytes_written = 0;
	desc->pg_count = 0;
	desc->pg_bsize = bsize;
	desc->pg_base = 0;
	desc->pg_moreio = 0;
	desc->pg_recoalesce = 0;
	desc->pg_inode = inode;
	desc->pg_ops = pg_ops;
	desc->pg_completion_ops = compl_ops;
	desc->pg_rw_ops = rw_ops;
	desc->pg_ioflags = io_flags;
	desc->pg_error = 0;
	desc->pg_lseg = NULL;
	desc->pg_dreq = NULL;
	desc->pg_layout_private = NULL;
}
EXPORT_SYMBOL_GPL(nfs_pageio_init);

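/*
 * Illustrative usage sketch (the surrounding context, ops structures and
 * block size are assumptions, not code from this file): a typical caller
 * pairs nfs_pageio_init() with nfs_pageio_add_request() and
 * nfs_pageio_complete(), e.g. for a read:
 *
 *	struct nfs_pageio_descriptor pgio;
 *
 *	nfs_pageio_init(&pgio, inode, &nfs_pgio_rw_ops, compl_ops,
 *			rw_ops, NFS_SERVER(inode)->rsize, 0);
 *	if (!nfs_pageio_add_request(&pgio, req))
 *		handle_error(pgio.pg_error);	(hypothetical helper)
 *	nfs_pageio_complete(&pgio);
 */
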
/**
 * nfs_pgio_result - Basic pageio error handling
 * @task: The task that ran
 * @calldata: Pageio header to check
 */
static void nfs_pgio_result(struct rpc_task *task, void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;
	struct inode *inode = hdr->inode;

	dprintk("NFS: %s: %5u, (status %d)\n", __func__,
		task->tk_pid, task->tk_status);

	if (hdr->rw_ops->rw_done(task, hdr, inode) != 0)
		return;
	if (task->tk_status < 0)
		nfs_set_pgio_error(hdr, task->tk_status, hdr->args.offset);
	else
		hdr->rw_ops->rw_result(task, hdr);
}

/*
 * Create an RPC task for the given read or write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
		     struct nfs_pgio_header *hdr)
{
	struct nfs_page		*req;
	struct page		**pages;
	struct list_head *head = &desc->pg_list;
	struct nfs_commit_info cinfo;
	unsigned int pagecount;

	pagecount = nfs_page_array_len(desc->pg_base, desc->pg_count);
	if (!nfs_pgarray_set(&hdr->page_array, pagecount))
		return nfs_pgio_error(desc, hdr);

	nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
	pages = hdr->page_array.pagevec;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &hdr->pages);
		*pages++ = req->wb_page;
	}

	if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
	    (desc->pg_moreio || nfs_reqs_to_commit(&cinfo)))
		desc->pg_ioflags &= ~FLUSH_COND_STABLE;

	/* Set up the argument struct */
	nfs_pgio_rpcsetup(hdr, desc->pg_count, 0, desc->pg_ioflags, &cinfo);
	desc->pg_rpc_callops = &nfs_pgio_common_ops;
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_generic_pgio);

static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_header *hdr;
	int ret;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_completion_ops->error_cleanup(&desc->pg_list);
		return -ENOMEM;
	}
	nfs_pgheader_init(desc, hdr, nfs_pgio_header_free);
	ret = nfs_generic_pgio(desc, hdr);
	if (ret == 0)
		ret = nfs_initiate_pgio(NFS_CLIENT(hdr->inode),
					hdr, desc->pg_rpc_callops,
					desc->pg_ioflags, 0);
	return ret;
}

static bool nfs_match_open_context(const struct nfs_open_context *ctx1,
		const struct nfs_open_context *ctx2)
{
	return ctx1->cred == ctx2->cred && ctx1->state == ctx2->state;
}

static bool nfs_match_lock_context(const struct nfs_lock_context *l1,
		const struct nfs_lock_context *l2)
{
	return l1->lockowner.l_owner == l2->lockowner.l_owner
		&& l1->lockowner.l_pid == l2->lockowner.l_pid;
}

/**
 * nfs_can_coalesce_requests - test two requests for compatibility
 * @prev: pointer to nfs_page
 * @req: pointer to nfs_page
 *
 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 * page data area they describe is contiguous, and that their RPC
 * credentials, NFSv4 open state, and lockowners are the same.
 *
 * Return 'true' if this is the case, else return 'false'.
 */
static bool nfs_can_coalesce_requests(struct nfs_page *prev,
				      struct nfs_page *req,
				      struct nfs_pageio_descriptor *pgio)
{
	size_t size;

	if (prev) {
		if (!nfs_match_open_context(req->wb_context, prev->wb_context))
			return false;
		if (req->wb_context->dentry->d_inode->i_flock != NULL &&
		    !nfs_match_lock_context(req->wb_lock_context,
					    prev->wb_lock_context))
			return false;
		if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
			return false;
	}
	size = pgio->pg_ops->pg_test(pgio, prev, req);
	WARN_ON_ONCE(size > req->wb_bytes);
	if (size && size < req->wb_bytes)
		req->wb_bytes = size;
	return size > 0;
}

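/*
 * Illustrative example of the contiguity check above: if
 * req_offset(prev) == 4096 and prev->wb_bytes == 4096, then @req must
 * start at byte 8192 exactly; a request starting anywhere else is
 * rejected so the coalesced RPC always covers one unbroken byte range.
 */
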
/**
 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
				     struct nfs_page *req)
{
	struct nfs_page *prev = NULL;
	if (desc->pg_count != 0) {
		prev = nfs_list_entry(desc->pg_list.prev);
	} else {
		if (desc->pg_ops->pg_init)
			desc->pg_ops->pg_init(desc, req);
		desc->pg_base = req->wb_pgbase;
	}
	if (!nfs_can_coalesce_requests(prev, req, desc))
		return 0;
	nfs_list_remove_request(req);
	nfs_list_add_request(req, &desc->pg_list);
	desc->pg_count += req->wb_bytes;
	return 1;
}

/*
 * Helper for nfs_pageio_add_request and nfs_pageio_complete
 */
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
	if (!list_empty(&desc->pg_list)) {
		int error = desc->pg_ops->pg_doio(desc);
		if (error < 0)
			desc->pg_error = error;
		else
			desc->pg_bytes_written += desc->pg_count;
	}
	if (list_empty(&desc->pg_list)) {
		desc->pg_count = 0;
		desc->pg_base = 0;
	}
}

/**
 * nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * This may split a request into subrequests which are all part of the
 * same page group.
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *req)
{
	struct nfs_page *subreq;
	unsigned int bytes_left = 0;
	unsigned int offset, pgbase;

	nfs_page_group_lock(req);

	subreq = req;
	bytes_left = subreq->wb_bytes;
	offset = subreq->wb_offset;
	pgbase = subreq->wb_pgbase;

	do {
		if (!nfs_pageio_do_add_request(desc, subreq)) {
			/* make sure pg_test call(s) did nothing */
			WARN_ON_ONCE(subreq->wb_bytes != bytes_left);
			WARN_ON_ONCE(subreq->wb_offset != offset);
			WARN_ON_ONCE(subreq->wb_pgbase != pgbase);

			nfs_page_group_unlock(req);
			desc->pg_moreio = 1;
			nfs_pageio_doio(desc);
			if (desc->pg_error < 0)
				return 0;
			if (desc->pg_recoalesce)
				return 0;
			/* retry add_request for this subreq */
			nfs_page_group_lock(req);
			continue;
		}

		/* check for buggy pg_test call(s) */
		WARN_ON_ONCE(subreq->wb_bytes + subreq->wb_pgbase > PAGE_SIZE);
		WARN_ON_ONCE(subreq->wb_bytes > bytes_left);
		WARN_ON_ONCE(subreq->wb_bytes == 0);

		bytes_left -= subreq->wb_bytes;
		offset += subreq->wb_bytes;
		pgbase += subreq->wb_bytes;

		if (bytes_left) {
			subreq = nfs_create_request(req->wb_context,
					req->wb_page,
					subreq, pgbase, bytes_left);
			if (IS_ERR(subreq))
				goto err_ptr;
			nfs_lock_request(subreq);
			subreq->wb_offset  = offset;
			subreq->wb_index = req->wb_index;
		}
	} while (bytes_left > 0);

	nfs_page_group_unlock(req);
	return 1;
err_ptr:
	desc->pg_error = PTR_ERR(subreq);
	nfs_page_group_unlock(req);
	return 0;
}

static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
{
	LIST_HEAD(head);

	do {
		list_splice_init(&desc->pg_list, &head);
		desc->pg_bytes_written -= desc->pg_count;
		desc->pg_count = 0;
		desc->pg_base = 0;
		desc->pg_recoalesce = 0;
		desc->pg_moreio = 0;

		while (!list_empty(&head)) {
			struct nfs_page *req;

			req = list_first_entry(&head, struct nfs_page, wb_list);
			nfs_list_remove_request(req);
			if (__nfs_pageio_add_request(desc, req))
				continue;
			if (desc->pg_error < 0)
				return 0;
			break;
		}
	} while (desc->pg_recoalesce);
	return 1;
}

int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
		struct nfs_page *req)
{
	int ret;

	do {
		ret = __nfs_pageio_add_request(desc, req);
		if (ret)
			break;
		if (desc->pg_error < 0)
			break;
		ret = nfs_do_recoalesce(desc);
	} while (ret);
	return ret;
}

/*
 * nfs_pageio_resend - Transfer requests to new descriptor and resend
 * @hdr - the pgio header to move request from
 * @desc - the pageio descriptor to add requests to
 *
 * Try to move each request (nfs_page) from @hdr to @desc then attempt
 * to send them.
 *
 * Returns 0 on success and < 0 on error.
 */
int nfs_pageio_resend(struct nfs_pageio_descriptor *desc,
		      struct nfs_pgio_header *hdr)
{
	LIST_HEAD(failed);

	desc->pg_dreq = hdr->dreq;
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);

		nfs_list_remove_request(req);
		if (!nfs_pageio_add_request(desc, req))
			nfs_list_add_request(req, &failed);
	}
	nfs_pageio_complete(desc);
	if (!list_empty(&failed)) {
		list_move(&failed, &hdr->pages);
		return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_pageio_resend);

/**
 * nfs_pageio_complete - Complete I/O on an nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 */
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
{
	for (;;) {
		nfs_pageio_doio(desc);
		if (!desc->pg_recoalesce)
			break;
		if (!nfs_do_recoalesce(desc))
			break;
	}
}

/**
 * nfs_pageio_cond_complete - Conditional I/O completion
 * @desc: pointer to io descriptor
 * @index: page index
 *
 * It is important to ensure that processes don't try to take locks
 * on non-contiguous ranges of pages as that might deadlock. This
 * function should be called before attempting to wait on a locked
 * nfs_page. It will complete the I/O if the page index 'index'
 * is not contiguous with the existing list of pages in 'desc'.
 */
void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
{
	if (!list_empty(&desc->pg_list)) {
		struct nfs_page *prev = nfs_list_entry(desc->pg_list.prev);
		if (index != prev->wb_index + 1)
			nfs_pageio_complete(desc);
	}
}

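/*
 * Worked example (illustrative): if @desc currently holds requests for
 * page indices 5..8, a caller about to wait on a locked request at
 * index 12 should call nfs_pageio_cond_complete(desc, 12) first; since
 * 12 != 8 + 1, the queued I/O is flushed, avoiding a deadlock on a
 * non-contiguous range. A call with index 9 leaves the list intact.
 */
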
int __init nfs_init_nfspagecache(void)
{
	nfs_page_cachep = kmem_cache_create("nfs_page",
					    sizeof(struct nfs_page),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (nfs_page_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_nfspagecache(void)
{
	kmem_cache_destroy(nfs_page_cachep);
}

static const struct rpc_call_ops nfs_pgio_common_ops = {
	.rpc_call_prepare = nfs_pgio_prepare,
	.rpc_call_done = nfs_pgio_result,
	.rpc_release = nfs_pgio_release,
};

const struct nfs_pageio_ops nfs_pgio_rw_ops = {
	.pg_test = nfs_generic_pg_test,
	.pg_doio = nfs_generic_pg_pgios,
};