/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_page.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/export.h>

#include "internal.h"
#include "pnfs.h"

static struct kmem_cache *nfs_page_cachep;

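/*
 * nfs_pgarray_set - set up the page array for an I/O request
 * @p: page array to initialise
 * @pagecount: number of pages the request will cover
 *
 * Small requests use the array embedded in struct nfs_page_array;
 * larger ones fall back to a kcalloc() allocation. Returns true if a
 * usable page vector is available, false if the allocation failed.
 */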
bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount)
{
	p->npages = pagecount;
	if (pagecount <= ARRAY_SIZE(p->page_array))
		p->pagevec = p->page_array;
	else {
		p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
		if (!p->pagevec)
			p->npages = 0;
	}
	return p->pagevec != NULL;
}

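/*
 * nfs_pgheader_init - initialise an I/O header from a pageio descriptor
 * @desc: descriptor whose request list and parameters are copied
 * @hdr: header to initialise
 * @release: callback invoked when the header is released
 */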
void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
		       struct nfs_pgio_header *hdr,
		       void (*release)(struct nfs_pgio_header *hdr))
{
	hdr->req = nfs_list_entry(desc->pg_list.next);
	hdr->inode = desc->pg_inode;
	hdr->cred = hdr->req->wb_context->cred;
	hdr->io_start = req_offset(hdr->req);
	hdr->good_bytes = desc->pg_count;
	hdr->dreq = desc->pg_dreq;
	hdr->layout_private = desc->pg_layout_private;
	hdr->release = release;
	hdr->completion_ops = desc->pg_completion_ops;
	if (hdr->completion_ops->init_hdr)
		hdr->completion_ops->init_hdr(hdr);
}
EXPORT_SYMBOL_GPL(nfs_pgheader_init);

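/*
 * nfs_set_pgio_error - record the first error seen in an I/O header
 * @hdr: header to update
 * @error: error code to record
 * @pos: file offset at which the error occurred
 *
 * Only ever moves hdr->good_bytes backwards, so the header ends up
 * describing the shortest error-free prefix of the I/O.
 */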
void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
{
	spin_lock(&hdr->lock);
	if (pos < hdr->io_start + hdr->good_bytes) {
		set_bit(NFS_IOHDR_ERROR, &hdr->flags);
		clear_bit(NFS_IOHDR_EOF, &hdr->flags);
		hdr->good_bytes = pos - hdr->io_start;
		hdr->error = error;
	}
	spin_unlock(&hdr->lock);
}

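/*
 * Slab allocation helpers for struct nfs_page. GFP_NOIO is used so that
 * memory reclaim triggered by the allocation cannot recurse into
 * further I/O.
 */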
static inline struct nfs_page *
nfs_page_alloc(void)
{
	struct nfs_page *p = kmem_cache_zalloc(nfs_page_cachep, GFP_NOIO);
	if (p)
		INIT_LIST_HEAD(&p->wb_list);
	return p;
}

static inline void
nfs_page_free(struct nfs_page *p)
{
	kmem_cache_free(nfs_page_cachep, p);
}

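/*
 * Per-lock-context count of requests in flight. The decrement wakes up
 * anyone sleeping in nfs_iocounter_wait() once the count drops to zero.
 */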
static void
nfs_iocounter_inc(struct nfs_io_counter *c)
{
	atomic_inc(&c->io_count);
}

static void
nfs_iocounter_dec(struct nfs_io_counter *c)
{
	if (atomic_dec_and_test(&c->io_count)) {
		clear_bit(NFS_IO_INPROGRESS, &c->flags);
		smp_mb__after_atomic();
		wake_up_bit(&c->flags, NFS_IO_INPROGRESS);
	}
}

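/*
 * Slow path for nfs_iocounter_wait(): sleep killably until io_count
 * reaches zero. NFS_IO_INPROGRESS is set each time around the loop so
 * that nfs_iocounter_dec() knows there is a waiter to wake.
 */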
static int
__nfs_iocounter_wait(struct nfs_io_counter *c)
{
	wait_queue_head_t *wq = bit_waitqueue(&c->flags, NFS_IO_INPROGRESS);
	DEFINE_WAIT_BIT(q, &c->flags, NFS_IO_INPROGRESS);
	int ret = 0;

	do {
		prepare_to_wait(wq, &q.wait, TASK_KILLABLE);
		set_bit(NFS_IO_INPROGRESS, &c->flags);
		if (atomic_read(&c->io_count) == 0)
			break;
		ret = nfs_wait_bit_killable(&c->flags);
	} while (atomic_read(&c->io_count) != 0);
	finish_wait(wq, &q.wait);
	return ret;
}

/**
 * nfs_iocounter_wait - wait for i/o to complete
 * @c: nfs_io_counter to use
 *
 * Returns -ERESTARTSYS if interrupted by a fatal signal.
 * Otherwise returns 0 once the io_count hits 0.
 */
int
nfs_iocounter_wait(struct nfs_io_counter *c)
{
	if (atomic_read(&c->io_count) == 0)
		return 0;
	return __nfs_iocounter_wait(c);
}

/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @inode: inode to which the request is attached
 * @page: page on which the data is to be read or written
 * @offset: starting offset of the data within the page
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * The caller should ensure it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
		   struct page *page,
		   unsigned int offset, unsigned int count)
{
	struct nfs_page *req;
	struct nfs_lock_context *l_ctx;

	if (test_bit(NFS_CONTEXT_BAD, &ctx->flags))
		return ERR_PTR(-EBADF);
	/* try to allocate the request struct */
	req = nfs_page_alloc();
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	/* get lock context early so we can deal with alloc failures */
	l_ctx = nfs_get_lock_context(ctx);
	if (IS_ERR(l_ctx)) {
		nfs_page_free(req);
		return ERR_CAST(l_ctx);
	}
	req->wb_lock_context = l_ctx;
	nfs_iocounter_inc(&l_ctx->io_count);

	/* Initialize the request struct. Initially, we assume a
	 * long write-back delay. This will be adjusted later if
	 * the region turns out not to be locked. */
	req->wb_page = page;
	req->wb_index = page_file_index(page);
	page_cache_get(page);
	req->wb_offset = offset;
	req->wb_pgbase = offset;
	req->wb_bytes = count;
	req->wb_context = get_nfs_open_context(ctx);
	kref_init(&req->wb_kref);
	return req;
}

/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: request to unlock
 */
void nfs_unlock_request(struct nfs_page *req)
{
	if (!NFS_WBACK_BUSY(req)) {
		printk(KERN_ERR "NFS: Invalid unlock attempted\n");
		BUG();
	}
	smp_mb__before_atomic();
	clear_bit(PG_BUSY, &req->wb_flags);
	smp_mb__after_atomic();
	wake_up_bit(&req->wb_flags, PG_BUSY);
}

/**
 * nfs_unlock_and_release_request - Unlock request and release the nfs_page
 * @req: request to unlock and release
 */
void nfs_unlock_and_release_request(struct nfs_page *req)
{
	nfs_unlock_request(req);
	nfs_release_request(req);
}

/*
 * nfs_clear_request - Free up all resources allocated to the request
 * @req: request to clear
 *
 * Release page and open context resources associated with a read/write
 * request after it has completed.
 */
static void nfs_clear_request(struct nfs_page *req)
{
	struct page *page = req->wb_page;
	struct nfs_open_context *ctx = req->wb_context;
	struct nfs_lock_context *l_ctx = req->wb_lock_context;

	if (page != NULL) {
		page_cache_release(page);
		req->wb_page = NULL;
	}
	if (l_ctx != NULL) {
		nfs_iocounter_dec(&l_ctx->io_count);
		nfs_put_lock_context(l_ctx);
		req->wb_lock_context = NULL;
	}
	if (ctx != NULL) {
		put_nfs_open_context(ctx);
		req->wb_context = NULL;
	}
}

/*
 * nfs_free_request - kref release callback: clean up and free an nfs_page
 * @kref: wb_kref member of the request being freed
 */
static void nfs_free_request(struct kref *kref)
{
	struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);

	/* Release struct file and open context */
	nfs_clear_request(req);
	nfs_page_free(req);
}

/**
 * nfs_release_request - Release the count on an NFS read/write request
 * @req: request to release
 *
 * Note: Should never be called with the spinlock held!
 */
void nfs_release_request(struct nfs_page *req)
{
	kref_put(&req->wb_kref, nfs_free_request);
}

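/*
 * Action function for wait_on_bit(): yield the CPU to other I/O until
 * the bit being waited on is cleared.
 */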
static int nfs_wait_bit_uninterruptible(void *word)
{
	io_schedule();
	return 0;
}

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * The wait is uninterruptible.
 * The user is responsible for holding a count on the request.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
	return wait_on_bit(&req->wb_flags, PG_BUSY,
			   nfs_wait_bit_uninterruptible,
			   TASK_UNINTERRUPTIBLE);
}

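/*
 * nfs_generic_pg_test - default test for whether a request may be coalesced
 * @desc: destination io descriptor
 * @prev: previous request already on the list
 * @req: request being considered
 *
 * Returns true if adding @req would keep the total I/O size within
 * pg_bsize.
 */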
bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_page *prev, struct nfs_page *req)
{
	/*
	 * FIXME: ideally we should be able to coalesce all requests
	 * that are not block boundary aligned, but currently this
	 * is problematic for the case of bsize < PAGE_CACHE_SIZE,
	 * since nfs_flush_multi and nfs_pagein_multi assume you
	 * can have only one struct nfs_page.
	 */
	if (desc->pg_bsize < PAGE_SIZE)
		return false;

	return desc->pg_count + req->wb_bytes <= desc->pg_bsize;
}
EXPORT_SYMBOL_GPL(nfs_generic_pg_test);

/**
 * nfs_pageio_init - initialise a page io descriptor
 * @desc: pointer to descriptor
 * @inode: pointer to inode
 * @pg_ops: pointer to pageio operations
 * @compl_ops: pointer to pageio completion operations
 * @bsize: io block size
 * @io_flags: extra parameters for the io function
 */
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
		     struct inode *inode,
		     const struct nfs_pageio_ops *pg_ops,
		     const struct nfs_pgio_completion_ops *compl_ops,
		     size_t bsize,
		     int io_flags)
{
	INIT_LIST_HEAD(&desc->pg_list);
	desc->pg_bytes_written = 0;
	desc->pg_count = 0;
	desc->pg_bsize = bsize;
	desc->pg_base = 0;
	desc->pg_moreio = 0;
	desc->pg_recoalesce = 0;
	desc->pg_inode = inode;
	desc->pg_ops = pg_ops;
	desc->pg_completion_ops = compl_ops;
	desc->pg_ioflags = io_flags;
	desc->pg_error = 0;
	desc->pg_lseg = NULL;
	desc->pg_dreq = NULL;
	desc->pg_layout_private = NULL;
}
EXPORT_SYMBOL_GPL(nfs_pageio_init);

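/*
 * Typical use of the pageio machinery (a sketch only; next_request()
 * is illustrative, and real callers such as the read and write paths
 * supply their own pg_ops, completion ops and request lists):
 *
 *	struct nfs_pageio_descriptor pgio;
 *	struct nfs_page *req;
 *
 *	nfs_pageio_init(&pgio, inode, pg_ops, compl_ops, bsize, io_flags);
 *	while ((req = next_request()) != NULL)
 *		if (!nfs_pageio_add_request(&pgio, req))
 *			break;
 *	nfs_pageio_complete(&pgio);
 *
 * On failure to add a request, the error is recorded in pgio.pg_error.
 */

/*
 * Two requests may only be coalesced if they were generated under the
 * same open context (credential plus NFSv4 open state) and, when the
 * file is locked, the same lock owner.
 */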
static bool nfs_match_open_context(const struct nfs_open_context *ctx1,
				   const struct nfs_open_context *ctx2)
{
	return ctx1->cred == ctx2->cred && ctx1->state == ctx2->state;
}

static bool nfs_match_lock_context(const struct nfs_lock_context *l1,
				   const struct nfs_lock_context *l2)
{
	return l1->lockowner.l_owner == l2->lockowner.l_owner
		&& l1->lockowner.l_pid == l2->lockowner.l_pid;
}

/**
 * nfs_can_coalesce_requests - test two requests for compatibility
 * @prev: pointer to nfs_page
 * @req: pointer to nfs_page
 * @pgio: pointer to nfs_pageio_descriptor whose pg_test hook is consulted
 *
 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 * page data area they describe is contiguous, and that their RPC
 * credentials, NFSv4 open state, and lockowners are the same.
 *
 * Return 'true' if this is the case, else return 'false'.
 */
static bool nfs_can_coalesce_requests(struct nfs_page *prev,
				      struct nfs_page *req,
				      struct nfs_pageio_descriptor *pgio)
{
	if (!nfs_match_open_context(req->wb_context, prev->wb_context))
		return false;
	if (req->wb_context->dentry->d_inode->i_flock != NULL &&
	    !nfs_match_lock_context(req->wb_lock_context, prev->wb_lock_context))
		return false;
	if (req->wb_pgbase != 0)
		return false;
	if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
		return false;
	if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
		return false;
	return pgio->pg_ops->pg_test(pgio, prev, req);
}

/**
 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
				     struct nfs_page *req)
{
	if (desc->pg_count != 0) {
		struct nfs_page *prev;

		prev = nfs_list_entry(desc->pg_list.prev);
		if (!nfs_can_coalesce_requests(prev, req, desc))
			return 0;
	} else {
		if (desc->pg_ops->pg_init)
			desc->pg_ops->pg_init(desc, req);
		desc->pg_base = req->wb_pgbase;
	}
	nfs_list_remove_request(req);
	nfs_list_add_request(req, &desc->pg_list);
	desc->pg_count += req->wb_bytes;
	return 1;
}

/*
 * Helper for nfs_pageio_add_request and nfs_pageio_complete
 */
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
	if (!list_empty(&desc->pg_list)) {
		int error = desc->pg_ops->pg_doio(desc);
		if (error < 0)
			desc->pg_error = error;
		else
			desc->pg_bytes_written += desc->pg_count;
	}
	if (list_empty(&desc->pg_list)) {
		desc->pg_count = 0;
		desc->pg_base = 0;
	}
}

/**
 * __nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'. If it cannot be coalesced, the list is
 * first flushed with nfs_pageio_doio() and the attempt is retried.
 */
static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
				    struct nfs_page *req)
{
	while (!nfs_pageio_do_add_request(desc, req)) {
		desc->pg_moreio = 1;
		nfs_pageio_doio(desc);
		if (desc->pg_error < 0)
			return 0;
		desc->pg_moreio = 0;
		if (desc->pg_recoalesce)
			return 0;
	}
	return 1;
}

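/*
 * nfs_do_recoalesce - pass the descriptor's current page list back
 * through the coalescing logic
 * @desc: io descriptor to recoalesce
 *
 * Splices the page list aside, resets the descriptor and re-adds each
 * request with __nfs_pageio_add_request(), repeating while
 * pg_recoalesce keeps being set. Returns 0 on error, 1 otherwise.
 */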
static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
{
	LIST_HEAD(head);

	do {
		list_splice_init(&desc->pg_list, &head);
		desc->pg_bytes_written -= desc->pg_count;
		desc->pg_count = 0;
		desc->pg_base = 0;
		desc->pg_recoalesce = 0;

		while (!list_empty(&head)) {
			struct nfs_page *req;

			req = list_first_entry(&head, struct nfs_page, wb_list);
			nfs_list_remove_request(req);
			if (__nfs_pageio_add_request(desc, req))
				continue;
			if (desc->pg_error < 0)
				return 0;
			break;
		}
	} while (desc->pg_recoalesce);
	return 1;
}

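/**
 * nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc', recoalescing as necessary. On error,
 * returns false with the cause recorded in desc->pg_error.
 */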
int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *req)
{
	int ret;

	do {
		ret = __nfs_pageio_add_request(desc, req);
		if (ret)
			break;
		if (desc->pg_error < 0)
			break;
		ret = nfs_do_recoalesce(desc);
	} while (ret);
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_pageio_add_request);

/**
 * nfs_pageio_complete - Complete I/O on an nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 */
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
{
	for (;;) {
		nfs_pageio_doio(desc);
		if (!desc->pg_recoalesce)
			break;
		if (!nfs_do_recoalesce(desc))
			break;
	}
}
EXPORT_SYMBOL_GPL(nfs_pageio_complete);

/**
 * nfs_pageio_cond_complete - Conditional I/O completion
 * @desc: pointer to io descriptor
 * @index: page index
 *
 * It is important to ensure that processes don't try to take locks
 * on non-contiguous ranges of pages as that might deadlock. This
 * function should be called before attempting to wait on a locked
 * nfs_page. It will complete the I/O if the page index 'index'
 * is not contiguous with the existing list of pages in 'desc'.
 */
void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
{
	if (!list_empty(&desc->pg_list)) {
		struct nfs_page *prev = nfs_list_entry(desc->pg_list.prev);
		if (index != prev->wb_index + 1)
			nfs_pageio_complete(desc);
	}
}

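/*
 * Module init/exit helpers: create and destroy the slab cache backing
 * struct nfs_page allocations.
 */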
int __init nfs_init_nfspagecache(void)
{
	nfs_page_cachep = kmem_cache_create("nfs_page",
					    sizeof(struct nfs_page),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (nfs_page_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_nfspagecache(void)
{
	kmem_cache_destroy(nfs_page_cachep);
}