/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */
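
/*
 * Typical call flow, shown here only as a hedged sketch (the exact
 * descriptor setup, block size and callback tables are assumptions,
 * not copied from any in-tree caller): initialise a descriptor, feed
 * it locked-page requests, then flush whatever has been coalesced.
 *
 *	struct nfs_pageio_descriptor pgio;
 *
 *	nfs_pageio_init(&pgio, inode, pg_ops, compl_ops, bsize, io_flags);
 *	nfs_pageio_add_request(&pgio, req);	- once per nfs_page
 *	nfs_pageio_complete(&pgio);		- issue the coalesced RPC(s)
 */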

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_page.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/export.h>

#include "internal.h"
#include "pnfs.h"

static struct kmem_cache *nfs_page_cachep;

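/*
 * nfs_pgarray_set - set up the page array for a request
 * @p: page array to initialise
 * @pagecount: number of pages required
 *
 * Small requests use the array embedded in struct nfs_page_array;
 * larger ones fall back to a kcalloc()ed vector. Returns true on
 * success, false if the allocation failed (npages is then zeroed).
 */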
bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount)
{
	p->npages = pagecount;
	if (pagecount <= ARRAY_SIZE(p->page_array))
		p->pagevec = p->page_array;
	else {
		p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
		if (!p->pagevec)
			p->npages = 0;
	}
	return p->pagevec != NULL;
}

void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
		       struct nfs_pgio_header *hdr,
		       void (*release)(struct nfs_pgio_header *hdr))
{
	hdr->req = nfs_list_entry(desc->pg_list.next);
	hdr->inode = desc->pg_inode;
	hdr->cred = hdr->req->wb_context->cred;
	hdr->io_start = req_offset(hdr->req);
	hdr->good_bytes = desc->pg_count;
	hdr->dreq = desc->pg_dreq;
	hdr->release = release;
	hdr->completion_ops = desc->pg_completion_ops;
	if (hdr->completion_ops->init_hdr)
		hdr->completion_ops->init_hdr(hdr);
}
EXPORT_SYMBOL_GPL(nfs_pgheader_init);

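/*
 * nfs_set_pgio_error - record an I/O error against a pageio header
 * @hdr: header to update
 * @error: error code to record
 * @pos: file offset at which the error occurred
 *
 * Only the first error (lowest offset) is kept: good_bytes is trimmed
 * so that it counts just the data successfully handled before @pos.
 */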
void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
{
	spin_lock(&hdr->lock);
	if (pos < hdr->io_start + hdr->good_bytes) {
		set_bit(NFS_IOHDR_ERROR, &hdr->flags);
		clear_bit(NFS_IOHDR_EOF, &hdr->flags);
		hdr->good_bytes = pos - hdr->io_start;
		hdr->error = error;
	}
	spin_unlock(&hdr->lock);
}

static inline struct nfs_page *
nfs_page_alloc(void)
{
	struct nfs_page *p = kmem_cache_zalloc(nfs_page_cachep, GFP_NOIO);
	if (p)
		INIT_LIST_HEAD(&p->wb_list);
	return p;
}

static inline void
nfs_page_free(struct nfs_page *p)
{
	kmem_cache_free(nfs_page_cachep, p);
}

/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @inode: inode to which the request is attached
 * @page: page to read or write
 * @offset: starting offset within the page for the read/write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * The caller must ensure it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
		   struct page *page,
		   unsigned int offset, unsigned int count)
{
	struct nfs_page *req;

	/* try to allocate the request struct */
	req = nfs_page_alloc();
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	/* get lock context early so we can deal with alloc failures */
	req->wb_lock_context = nfs_get_lock_context(ctx);
	if (req->wb_lock_context == NULL) {
		nfs_page_free(req);
		return ERR_PTR(-ENOMEM);
	}

	/* Initialize the request struct, taking extra references on
	 * the page and the open context. */
	req->wb_page = page;
	req->wb_index = page_file_index(page);
	page_cache_get(page);
	req->wb_offset = offset;
	req->wb_pgbase = offset;
	req->wb_bytes = count;
	req->wb_context = get_nfs_open_context(ctx);
	kref_init(&req->wb_kref);
	return req;
}
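
/*
 * A hedged usage sketch for nfs_create_request() (the surrounding
 * locking and error handling are assumptions, not copied from a real
 * caller): the page must already be locked, and the returned request
 * holds its own references on the page, open context and lock
 * context, all dropped again by nfs_release_request().
 *
 *	req = nfs_create_request(ctx, inode, page, 0, PAGE_CACHE_SIZE);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	...
 *	nfs_release_request(req);
 */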

/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: request to unlock
 */
void nfs_unlock_request(struct nfs_page *req)
{
	if (!NFS_WBACK_BUSY(req)) {
		printk(KERN_ERR "NFS: Invalid unlock attempted\n");
		BUG();
	}
	smp_mb__before_clear_bit();
	clear_bit(PG_BUSY, &req->wb_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&req->wb_flags, PG_BUSY);
}

/**
 * nfs_unlock_and_release_request - Unlock request and release the nfs_page
 * @req: request to unlock and release
 */
void nfs_unlock_and_release_request(struct nfs_page *req)
{
	nfs_unlock_request(req);
	nfs_release_request(req);
}

/*
 * nfs_clear_request - Free up all resources allocated to the request
 * @req: request to clear
 *
 * Release page and open context resources associated with a read/write
 * request after it has completed.
 */
static void nfs_clear_request(struct nfs_page *req)
{
	struct page *page = req->wb_page;
	struct nfs_open_context *ctx = req->wb_context;
	struct nfs_lock_context *l_ctx = req->wb_lock_context;

	if (page != NULL) {
		page_cache_release(page);
		req->wb_page = NULL;
	}
	if (l_ctx != NULL) {
		nfs_put_lock_context(l_ctx);
		req->wb_lock_context = NULL;
	}
	if (ctx != NULL) {
		put_nfs_open_context(ctx);
		req->wb_context = NULL;
	}
}

/*
 * nfs_free_request - kref release callback for an nfs_page
 * @kref: kref embedded in the request
 *
 * Drops the page, lock context and open context references held by
 * the request, then returns it to the slab cache.
 */
static void nfs_free_request(struct kref *kref)
{
	struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);

	/* Release struct file and open context */
	nfs_clear_request(req);
	nfs_page_free(req);
}

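/**
 * nfs_release_request - Release the count on an NFS read/write request
 * @req: request to release
 *
 * Note: Should never be called with the spinlock held!
 */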
void nfs_release_request(struct nfs_page *req)
{
	kref_put(&req->wb_kref, nfs_free_request);
}

static int nfs_wait_bit_uninterruptible(void *word)
{
	io_schedule();
	return 0;
}

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * Interruptible by fatal signals only.
 * The user is responsible for holding a count on the request.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
	return wait_on_bit(&req->wb_flags, PG_BUSY,
			   nfs_wait_bit_uninterruptible,
			   TASK_UNINTERRUPTIBLE);
}

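/*
 * nfs_generic_pg_test - default test for whether a request may be coalesced
 * @desc: destination io descriptor
 * @prev: previous request in the list
 * @req: request being considered
 *
 * Returns true if adding @req keeps the total byte count within the
 * descriptor's block size.
 */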
bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_page *prev, struct nfs_page *req)
{
	/*
	 * FIXME: ideally we should be able to coalesce all requests
	 * that are not block boundary aligned, but currently this
	 * is problematic for the case of bsize < PAGE_CACHE_SIZE,
	 * since nfs_flush_multi and nfs_pagein_multi assume you
	 * can have only one struct nfs_page.
	 */
	if (desc->pg_bsize < PAGE_SIZE)
		return false;

	return desc->pg_count + req->wb_bytes <= desc->pg_bsize;
}
EXPORT_SYMBOL_GPL(nfs_generic_pg_test);

/**
 * nfs_pageio_init - initialise a page io descriptor
 * @desc: pointer to descriptor
 * @inode: pointer to inode
 * @pg_ops: pointer to pageio operations
 * @compl_ops: pointer to pageio completion operations
 * @bsize: io block size
 * @io_flags: extra parameters for the io function
 */
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
		     struct inode *inode,
		     const struct nfs_pageio_ops *pg_ops,
		     const struct nfs_pgio_completion_ops *compl_ops,
		     size_t bsize,
		     int io_flags)
{
	INIT_LIST_HEAD(&desc->pg_list);
	desc->pg_bytes_written = 0;
	desc->pg_count = 0;
	desc->pg_bsize = bsize;
	desc->pg_base = 0;
	desc->pg_moreio = 0;
	desc->pg_recoalesce = 0;
	desc->pg_inode = inode;
	desc->pg_ops = pg_ops;
	desc->pg_completion_ops = compl_ops;
	desc->pg_ioflags = io_flags;
	desc->pg_error = 0;
	desc->pg_lseg = NULL;
	desc->pg_dreq = NULL;
}
EXPORT_SYMBOL_GPL(nfs_pageio_init);

/**
 * nfs_can_coalesce_requests - test two requests for compatibility
 * @prev: pointer to nfs_page
 * @req: pointer to nfs_page
 * @pgio: pointer to nfs_pageio_descriptor
 *
 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 * page data area they describe is contiguous, and that their RPC
 * credentials, NFSv4 open state, and lockowners are the same.
 *
 * Return 'true' if this is the case, else return 'false'.
 */
static bool nfs_can_coalesce_requests(struct nfs_page *prev,
				      struct nfs_page *req,
				      struct nfs_pageio_descriptor *pgio)
{
	if (req->wb_context->cred != prev->wb_context->cred)
		return false;
	if (req->wb_lock_context->lockowner != prev->wb_lock_context->lockowner)
		return false;
	if (req->wb_context->state != prev->wb_context->state)
		return false;
	if (req->wb_pgbase != 0)
		return false;
	if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
		return false;
	if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
		return false;
	return pgio->pg_ops->pg_test(pgio, prev, req);
}

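/*
 * As a concrete illustration of the coalescing rules above: with 4k
 * pages, a request covering bytes 0-4095 of page index 3 and one
 * covering bytes 0-4095 of page index 4 are contiguous in the file,
 * so they coalesce provided they share the same credential, open
 * state and lock owner and the pg_test callback agrees. A request
 * whose wb_pgbase is non-zero, or whose predecessor does not reach
 * the end of its page, never coalesces.
 */
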
/**
 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
				     struct nfs_page *req)
{
	if (desc->pg_count != 0) {
		struct nfs_page *prev;

		prev = nfs_list_entry(desc->pg_list.prev);
		if (!nfs_can_coalesce_requests(prev, req, desc))
			return 0;
	} else {
		if (desc->pg_ops->pg_init)
			desc->pg_ops->pg_init(desc, req);
		desc->pg_base = req->wb_pgbase;
	}
	nfs_list_remove_request(req);
	nfs_list_add_request(req, &desc->pg_list);
	desc->pg_count += req->wb_bytes;
	return 1;
}

/*
 * Helper for nfs_pageio_add_request and nfs_pageio_complete
 */
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
	if (!list_empty(&desc->pg_list)) {
		int error = desc->pg_ops->pg_doio(desc);
		if (error < 0)
			desc->pg_error = error;
		else
			desc->pg_bytes_written += desc->pg_count;
	}
	if (list_empty(&desc->pg_list)) {
		desc->pg_count = 0;
		desc->pg_base = 0;
	}
}

/**
 * __nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
				    struct nfs_page *req)
{
	while (!nfs_pageio_do_add_request(desc, req)) {
		desc->pg_moreio = 1;
		nfs_pageio_doio(desc);
		if (desc->pg_error < 0)
			return 0;
		desc->pg_moreio = 0;
		if (desc->pg_recoalesce)
			return 0;
	}
	return 1;
}

static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
{
	LIST_HEAD(head);

	do {
		list_splice_init(&desc->pg_list, &head);
		desc->pg_bytes_written -= desc->pg_count;
		desc->pg_count = 0;
		desc->pg_base = 0;
		desc->pg_recoalesce = 0;

		while (!list_empty(&head)) {
			struct nfs_page *req;

			req = list_first_entry(&head, struct nfs_page, wb_list);
			nfs_list_remove_request(req);
			if (__nfs_pageio_add_request(desc, req))
				continue;
			if (desc->pg_error < 0)
				return 0;
			break;
		}
	} while (desc->pg_recoalesce);
	return 1;
}

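/**
 * nfs_pageio_add_request - Add a request, issuing I/O if needed to make room
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns 1 if the request was added, possibly after flushing and
 * recoalescing the list; 0 on failure, with desc->pg_error set.
 */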
int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *req)
{
	int ret;

	do {
		ret = __nfs_pageio_add_request(desc, req);
		if (ret)
			break;
		if (desc->pg_error < 0)
			break;
		ret = nfs_do_recoalesce(desc);
	} while (ret);
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_pageio_add_request);

/**
 * nfs_pageio_complete - Complete I/O on an nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 */
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
{
	for (;;) {
		nfs_pageio_doio(desc);
		if (!desc->pg_recoalesce)
			break;
		if (!nfs_do_recoalesce(desc))
			break;
	}
}
EXPORT_SYMBOL_GPL(nfs_pageio_complete);

/**
 * nfs_pageio_cond_complete - Conditional I/O completion
 * @desc: pointer to io descriptor
 * @index: page index
 *
 * It is important to ensure that processes don't try to take locks
 * on non-contiguous ranges of pages as that might deadlock. This
 * function should be called before attempting to wait on a locked
 * nfs_page. It will complete the I/O if the page index 'index'
 * is not contiguous with the existing list of pages in 'desc'.
 */
void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
{
	if (!list_empty(&desc->pg_list)) {
		struct nfs_page *prev = nfs_list_entry(desc->pg_list.prev);
		if (index != prev->wb_index + 1)
			nfs_pageio_complete(desc);
	}
}

int __init nfs_init_nfspagecache(void)
{
	nfs_page_cachep = kmem_cache_create("nfs_page",
					    sizeof(struct nfs_page),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (nfs_page_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_nfspagecache(void)
{
	kmem_cache_destroy(nfs_page_cachep);
}