/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_page.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/export.h>

#include "internal.h"
#include "pnfs.h"

static struct kmem_cache *nfs_page_cachep;

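/*
 * nfs_pgarray_set - set up the page vector of an nfs_page_array
 * @p: array to initialise
 * @pagecount: number of pages the vector must hold
 *
 * Uses the embedded page_array when @pagecount fits, otherwise
 * allocates a vector with kcalloc(). Returns false (and resets
 * npages to 0) if the allocation fails.
 */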
bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount)
{
        p->npages = pagecount;
        if (pagecount <= ARRAY_SIZE(p->page_array))
                p->pagevec = p->page_array;
        else {
                p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
                if (!p->pagevec)
                        p->npages = 0;
        }
        return p->pagevec != NULL;
}

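/*
 * nfs_pgheader_init - fill in an I/O header from a pageio descriptor
 * @desc: descriptor whose coalesced request list is being committed
 * @hdr: header to initialise
 * @release: callback invoked when the header is released
 *
 * Seeds the header from the first request on @desc's page list and
 * optimistically assumes the whole pg_count byte range will complete;
 * nfs_set_pgio_error() below trims good_bytes if an error occurs.
 */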
void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
                       struct nfs_pgio_header *hdr,
                       void (*release)(struct nfs_pgio_header *hdr))
{
        hdr->req = nfs_list_entry(desc->pg_list.next);
        hdr->inode = desc->pg_inode;
        hdr->cred = hdr->req->wb_context->cred;
        hdr->io_start = req_offset(hdr->req);
        hdr->good_bytes = desc->pg_count;
        hdr->release = release;
}

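/*
 * nfs_set_pgio_error - record an I/O error in the header
 * @hdr: header to update
 * @error: error code to record
 * @pos: file offset at which the error occurred
 *
 * Only ever shrinks good_bytes: an error at or beyond an already
 * recorded failure point is ignored, so the header always reports
 * the lowest failing offset.
 */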
void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
{
        spin_lock(&hdr->lock);
        if (pos < hdr->io_start + hdr->good_bytes) {
                set_bit(NFS_IOHDR_ERROR, &hdr->flags);
                clear_bit(NFS_IOHDR_EOF, &hdr->flags);
                hdr->good_bytes = pos - hdr->io_start;
                hdr->error = error;
        }
        spin_unlock(&hdr->lock);
}

static inline struct nfs_page *
nfs_page_alloc(void)
{
        struct nfs_page *p = kmem_cache_zalloc(nfs_page_cachep, GFP_KERNEL);
        if (p)
                INIT_LIST_HEAD(&p->wb_list);
        return p;
}

static inline void
nfs_page_free(struct nfs_page *p)
{
        kmem_cache_free(nfs_page_cachep, p);
}

/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @inode: inode to which the request is attached
 * @page: page to read from or write to
 * @offset: starting offset within the page for the read/write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * The caller must ensure it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
                   struct page *page,
                   unsigned int offset, unsigned int count)
{
        struct nfs_page *req;

        /* try to allocate the request struct */
        req = nfs_page_alloc();
        if (req == NULL)
                return ERR_PTR(-ENOMEM);

        /* get lock context early so we can deal with alloc failures */
        req->wb_lock_context = nfs_get_lock_context(ctx);
        if (req->wb_lock_context == NULL) {
                nfs_page_free(req);
                return ERR_PTR(-ENOMEM);
        }

        /* Initialize the request struct. Initially, we assume a
         * long write-back delay. */
        req->wb_page = page;
        atomic_set(&req->wb_complete, 0);
        req->wb_index = page->index;
        page_cache_get(page);
        BUG_ON(PagePrivate(page));
        BUG_ON(!PageLocked(page));
        BUG_ON(page->mapping->host != inode);
        req->wb_offset = offset;
        req->wb_pgbase = offset;
        req->wb_bytes = count;
        req->wb_context = get_nfs_open_context(ctx);
        kref_init(&req->wb_kref);
        return req;
}

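/*
 * Illustrative sketch only (the real callers live in the read and
 * write paths, not in this file): a typical user creates a request
 * for a locked page, queues it for I/O, and drops its own reference
 * when done. Error handling follows the ERR_PTR convention above.
 *
 *      struct nfs_page *req;
 *
 *      req = nfs_create_request(ctx, inode, page, 0, PAGE_CACHE_SIZE);
 *      if (IS_ERR(req))
 *              return PTR_ERR(req);
 *      ...queue req for I/O...
 *      nfs_release_request(req);
 */
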
/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: request to unlock
 */
void nfs_unlock_request(struct nfs_page *req)
{
        if (!NFS_WBACK_BUSY(req)) {
                printk(KERN_ERR "NFS: Invalid unlock attempted\n");
                BUG();
        }
        smp_mb__before_clear_bit();
        clear_bit(PG_BUSY, &req->wb_flags);
        smp_mb__after_clear_bit();
        wake_up_bit(&req->wb_flags, PG_BUSY);
        nfs_release_request(req);
}

/*
 * nfs_clear_request - Free up all resources allocated to the request
 * @req: request to clear
 *
 * Release page and open context resources associated with a read/write
 * request after it has completed.
 */
static void nfs_clear_request(struct nfs_page *req)
{
        struct page *page = req->wb_page;
        struct nfs_open_context *ctx = req->wb_context;
        struct nfs_lock_context *l_ctx = req->wb_lock_context;

        if (page != NULL) {
                page_cache_release(page);
                req->wb_page = NULL;
        }
        if (l_ctx != NULL) {
                nfs_put_lock_context(l_ctx);
                req->wb_lock_context = NULL;
        }
        if (ctx != NULL) {
                put_nfs_open_context(ctx);
                req->wb_context = NULL;
        }
}

/*
 * nfs_free_request - kref release callback: frees the request once the
 * last reference is gone.
 */
static void nfs_free_request(struct kref *kref)
{
        struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);

        /* Release struct file and open context */
        nfs_clear_request(req);
        nfs_page_free(req);
}

/**
 * nfs_release_request - Release the count on an NFS read/write request
 * @req: request to release
 *
 * Note: Should never be called with the spinlock held!
 */
void nfs_release_request(struct nfs_page *req)
{
        kref_put(&req->wb_kref, nfs_free_request);
}

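/*
 * Action function for wait_on_bit(): sleep via io_schedule() and
 * return 0 unconditionally, so the wait is never aborted.
 */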
static int nfs_wait_bit_uninterruptible(void *word)
{
        io_schedule();
        return 0;
}

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * The wait is uninterruptible: it does not abort on signals.
 * The user is responsible for holding a count on the request.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
        return wait_on_bit(&req->wb_flags, PG_BUSY,
                        nfs_wait_bit_uninterruptible,
                        TASK_UNINTERRUPTIBLE);
}

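/*
 * nfs_generic_pg_test - default coalescing size test
 * @desc: destination io descriptor
 * @prev: previous request on the descriptor's list
 * @req: request being considered for coalescing
 *
 * Returns true if adding @req keeps the total coalesced byte count
 * within the descriptor's block size.
 */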
bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
                         struct nfs_page *prev, struct nfs_page *req)
{
        /*
         * FIXME: ideally we should be able to coalesce all requests
         * that are not block boundary aligned, but currently this
         * is problematic for the case of bsize < PAGE_CACHE_SIZE,
         * since nfs_flush_multi and nfs_pagein_multi assume you
         * can have only one struct nfs_page.
         */
        if (desc->pg_bsize < PAGE_SIZE)
                return false;

        return desc->pg_count + req->wb_bytes <= desc->pg_bsize;
}
EXPORT_SYMBOL_GPL(nfs_generic_pg_test);

/**
 * nfs_pageio_init - initialise a page io descriptor
 * @desc: pointer to descriptor
 * @inode: pointer to inode
 * @pg_ops: pointer to pageio operations
 * @bsize: io block size
 * @io_flags: extra parameters for the io function
 */
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
                     struct inode *inode,
                     const struct nfs_pageio_ops *pg_ops,
                     size_t bsize,
                     int io_flags)
{
        INIT_LIST_HEAD(&desc->pg_list);
        desc->pg_bytes_written = 0;
        desc->pg_count = 0;
        desc->pg_bsize = bsize;
        desc->pg_base = 0;
        desc->pg_moreio = 0;
        desc->pg_recoalesce = 0;
        desc->pg_inode = inode;
        desc->pg_ops = pg_ops;
        desc->pg_ioflags = io_flags;
        desc->pg_error = 0;
        desc->pg_lseg = NULL;
}

/**
 * nfs_can_coalesce_requests - test two requests for compatibility
 * @prev: pointer to nfs_page
 * @req: pointer to nfs_page
 * @pgio: pointer to the io descriptor whose pg_test op is consulted
 *
 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 * page data area they describe is contiguous, and that their RPC
 * credentials, NFSv4 open state, and lockowners are the same.
 *
 * Return 'true' if this is the case, else return 'false'.
 */
static bool nfs_can_coalesce_requests(struct nfs_page *prev,
                                      struct nfs_page *req,
                                      struct nfs_pageio_descriptor *pgio)
{
        if (req->wb_context->cred != prev->wb_context->cred)
                return false;
        if (req->wb_lock_context->lockowner != prev->wb_lock_context->lockowner)
                return false;
        if (req->wb_context->state != prev->wb_context->state)
                return false;
        if (req->wb_index != (prev->wb_index + 1))
                return false;
        if (req->wb_pgbase != 0)
                return false;
        if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
                return false;
        return pgio->pg_ops->pg_test(pgio, prev, req);
}

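/*
 * Worked example (hypothetical values, assuming 4096-byte pages and
 * matching credentials/state/lockowner): a request covering bytes
 * 0-4095 of page index 4 can be coalesced after a previous request
 * covering bytes 0-4095 of page index 3, since the indices are
 * consecutive, @req starts at pgbase 0, and @prev runs to the end of
 * its page. A request starting at pgbase 512, or one on page index 5,
 * would fail the test and start a new RPC instead.
 */
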
/**
 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
                                     struct nfs_page *req)
{
        if (desc->pg_count != 0) {
                struct nfs_page *prev;

                prev = nfs_list_entry(desc->pg_list.prev);
                if (!nfs_can_coalesce_requests(prev, req, desc))
                        return 0;
        } else {
                if (desc->pg_ops->pg_init)
                        desc->pg_ops->pg_init(desc, req);
                desc->pg_base = req->wb_pgbase;
        }
        nfs_list_remove_request(req);
        nfs_list_add_request(req, &desc->pg_list);
        desc->pg_count += req->wb_bytes;
        return 1;
}

/*
 * Helper for nfs_pageio_add_request and nfs_pageio_complete
 */
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
        if (!list_empty(&desc->pg_list)) {
                int error = desc->pg_ops->pg_doio(desc);
                if (error < 0)
                        desc->pg_error = error;
                else
                        desc->pg_bytes_written += desc->pg_count;
        }
        if (list_empty(&desc->pg_list)) {
                desc->pg_count = 0;
                desc->pg_base = 0;
        }
}

/*
 * __nfs_pageio_add_request - try to coalesce @req into the descriptor's
 * page list, flushing the current list with nfs_pageio_doio() whenever
 * coalescing fails. Returns 1 on success; 0 if the request could not be
 * added because an error was recorded or a recoalesce is pending.
 */
static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
                                    struct nfs_page *req)
{
        while (!nfs_pageio_do_add_request(desc, req)) {
                desc->pg_moreio = 1;
                nfs_pageio_doio(desc);
                if (desc->pg_error < 0)
                        return 0;
                desc->pg_moreio = 0;
                if (desc->pg_recoalesce)
                        return 0;
        }
        return 1;
}

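/*
 * nfs_do_recoalesce - re-add requests left over after a recoalesce was
 * requested: the descriptor's page list is spliced onto a private list
 * and each request is fed back through __nfs_pageio_add_request(),
 * repeating while pg_recoalesce gets set again. Returns 0 if an error
 * was recorded, 1 otherwise.
 */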
static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
{
        LIST_HEAD(head);

        do {
                list_splice_init(&desc->pg_list, &head);
                desc->pg_bytes_written -= desc->pg_count;
                desc->pg_count = 0;
                desc->pg_base = 0;
                desc->pg_recoalesce = 0;

                while (!list_empty(&head)) {
                        struct nfs_page *req;

                        req = list_first_entry(&head, struct nfs_page, wb_list);
                        nfs_list_remove_request(req);
                        if (__nfs_pageio_add_request(desc, req))
                                continue;
                        if (desc->pg_error < 0)
                                return 0;
                        break;
                }
        } while (desc->pg_recoalesce);
        return 1;
}

/**
 * nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
                           struct nfs_page *req)
{
        int ret;

        do {
                ret = __nfs_pageio_add_request(desc, req);
                if (ret)
                        break;
                if (desc->pg_error < 0)
                        break;
                ret = nfs_do_recoalesce(desc);
        } while (ret);
        return ret;
}

/**
 * nfs_pageio_complete - Complete I/O on an nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 */
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
{
        for (;;) {
                nfs_pageio_doio(desc);
                if (!desc->pg_recoalesce)
                        break;
                if (!nfs_do_recoalesce(desc))
                        break;
        }
}

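/*
 * Typical descriptor lifecycle (illustrative sketch only; the real
 * callers live in the read and write paths, and "pgio_ops" and "pages"
 * here are hypothetical placeholders):
 *
 *      struct nfs_pageio_descriptor pgio;
 *      struct nfs_page *req, *tmp;
 *
 *      nfs_pageio_init(&pgio, inode, pgio_ops, NFS_SERVER(inode)->wsize, 0);
 *      list_for_each_entry_safe(req, tmp, &pages, wb_list) {
 *              if (!nfs_pageio_add_request(&pgio, req))
 *                      break;          // pg_error holds the cause
 *      }
 *      nfs_pageio_complete(&pgio);     // flush any remaining requests
 */
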
/**
 * nfs_pageio_cond_complete - Conditional I/O completion
 * @desc: pointer to io descriptor
 * @index: page index
 *
 * It is important to ensure that processes don't try to take locks
 * on non-contiguous ranges of pages as that might deadlock. This
 * function should be called before attempting to wait on a locked
 * nfs_page. It will complete the I/O if the page index 'index'
 * is not contiguous with the existing list of pages in 'desc'.
 */
void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
{
        if (!list_empty(&desc->pg_list)) {
                struct nfs_page *prev = nfs_list_entry(desc->pg_list.prev);
                if (index != prev->wb_index + 1)
                        nfs_pageio_complete(desc);
        }
}

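/*
 * Create and destroy the slab cache backing struct nfs_page
 * allocations; called from module init and exit respectively.
 */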
int __init nfs_init_nfspagecache(void)
{
        nfs_page_cachep = kmem_cache_create("nfs_page",
                                            sizeof(struct nfs_page),
                                            0, SLAB_HWCACHE_ALIGN,
                                            NULL);
        if (nfs_page_cachep == NULL)
                return -ENOMEM;

        return 0;
}

void nfs_destroy_nfspagecache(void)
{
        kmem_cache_destroy(nfs_page_cachep);
}