/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_page.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/export.h>

#include "internal.h"
#include "pnfs.h"

#define NFSDBG_FACILITY NFSDBG_PAGECACHE

static struct kmem_cache *nfs_page_cachep;

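/*
 * nfs_pgarray_set - set up the page array for an I/O request
 * @p: page array to initialise
 * @pagecount: number of page slots needed
 *
 * Uses the small array embedded in @p when @pagecount fits, and
 * otherwise falls back to a kcalloc() allocation.  Returns true
 * if a usable page vector is available.
 */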
static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount)
{
        p->npages = pagecount;
        if (pagecount <= ARRAY_SIZE(p->page_array))
                p->pagevec = p->page_array;
        else {
                p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
                if (!p->pagevec)
                        p->npages = 0;
        }
        return p->pagevec != NULL;
}

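/*
 * nfs_pgheader_init - initialise an I/O header from a pageio descriptor
 * @desc: pageio descriptor describing the queued requests
 * @hdr: I/O header to initialise
 * @release: callback invoked when the header is released
 *
 * Copies the request, inode, credential and byte-count state from
 * @desc into @hdr, then gives the completion ops a chance to do
 * their own setup via ->init_hdr().
 */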
void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
                       struct nfs_pgio_header *hdr,
                       void (*release)(struct nfs_pgio_header *hdr))
{
        hdr->req = nfs_list_entry(desc->pg_list.next);
        hdr->inode = desc->pg_inode;
        hdr->cred = hdr->req->wb_context->cred;
        hdr->io_start = req_offset(hdr->req);
        hdr->good_bytes = desc->pg_count;
        hdr->dreq = desc->pg_dreq;
        hdr->layout_private = desc->pg_layout_private;
        hdr->release = release;
        hdr->completion_ops = desc->pg_completion_ops;
        if (hdr->completion_ops->init_hdr)
                hdr->completion_ops->init_hdr(hdr);
}
EXPORT_SYMBOL_GPL(nfs_pgheader_init);

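/*
 * nfs_set_pgio_error - record an I/O error against an I/O header
 * @hdr: I/O header to update
 * @error: error code to record
 * @pos: file offset at which the error occurred
 *
 * Only the earliest error (the one at the lowest offset) is kept:
 * good_bytes is trimmed so that it covers only the data up to @pos.
 */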
void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
{
        spin_lock(&hdr->lock);
        if (pos < hdr->io_start + hdr->good_bytes) {
                set_bit(NFS_IOHDR_ERROR, &hdr->flags);
                clear_bit(NFS_IOHDR_EOF, &hdr->flags);
                hdr->good_bytes = pos - hdr->io_start;
                hdr->error = error;
        }
        spin_unlock(&hdr->lock);
}

static inline struct nfs_page *
nfs_page_alloc(void)
{
        struct nfs_page *p = kmem_cache_zalloc(nfs_page_cachep, GFP_NOIO);
        if (p)
                INIT_LIST_HEAD(&p->wb_list);
        return p;
}

static inline void
nfs_page_free(struct nfs_page *p)
{
        kmem_cache_free(nfs_page_cachep, p);
}

static void
nfs_iocounter_inc(struct nfs_io_counter *c)
{
        atomic_inc(&c->io_count);
}

static void
nfs_iocounter_dec(struct nfs_io_counter *c)
{
        if (atomic_dec_and_test(&c->io_count)) {
                clear_bit(NFS_IO_INPROGRESS, &c->flags);
                smp_mb__after_clear_bit();
                wake_up_bit(&c->flags, NFS_IO_INPROGRESS);
        }
}

static int
__nfs_iocounter_wait(struct nfs_io_counter *c)
{
        wait_queue_head_t *wq = bit_waitqueue(&c->flags, NFS_IO_INPROGRESS);
        DEFINE_WAIT_BIT(q, &c->flags, NFS_IO_INPROGRESS);
        int ret = 0;

        do {
                prepare_to_wait(wq, &q.wait, TASK_KILLABLE);
                set_bit(NFS_IO_INPROGRESS, &c->flags);
                if (atomic_read(&c->io_count) == 0)
                        break;
                ret = nfs_wait_bit_killable(&c->flags);
        } while (atomic_read(&c->io_count) != 0);
        finish_wait(wq, &q.wait);
        return ret;
}

/**
 * nfs_iocounter_wait - wait for i/o to complete
 * @c: nfs_io_counter to use
 *
 * Returns -ERESTARTSYS if interrupted by a fatal signal.
 * Otherwise returns 0 once the io_count hits 0.
 */
int
nfs_iocounter_wait(struct nfs_io_counter *c)
{
        if (atomic_read(&c->io_count) == 0)
                return 0;
        return __nfs_iocounter_wait(c);
}

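/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * a lock-context teardown path might drain outstanding I/O like so:
 *
 *      ret = nfs_iocounter_wait(&l_ctx->io_count);
 *      if (ret == -ERESTARTSYS)
 *              return ret;     (fatal signal; give up waiting)
 *      (io_count reached zero; safe to proceed)
 */
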
/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @inode: inode to which the request is attached
 * @page: page to read or write
 * @offset: starting offset within the page for the read/write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * The caller must ensure it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
                   struct page *page,
                   unsigned int offset, unsigned int count)
{
        struct nfs_page *req;
        struct nfs_lock_context *l_ctx;

        if (test_bit(NFS_CONTEXT_BAD, &ctx->flags))
                return ERR_PTR(-EBADF);
        /* try to allocate the request struct */
        req = nfs_page_alloc();
        if (req == NULL)
                return ERR_PTR(-ENOMEM);

        /* get lock context early so we can deal with alloc failures */
        l_ctx = nfs_get_lock_context(ctx);
        if (IS_ERR(l_ctx)) {
                nfs_page_free(req);
                return ERR_CAST(l_ctx);
        }
        req->wb_lock_context = l_ctx;
        nfs_iocounter_inc(&l_ctx->io_count);

        /* Initialize the request struct. Initially, we assume a
         * long write-back delay. This will be adjusted in
         * update_nfs_request below if the region is not locked. */
        req->wb_page = page;
        req->wb_index = page_file_index(page);
        page_cache_get(page);
        req->wb_offset = offset;
        req->wb_pgbase = offset;
        req->wb_bytes = count;
        req->wb_context = get_nfs_open_context(ctx);
        kref_init(&req->wb_kref);
        return req;
}

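/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * the typical lifecycle of an nfs_page created above is
 *
 *      req = nfs_create_request(ctx, inode, page, 0, PAGE_CACHE_SIZE);
 *      if (IS_ERR(req))
 *              return PTR_ERR(req);
 *      (... queue req on an nfs_pageio_descriptor ...)
 *      nfs_unlock_and_release_request(req);    (on completion)
 */
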
/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: pointer to request to unlock
 */
void nfs_unlock_request(struct nfs_page *req)
{
        if (!NFS_WBACK_BUSY(req)) {
                printk(KERN_ERR "NFS: Invalid unlock attempted\n");
                BUG();
        }
        smp_mb__before_clear_bit();
        clear_bit(PG_BUSY, &req->wb_flags);
        smp_mb__after_clear_bit();
        wake_up_bit(&req->wb_flags, PG_BUSY);
}

/**
 * nfs_unlock_and_release_request - Unlock request and release the nfs_page
 * @req: pointer to request to unlock and release
 */
void nfs_unlock_and_release_request(struct nfs_page *req)
{
        nfs_unlock_request(req);
        nfs_release_request(req);
}

/*
 * nfs_clear_request - Free up all resources allocated to the request
 * @req: pointer to request to clear
 *
 * Release page and open context resources associated with a read/write
 * request after it has completed.
 */
static void nfs_clear_request(struct nfs_page *req)
{
        struct page *page = req->wb_page;
        struct nfs_open_context *ctx = req->wb_context;
        struct nfs_lock_context *l_ctx = req->wb_lock_context;

        if (page != NULL) {
                page_cache_release(page);
                req->wb_page = NULL;
        }
        if (l_ctx != NULL) {
                nfs_iocounter_dec(&l_ctx->io_count);
                nfs_put_lock_context(l_ctx);
                req->wb_lock_context = NULL;
        }
        if (ctx != NULL) {
                put_nfs_open_context(ctx);
                req->wb_context = NULL;
        }
}


/**
 * nfs_release_request - Release the count on an NFS read/write request
 * @req: request to release
 *
 * Note: Should never be called with the spinlock held!
 */
static void nfs_free_request(struct kref *kref)
{
        struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);

        /* Release struct file and open context */
        nfs_clear_request(req);
        nfs_page_free(req);
}

void nfs_release_request(struct nfs_page *req)
{
        kref_put(&req->wb_kref, nfs_free_request);
}

static int nfs_wait_bit_uninterruptible(void *word)
{
        io_schedule();
        return 0;
}

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * Interruptible by fatal signals only.
 * The user is responsible for holding a count on the request.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
        return wait_on_bit(&req->wb_flags, PG_BUSY,
                        nfs_wait_bit_uninterruptible,
                        TASK_UNINTERRUPTIBLE);
}

bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_page *prev, struct nfs_page *req)
{
        /*
         * FIXME: ideally we should be able to coalesce all requests
         * that are not block boundary aligned, but currently this
         * is problematic for the case of bsize < PAGE_CACHE_SIZE,
         * since nfs_flush_multi and nfs_pagein_multi assume you
         * can have only one struct nfs_page.
         */
        if (desc->pg_bsize < PAGE_SIZE)
                return 0;

        return desc->pg_count + req->wb_bytes <= desc->pg_bsize;
}
EXPORT_SYMBOL_GPL(nfs_generic_pg_test);

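/*
 * Worked example for the test above (numbers are illustrative):
 * with pg_bsize = 64 KiB and pg_count = 60 KiB already coalesced,
 * a 4 KiB request passes (60 + 4 <= 64) and is coalesced, while a
 * further 4 KiB request would exceed pg_bsize and start a new RPC.
 */
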
static inline struct nfs_rw_header *NFS_RW_HEADER(struct nfs_pgio_header *hdr)
{
        return container_of(hdr, struct nfs_rw_header, header);
}

/**
 * nfs_rw_header_alloc - Allocate a header for a read or write
 * @ops: Read or write function vector
 */
struct nfs_rw_header *nfs_rw_header_alloc(const struct nfs_rw_ops *ops)
{
        struct nfs_rw_header *header = ops->rw_alloc_header();

        if (header) {
                struct nfs_pgio_header *hdr = &header->header;

                INIT_LIST_HEAD(&hdr->pages);
                INIT_LIST_HEAD(&hdr->rpc_list);
                spin_lock_init(&hdr->lock);
                atomic_set(&hdr->refcnt, 0);
                hdr->rw_ops = ops;
        }
        return header;
}
EXPORT_SYMBOL_GPL(nfs_rw_header_alloc);

/*
 * nfs_rw_header_free - Free a read or write header
 * @hdr: The header to free
 */
void nfs_rw_header_free(struct nfs_pgio_header *hdr)
{
        hdr->rw_ops->rw_free_header(NFS_RW_HEADER(hdr));
}
EXPORT_SYMBOL_GPL(nfs_rw_header_free);

/**
 * nfs_pgio_data_alloc - Allocate pageio data
 * @hdr: The header making a request
 * @pagecount: Number of page slots to allocate
 */
struct nfs_pgio_data *nfs_pgio_data_alloc(struct nfs_pgio_header *hdr,
                                          unsigned int pagecount)
{
        struct nfs_pgio_data *data, *prealloc;

        prealloc = &NFS_RW_HEADER(hdr)->rpc_data;
        if (prealloc->header == NULL)
                data = prealloc;
        else
                data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                goto out;

        if (nfs_pgarray_set(&data->pages, pagecount)) {
                data->header = hdr;
                atomic_inc(&hdr->refcnt);
        } else {
                if (data != prealloc)
                        kfree(data);
                data = NULL;
        }
out:
        return data;
}

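/*
 * Note on the allocation above: the first nfs_pgio_data for a header
 * comes from the rpc_data slot embedded in struct nfs_rw_header, so
 * the common single-RPC case needs no extra allocation; only
 * additional RPCs for the same header fall back to kzalloc().
 */
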
/**
 * nfs_pgio_data_release - Properly free pageio data
 * @data: The data to release
 */
void nfs_pgio_data_release(struct nfs_pgio_data *data)
{
        struct nfs_pgio_header *hdr = data->header;
        struct nfs_rw_header *pageio_header = NFS_RW_HEADER(hdr);

        put_nfs_open_context(data->args.context);
        if (data->pages.pagevec != data->pages.page_array)
                kfree(data->pages.pagevec);
        if (data == &pageio_header->rpc_data) {
                data->header = NULL;
                data = NULL;
        }
        if (atomic_dec_and_test(&hdr->refcnt))
                hdr->completion_ops->completion(hdr);
        /* Note: we only free the rpc_task after callbacks are done.
         * See the comment in rpc_free_task() for why
         */
        kfree(data);
}
EXPORT_SYMBOL_GPL(nfs_pgio_data_release);

/**
 * nfs_pgio_rpcsetup - Set up arguments for a pageio call
 * @data: The pageio data
 * @count: Number of bytes to read or write
 * @offset: Initial offset
 * @how: How to commit data (writes only)
 * @cinfo: Commit information for the call (writes only)
 */
void nfs_pgio_rpcsetup(struct nfs_pgio_data *data,
                       unsigned int count, unsigned int offset,
                       int how, struct nfs_commit_info *cinfo)
{
        struct nfs_page *req = data->header->req;

        /* Set up the RPC argument and reply structs
         * NB: take care not to mess about with data->commit et al. */

        data->args.fh = NFS_FH(data->header->inode);
        data->args.offset = req_offset(req) + offset;
        /* pnfs_set_layoutcommit needs this */
        data->mds_offset = data->args.offset;
        data->args.pgbase = req->wb_pgbase + offset;
        data->args.pages = data->pages.pagevec;
        data->args.count = count;
        data->args.context = get_nfs_open_context(req->wb_context);
        data->args.lock_context = req->wb_lock_context;
        data->args.stable = NFS_UNSTABLE;
        switch (how & (FLUSH_STABLE | FLUSH_COND_STABLE)) {
        case 0:
                break;
        case FLUSH_COND_STABLE:
                if (nfs_reqs_to_commit(cinfo))
                        break;
        default:
                data->args.stable = NFS_FILE_SYNC;
        }

        data->res.fattr = &data->fattr;
        data->res.count = count;
        data->res.eof = 0;
        data->res.verf = &data->verf;
        nfs_fattr_init(&data->fattr);
}

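/*
 * Summary of the stable-mode switch above (derived from the code):
 *
 *      how flags                               args.stable
 *      neither flag set                        NFS_UNSTABLE
 *      FLUSH_COND_STABLE, requests
 *        still awaiting commit                 NFS_UNSTABLE
 *      FLUSH_COND_STABLE, nothing to
 *        commit (falls through)                NFS_FILE_SYNC
 *      FLUSH_STABLE                            NFS_FILE_SYNC
 */
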
/**
 * nfs_pgio_prepare - Prepare pageio data to go over the wire
 * @task: The current task
 * @calldata: pageio data to prepare
 */
static void nfs_pgio_prepare(struct rpc_task *task, void *calldata)
{
        struct nfs_pgio_data *data = calldata;
        int err;
        err = NFS_PROTO(data->header->inode)->pgio_rpc_prepare(task, data);
        if (err)
                rpc_exit(task, err);
}

/**
 * nfs_pgio_error - Clean up from a pageio error
 * @desc: IO descriptor
 * @hdr: pageio header
 */
int nfs_pgio_error(struct nfs_pageio_descriptor *desc,
                   struct nfs_pgio_header *hdr)
{
        struct nfs_pgio_data *data;

        set_bit(NFS_IOHDR_REDO, &hdr->flags);
        while (!list_empty(&hdr->rpc_list)) {
                data = list_first_entry(&hdr->rpc_list, struct nfs_pgio_data, list);
                list_del(&data->list);
                nfs_pgio_data_release(data);
        }
        desc->pg_completion_ops->error_cleanup(&desc->pg_list);
        return -ENOMEM;
}

/**
 * nfs_pgio_release - Release pageio data
 * @calldata: The pageio data to release
 */
static void nfs_pgio_release(void *calldata)
{
        struct nfs_pgio_data *data = calldata;
        if (data->header->rw_ops->rw_release)
                data->header->rw_ops->rw_release(data);
        nfs_pgio_data_release(data);
}

/**
 * nfs_pageio_init - initialise a page io descriptor
 * @desc: pointer to descriptor
 * @inode: pointer to inode
 * @pg_ops: pointer to pageio operations
 * @compl_ops: pointer to pageio completion operations
 * @rw_ops: pointer to nfs read/write operations
 * @bsize: io block size
 * @io_flags: extra parameters for the io function
 */
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
                     struct inode *inode,
                     const struct nfs_pageio_ops *pg_ops,
                     const struct nfs_pgio_completion_ops *compl_ops,
                     const struct nfs_rw_ops *rw_ops,
                     size_t bsize,
                     int io_flags)
{
        INIT_LIST_HEAD(&desc->pg_list);
        desc->pg_bytes_written = 0;
        desc->pg_count = 0;
        desc->pg_bsize = bsize;
        desc->pg_base = 0;
        desc->pg_moreio = 0;
        desc->pg_recoalesce = 0;
        desc->pg_inode = inode;
        desc->pg_ops = pg_ops;
        desc->pg_completion_ops = compl_ops;
        desc->pg_rw_ops = rw_ops;
        desc->pg_ioflags = io_flags;
        desc->pg_error = 0;
        desc->pg_lseg = NULL;
        desc->pg_dreq = NULL;
        desc->pg_layout_private = NULL;
}
EXPORT_SYMBOL_GPL(nfs_pageio_init);

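/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * the usual pattern for driving a pageio descriptor is
 *
 *      struct nfs_pageio_descriptor pgio;
 *
 *      nfs_pageio_init(&pgio, inode, pg_ops, compl_ops, rw_ops,
 *                      NFS_SERVER(inode)->wsize, FLUSH_COND_STABLE);
 *      (for each locked request:)
 *      if (!nfs_pageio_add_request(&pgio, req))
 *              (handle pgio.pg_error)
 *      nfs_pageio_complete(&pgio);     (flush whatever is queued)
 */
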
/**
 * nfs_pgio_result - Basic pageio error handling
 * @task: The task that ran
 * @calldata: Pageio data to check
 */
static void nfs_pgio_result(struct rpc_task *task, void *calldata)
{
        struct nfs_pgio_data *data = calldata;
        struct inode *inode = data->header->inode;

        dprintk("NFS: %s: %5u, (status %d)\n", __func__,
                task->tk_pid, task->tk_status);

        if (data->header->rw_ops->rw_done(task, data, inode) != 0)
                return;
        if (task->tk_status < 0)
                nfs_set_pgio_error(data->header, task->tk_status, data->args.offset);
        else
                data->header->rw_ops->rw_result(task, data);
}

static bool nfs_match_open_context(const struct nfs_open_context *ctx1,
                                   const struct nfs_open_context *ctx2)
{
        return ctx1->cred == ctx2->cred && ctx1->state == ctx2->state;
}

static bool nfs_match_lock_context(const struct nfs_lock_context *l1,
                                   const struct nfs_lock_context *l2)
{
        return l1->lockowner.l_owner == l2->lockowner.l_owner
                && l1->lockowner.l_pid == l2->lockowner.l_pid;
}

/**
 * nfs_can_coalesce_requests - test two requests for compatibility
 * @prev: pointer to nfs_page
 * @req: pointer to nfs_page
 * @pgio: pointer to the pageio descriptor being filled
 *
 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 * page data area they describe is contiguous, and that their RPC
 * credentials, NFSv4 open state, and lockowners are the same.
 *
 * Return 'true' if this is the case, else return 'false'.
 */
static bool nfs_can_coalesce_requests(struct nfs_page *prev,
                                      struct nfs_page *req,
                                      struct nfs_pageio_descriptor *pgio)
{
        if (!nfs_match_open_context(req->wb_context, prev->wb_context))
                return false;
        if (req->wb_context->dentry->d_inode->i_flock != NULL &&
            !nfs_match_lock_context(req->wb_lock_context, prev->wb_lock_context))
                return false;
        if (req->wb_pgbase != 0)
                return false;
        if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
                return false;
        if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
                return false;
        return pgio->pg_ops->pg_test(pgio, prev, req);
}

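/*
 * Worked example for the contiguity checks above (illustrative,
 * assuming PAGE_CACHE_SIZE = 4096): if prev covers all 4096 bytes of
 * page index 3 and req starts at byte 0 of page index 4, then
 * req_offset(req) == req_offset(prev) + prev->wb_bytes and the two
 * requests are byte-contiguous; a hole or a partial page fails the test.
 */
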
/**
 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
                                     struct nfs_page *req)
{
        if (desc->pg_count != 0) {
                struct nfs_page *prev;

                prev = nfs_list_entry(desc->pg_list.prev);
                if (!nfs_can_coalesce_requests(prev, req, desc))
                        return 0;
        } else {
                if (desc->pg_ops->pg_init)
                        desc->pg_ops->pg_init(desc, req);
                desc->pg_base = req->wb_pgbase;
        }
        nfs_list_remove_request(req);
        nfs_list_add_request(req, &desc->pg_list);
        desc->pg_count += req->wb_bytes;
        return 1;
}

/*
 * Helper for nfs_pageio_add_request and nfs_pageio_complete
 */
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
        if (!list_empty(&desc->pg_list)) {
                int error = desc->pg_ops->pg_doio(desc);
                if (error < 0)
                        desc->pg_error = error;
                else
                        desc->pg_bytes_written += desc->pg_count;
        }
        if (list_empty(&desc->pg_list)) {
                desc->pg_count = 0;
                desc->pg_base = 0;
        }
}

/**
 * nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
                                    struct nfs_page *req)
{
        while (!nfs_pageio_do_add_request(desc, req)) {
                desc->pg_moreio = 1;
                nfs_pageio_doio(desc);
                if (desc->pg_error < 0)
                        return 0;
                desc->pg_moreio = 0;
                if (desc->pg_recoalesce)
                        return 0;
        }
        return 1;
}

static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
{
        LIST_HEAD(head);

        do {
                list_splice_init(&desc->pg_list, &head);
                desc->pg_bytes_written -= desc->pg_count;
                desc->pg_count = 0;
                desc->pg_base = 0;
                desc->pg_recoalesce = 0;

                while (!list_empty(&head)) {
                        struct nfs_page *req;

                        req = list_first_entry(&head, struct nfs_page, wb_list);
                        nfs_list_remove_request(req);
                        if (__nfs_pageio_add_request(desc, req))
                                continue;
                        if (desc->pg_error < 0)
                                return 0;
                        break;
                }
        } while (desc->pg_recoalesce);
        return 1;
}

int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
                           struct nfs_page *req)
{
        int ret;

        do {
                ret = __nfs_pageio_add_request(desc, req);
                if (ret)
                        break;
                if (desc->pg_error < 0)
                        break;
                ret = nfs_do_recoalesce(desc);
        } while (ret);
        return ret;
}
EXPORT_SYMBOL_GPL(nfs_pageio_add_request);

/**
 * nfs_pageio_complete - Complete I/O on an nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 */
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
{
        for (;;) {
                nfs_pageio_doio(desc);
                if (!desc->pg_recoalesce)
                        break;
                if (!nfs_do_recoalesce(desc))
                        break;
        }
}
EXPORT_SYMBOL_GPL(nfs_pageio_complete);

/**
 * nfs_pageio_cond_complete - Conditional I/O completion
 * @desc: pointer to io descriptor
 * @index: page index
 *
 * It is important to ensure that processes don't try to take locks
 * on non-contiguous ranges of pages as that might deadlock. This
 * function should be called before attempting to wait on a locked
 * nfs_page. It will complete the I/O if the page index 'index'
 * is not contiguous with the existing list of pages in 'desc'.
 */
void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
{
        if (!list_empty(&desc->pg_list)) {
                struct nfs_page *prev = nfs_list_entry(desc->pg_list.prev);
                if (index != prev->wb_index + 1)
                        nfs_pageio_complete(desc);
        }
}

int __init nfs_init_nfspagecache(void)
{
        nfs_page_cachep = kmem_cache_create("nfs_page",
                                            sizeof(struct nfs_page),
                                            0, SLAB_HWCACHE_ALIGN,
                                            NULL);
        if (nfs_page_cachep == NULL)
                return -ENOMEM;

        return 0;
}

void nfs_destroy_nfspagecache(void)
{
        kmem_cache_destroy(nfs_page_cachep);
}

const struct rpc_call_ops nfs_pgio_common_ops = {
        .rpc_call_prepare = nfs_pgio_prepare,
        .rpc_call_done = nfs_pgio_result,
        .rpc_release = nfs_pgio_release,
};